pgmpy icon indicating copy to clipboard operation
pgmpy copied to clipboard

MarkovModel to_junction_tree hardcodes DiscreteFactor

Open ryan112358 opened this issue 7 years ago • 0 comments

I created my own Factor class, inheriting from DiscreteFactor but with overwritten implementations of all the key functions like product, sum, reduce, marginalize, etc. which would be more convenient to work with for my application. I then created a MarkovModel using this new class and attempted to do belief propagation on it, but when I ran it the clique beliefs were no longer members of this class, they were ordinary DiscreteFactors.

I think this is because in the to_junction_tree function in MarkovModel, where the clique_potentials are set to the product of clique factors, an identity DiscreteFactor is hard-coded for the case where the list of factors is empty. This causes the resulting clique potential to always be a DiscreteFactor, even when the list is non-empty and the factors are members of my custom class.

from pgmpy.factors.base import BaseFactor
from pgmpy.factors.discrete import DiscreteFactor
from scipy.special import logsumexp
import numpy as np

class CustomFactor(DiscreteFactor):
    """DiscreteFactor subclass that stores its values in log-space.

    The log-potential is kept as an ndarray whose shape spans all model
    variables, with size 1 along dimensions the factor does not contain.
    Variables are identified by axis index.
    """

    def __init__(self, potential):
        """`potential` is an nd array with size 1 along dimensions that are not in the factor."""
        self.potential = potential
        shape = potential.shape
        # Axes of size 1 are "not in" this factor; the rest are its variables.
        variables = tuple(i for i in range(len(shape)) if shape[i] != 1)
        cardinality = tuple(shape[i] for i in variables)
        # The parent class stores linear-space values, so exponentiate.
        DiscreteFactor.__init__(self, variables, cardinality, np.exp(potential))

    def copy(self):
        """Return a new CustomFactor over the same log-potential array."""
        return CustomFactor(self.potential)

    def marginalize(self, variables, inplace=False):
        """Sum out `variables` (axis indices) in log-space via logsumexp."""
        assert inplace == False
        potentials = logsumexp(self.potential, axis=variables, keepdims=True)
        return CustomFactor(potentials)

    def maximize(self, variables, inplace=False):
        """Max out `variables` (axis indices) in log-space."""
        assert inplace == False
        potential = np.max(self.potential, axis=variables, keepdims=True)
        # BUG FIX: original returned CustomFactor(maximize) — an undefined
        # name (NameError) — instead of the computed potential.
        return CustomFactor(potential)

    def product(self, other, inplace=False):
        """Factor product: addition in log-space (relies on broadcasting)."""
        assert inplace == False
        return CustomFactor(self.potential + other.potential)

    def divide(self, other, inplace=False):
        """Factor division: subtraction in log-space."""
        assert inplace == False
        return CustomFactor(self.potential - other.potential)

    def sum(self, other, inplace=False):
        """Factor sum: elementwise log(exp(a) + exp(b))."""
        assert inplace == False
        potential = np.logaddexp(self.potential, other.potential)
        return CustomFactor(potential)

    def reduce(self, values, inplace=False):
        """Restrict the factor to the given (variable, state) assignments.

        Each reduced axis keeps size 1 (via the single-element list index),
        so the axis-index-as-variable convention is preserved.
        """
        assert inplace == False
        slice_ = [slice(None)] * len(self.potential.shape)
        for var, state in values:
            # BUG FIX: original assigned into `slice_i`, an undefined name.
            slice_[var] = [state]
        # BUG FIX: index with a tuple — indexing an ndarray with a list of
        # slices is deprecated/invalid in modern NumPy.
        return CustomFactor(self.potential[tuple(slice_)])

    def normalize(self, inplace=False):
        """Normalize so the values sum to 1 (subtract log-partition)."""
        assert inplace == False
        logZ = logsumexp(self.potential)
        return CustomFactor(self.potential - logZ)

    def __repr__(self):
        var_card = ", ".join(['{var}:{card}'.format(var=var, card=card)
                              for var, card in zip(self.variables, self.cardinality)])
        return "<CustomFactor representing phi({var_card}) at {address}>".format(
            address=hex(id(self)), var_card=var_card)

 
if __name__ == '__main__':
    # Reproduction: belief propagation should preserve the custom factor
    # class, but to_junction_tree hard-codes DiscreteFactor.
    from pgmpy.models import MarkovModel
    from pgmpy.inference import BeliefPropagation

    P = np.random.rand(4, 5, 6)
    # NOTE(review): CustomFactor expects log-space values, but raw sums of
    # probabilities are passed here — presumably fine for a type-only repro.
    A = CustomFactor(P.sum(axis=2, keepdims=True))
    B = CustomFactor(P.sum(axis=1, keepdims=True))

    model = MarkovModel([(0, 1), (0, 2)])
    model.add_factors(A, B)
    bp = BeliefPropagation(model)
    bp.calibrate()
    beliefs = bp.get_clique_beliefs()

    # BUG FIX: original used Python 2 `print` statements, a SyntaxError
    # under Python 3.
    print(type(A * B))
    print(type(beliefs[(0, 1)]))  # should be CustomFactor, but actually is DiscreteFactor
  

ryan112358 avatar Jan 07 '18 03:01 ryan112358