
bug on approxConv through partial relative factor

Open dehann opened this issue 3 years ago • 5 comments

MWE https://github.com/JuliaRobotics/Caesar.jl/blob/7167be93d3eef646ff6293e8398f8dfca7c8ef60/examples/dev/scalar/boxy.jl#L280

julia> pts = approxConv(fg, :x0x4f1, :x4)

ERROR: DimensionMismatch("array could not be broadcast to match destination")
Stacktrace:
  [1] check_broadcast_shape
    @ ./broadcast.jl:520 [inlined]
  [2] check_broadcast_axes
    @ ./broadcast.jl:523 [inlined]
  [3] instantiate
    @ ./broadcast.jl:269 [inlined]
  [4] materialize!
    @ ./broadcast.jl:894 [inlined]
  [5] materialize!
    @ ./broadcast.jl:891 [inlined]
  [6] (::IncrementalInference.var"#201#203"{IncrementalInference.var"#208#209"{SubArray{Float64, 1, Matrix{Float64}, Tuple{Vector{Int64}, Int64}, false}, IncrementalInference.var"#205#206"{Int64, Tuple{Matrix{Float64}}, CalcFactor{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, FactorMetadata{SubArray{DFGVariable{Point2}, 1, Vector{DFGVariable{Point2}}, Tuple{Vector{Int64}}, false}, SubArray{Symbol, 1, Vector{Symbol}, Tuple{Vector{Int64}}, false}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}, Nothing}, Tuple{Matrix{Float64}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}}, Vector{Float64}})(x::Vector{Float64})
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/NumericalCalculations.jl:61
  [7] finite_difference_gradient!(df::Vector{Float64}, f::IncrementalInference.var"#201#203"{IncrementalInference.var"#208#209"{SubArray{Float64, 1, Matrix{Float64}, Tuple{Vector{Int64}, Int64}, false}, IncrementalInference.var"#205#206"{Int64, Tuple{Matrix{Float64}}, CalcFactor{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, FactorMetadata{SubArray{DFGVariable{Point2}, 1, Vector{DFGVariable{Point2}}, Tuple{Vector{Int64}}, false}, SubArray{Symbol, 1, Vector{Symbol}, Tuple{Vector{Int64}}, false}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}, Nothing}, Tuple{Matrix{Float64}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}}, Vector{Float64}}, x::Vector{Float64}, cache::FiniteDiff.GradientCache{Nothing, Nothing, Nothing, Vector{Float64}, Val{:central}(), Float64, Val{true}()}; relstep::Float64, absstep::Float64, dir::Bool)
    @ FiniteDiff ~/.julia/packages/FiniteDiff/blirf/src/gradients.jl:273
  [8] finite_difference_gradient!(df::Vector{Float64}, f::Function, x::Vector{Float64}, cache::FiniteDiff.GradientCache{Nothing, Nothing, Nothing, Vector{Float64}, Val{:central}(), Float64, Val{true}()})
    @ FiniteDiff ~/.julia/packages/FiniteDiff/blirf/src/gradients.jl:224
  [9] (::NLSolversBase.var"#g!#15"{IncrementalInference.var"#201#203"{IncrementalInference.var"#208#209"{SubArray{Float64, 1, Matrix{Float64}, Tuple{Vector{Int64}, Int64}, false}, IncrementalInference.var"#205#206"{Int64, Tuple{Matrix{Float64}}, CalcFactor{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, FactorMetadata{SubArray{DFGVariable{Point2}, 1, Vector{DFGVariable{Point2}}, Tuple{Vector{Int64}}, false}, SubArray{Symbol, 1, Vector{Symbol}, Tuple{Vector{Int64}}, false}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}, Nothing}, Tuple{Matrix{Float64}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}}, Vector{Float64}}, FiniteDiff.GradientCache{Nothing, Nothing, Nothing, Vector{Float64}, Val{:central}(), Float64, Val{true}()}})(storage::Vector{Float64}, x::Vector{Float64})
    @ NLSolversBase ~/.julia/packages/NLSolversBase/geyh3/src/objective_types/oncedifferentiable.jl:57
 [10] (::NLSolversBase.var"#fg!#16"{IncrementalInference.var"#201#203"{IncrementalInference.var"#208#209"{SubArray{Float64, 1, Matrix{Float64}, Tuple{Vector{Int64}, Int64}, false}, IncrementalInference.var"#205#206"{Int64, Tuple{Matrix{Float64}}, CalcFactor{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, FactorMetadata{SubArray{DFGVariable{Point2}, 1, Vector{DFGVariable{Point2}}, Tuple{Vector{Int64}}, false}, SubArray{Symbol, 1, Vector{Symbol}, Tuple{Vector{Int64}}, false}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}, Nothing}, Tuple{Matrix{Float64}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}}, Vector{Float64}}})(storage::Vector{Float64}, x::Vector{Float64})
    @ NLSolversBase ~/.julia/packages/NLSolversBase/geyh3/src/objective_types/oncedifferentiable.jl:61
 [11] value_gradient!!(obj::NLSolversBase.OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, x::Vector{Float64})
    @ NLSolversBase ~/.julia/packages/NLSolversBase/geyh3/src/interface.jl:82
 [12] initial_state(method::Optim.BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Nothing, Optim.Flat}, options::Optim.Options{Float64, Nothing}, d::NLSolversBase.OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, initial_x::Vector{Float64})
    @ Optim ~/.julia/packages/Optim/uwNqi/src/multivariate/solvers/first_order/bfgs.jl:85
 [13] optimize
    @ ~/.julia/packages/Optim/uwNqi/src/multivariate/optimize/optimize.jl:35 [inlined]
 [14] #optimize#87
    @ ~/.julia/packages/Optim/uwNqi/src/multivariate/optimize/interface.jl:142 [inlined]
 [15] optimize(f::Function, initial_x::Vector{Float64}, method::Optim.BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Nothing, Optim.Flat}, options::Optim.Options{Float64, Nothing}) (repeats 2 times)
    @ Optim ~/.julia/packages/Optim/uwNqi/src/multivariate/optimize/interface.jl:141
 [16] _solveLambdaNumeric(fcttype::PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, objResX::IncrementalInference.var"#208#209"{SubArray{Float64, 1, Matrix{Float64}, Tuple{Vector{Int64}, Int64}, false}, IncrementalInference.var"#205#206"{Int64, Tuple{Matrix{Float64}}, CalcFactor{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, FactorMetadata{SubArray{DFGVariable{Point2}, 1, Vector{DFGVariable{Point2}}, Tuple{Vector{Int64}}, false}, SubArray{Symbol, 1, Vector{Symbol}, Tuple{Vector{Int64}}, false}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}, Nothing}, Tuple{Matrix{Float64}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}, SubArray{Matrix{Float64}, 1, Vector{Matrix{Float64}}, Tuple{Vector{Int64}}, false}}}, residual::Vector{Float64}, u0::Vector{Float64}, islen1::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/NumericalCalculations.jl:61
 [17] _solveCCWNumeric!(ccwl::CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, Nothing, Vector{Int64}}; perturb::Float64, testshuffle::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/NumericalCalculations.jl:205
 [18] _solveCCWNumeric!(ccwl::CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, Nothing, Vector{Int64}})
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/NumericalCalculations.jl:183
 [19] approxConvOnElements!(ccwl::CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, Nothing, Vector{Int64}}, elements::Vector{Int64}, #unused#::Type{SingleThreaded})
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:40
 [20] approxConvOnElements!(ccwl::CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, Nothing, Vector{Int64}}, elements::Vector{Int64})
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:50
 [21] computeAcrossHypothesis!(ccwl::CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, Nothing, Vector{Int64}}, allelements::Vector{Any}, activehypo::Vector{Any}, certainidx::Vector{Int64}, sfidx::Int64, maxlen::Int64, maniAddOps::Tuple{typeof(+), typeof(+)}; spreadNH::Float64, inflateCycles::Int64, skipSolve::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:256
 [22] evalPotentialSpecific(Xi::Vector{DFGVariable{Point2}}, ccwl::CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, Nothing, Vector{Int64}}, solvefor::Symbol, T_::Type{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}}, measurement::Tuple{Matrix{Float64}}; needFreshMeasurements::Bool, solveKey::Symbol, N::Int64, spreadNH::Float64, inflateCycles::Int64, dbg::Bool, skipSolve::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:366
 [23] #evalPotentialSpecific#241
    @ ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:484 [inlined]
 [24] evalFactor(dfg::LightDFG{SolverParams, DFGVariable, DFGFactor}, fct::DFGFactor{CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, H, C} where {H<:Union{Nothing, Categorical{P, Ps} where {P<:Real, Ps<:AbstractVector{P}}}, C<:Union{Nothing, Vector{Int64}}}, 2}, solvefor::Symbol, measurement::Tuple{Matrix{Float64}}; needFreshMeasurements::Bool, solveKey::Symbol, N::Int64, inflateCycles::Int64, dbg::Bool, skipSolve::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:527
 [25] approxConv(dfg::LightDFG{SolverParams, DFGVariable, DFGFactor}, fc::DFGFactor{CommonConvWrapper{PartialLinearRelative{AliasingScalarSampler, Tuple{Int64}}, H, C} where {H<:Union{Nothing, Categorical{P, Ps} where {P<:Real, Ps<:AbstractVector{P}}}, C<:Union{Nothing, Vector{Int64}}}, 2}, target::Symbol, measurement::Tuple{Matrix{Float64}}; solveKey::Symbol, N::Int64, skipSolve::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:563
 [26] approxConv(dfg::LightDFG{SolverParams, DFGVariable, DFGFactor}, from::Symbol, target::Symbol, measurement::Tuple{Matrix{Float64}}; solveKey::Symbol, N::Int64, tfg::LightDFG{SolverParams, DFGVariable, DFGFactor}, setPPEmethod::Nothing, setPPE::Bool, path::Vector{Symbol}, skipSolve::Bool)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:643
 [27] approxConv(dfg::LightDFG{SolverParams, DFGVariable, DFGFactor}, from::Symbol, target::Symbol, measurement::Tuple{Matrix{Float64}}) (repeats 2 times)
    @ IncrementalInference ~/.julia/dev/IncrementalInference/src/ApproxConv.jl:607
 [28] top-level scope
    @ REPL[132]:1

dehann avatar May 19 '21 22:05 dehann

When ccw.params gets created during an approxConv, and the factor is a .partial factor, then only the partial dimensions of interest should be used in the construction of ccw before the numerical calculations. Options for future consolidation include the following (a rough sketch of the intended slicing follows the links):

Maybe we should consolidate these two function calls? https://github.com/JuliaRobotics/IncrementalInference.jl/blob/f37ef01755cf5b74c6cc062426d0cdc1de5d8307/src/ApproxConv.jl#L341-L349


https://github.com/JuliaRobotics/IncrementalInference.jl/blob/f37ef01755cf5b74c6cc062426d0cdc1de5d8307/src/ApproxConv.jl#L298-L316

https://github.com/JuliaRobotics/IncrementalInference.jl/blob/f37ef01755cf5b74c6cc062426d0cdc1de5d8307/src/ApproxConv.jl#L62-L121
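
A rough, self-contained sketch of the intended slicing, using a hypothetical helper name (this is not the actual CommonConvWrapper construction): the numerical routines should only ever see the .partial rows of the coordinate matrix, while the remaining rows of the destination variable stay untouched.

# hypothetical sketch: hand only the partial dimensions to the numerical solve
function solvePartialColumns!(X::Matrix{Float64}, partialDims::Vector{Int}, solvedim::Function)
  Xp = view(X, partialDims, :)               # only the partial dimensions
  for i in 1:size(Xp, 2)
    Xp[:, i] .= solvedim(Xp[:, i])           # solver sees vectors of length(partialDims)
  end
  return X
end

# e.g. a 2-dof variable with a partial factor acting on dimension 2 only
X = zeros(2, 5)
solvePartialColumns!(X, [2], x -> x .+ 1.0)  # row 1 untouched, row 2 updated

The DimensionMismatch above is consistent with the full-dimension coordinates reaching a residual that only returns values for the partial dimensions.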

dehann avatar May 20 '21 00:05 dehann

On partial factors, it may be easier to provide the residual function directly instead of the dimension. A (crude) example would be:

bel = AliasingScalarSampler(qr, pv)
plr = PartialLinearRelative(bel, (1,))
addFactor!(fg, [:x0, :x1], plr, tags=[:TERRAIN,:MATCH,:NORTH])

replacing the factor construction (line 2 above) with:

f(x, y) = x[2] - y[2]
plr = PartialLinearRelative(bel, f)

This may generalize better as you move towards manifold elements for variables.
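
A minimal sketch of that suggestion, with made-up type and function names (not the actual IncrementalInference API): the factor carries the measurement belief plus a user-supplied residual closure, and the solver evaluates the closure against a sampled measurement.

using Distributions  # stand-in Normal belief for the usage line below

struct FunctionalPartialRelative{B, F <: Function}
  Z::B        # measurement belief, e.g. the AliasingScalarSampler from the snippet above
  resid::F    # user-supplied residual, e.g. (x, y) -> x[2] - y[2]
end

# the solver would evaluate the user closure against a sampled measurement z
evalResidual(fct::FunctionalPartialRelative, z, x, y) = fct.resid(x, y) - z

plr = FunctionalPartialRelative(Normal(0.0, 0.1), (x, y) -> x[2] - y[2])
evalResidual(plr, rand(plr.Z), [0.0, 1.0], [0.0, 3.0])   # scalar residual, ≈ -2.0 minus the sampled z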

pvazteixeira avatar May 20 '21 04:05 pvazteixeira

would it be sensible to have a function that just returns the partial dimensions instead? E.g.

plr = PartialLinearRelative(..., (p)->p[2])

and let the factor residual functions handle the calculation mechanics? I'm thinking about avoiding duplicate definitions of the residual mechanics...

An example might be a horizontal-only range on a Pose3, which implies a lookup function like

(p)->view(p,1:2)

or perhaps a heading-only factor on a Pose3Quat:

(p)->Circle(convert(YAW, p.rotation))

assuming p is a point on the manifold, with all the other interesting data-wrangling considerations in tow.
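
A minimal sketch of the lookup-function idea, again with made-up names (not the actual IncrementalInference API): the factor keeps a selector that extracts the constrained sub-coordinates, so the residual mechanics are only written once.

struct SelectorPartial{B, S <: Function}
  Z::B          # measurement belief on the selected sub-coordinates
  select::S     # e.g. p -> view(p, 1:2) for horizontal-only range
end

# residual computed only on the coordinates picked out by the selector
evalResidual(fct::SelectorPartial, z, p, q) = (fct.select(q) .- fct.select(p)) .- z

Note that a selector like p -> view(p, 1:2) still returns a writable window into p, whereas the converted-yaw example would not, which relates to the write-back concern below.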

xref #1242


Ah, another problem with indexing in (especially on manifold conversions) is that the answer coming out should influence the original data (it is not simply an inverse function per se).

dehann avatar May 21 '21 03:05 dehann

I'm going to walk back my original suggestion and say I'm not convinced a function is the way to go (at least for a PartialLinearRelative). The argument is that if you need more complexity than passing the dimensions, you should be specifying a dedicated factor.

Your two examples - horizontal range and heading-only constraints - would normally not be implemented as generic partials, right?

pvazteixeira avatar May 21 '21 21:05 pvazteixeira

Your two examples - horizontal range and heading-only constraints - would normally not be implemented as generic partials, right?

Correct, the way I have it at this time is to implement the partial mechanics in a dedicated factor. The .partial field is not a wholesale replacement in all cases.

See also: https://github.com/JuliaRobotics/Caesar.jl/discussions/680#discussioncomment-769763

dehann avatar May 22 '21 08:05 dehann

I think this issue should be fixed with the partial factor fixes in v0.32.0. A factor manifold now just represents the partial manifold, i.e. zDim is only the dimension of the partial manifold representing the factor measurements. A bearing-only factor, for example, would have dimension 1. At present, the inference implementation (at least for the nonparametric case) still defines the partial dimensions on the coordinates. More work is needed to consolidate partials through projection or embedding functions between the connected factors and the factor manifold itself.
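
As a small illustration of that zDim convention, assuming Manifolds.jl naming (not the IncrementalInference factor definitions themselves): a bearing-only style measurement lives on the circle even though the variable lives on SE(2).

using Manifolds

M_variable = SpecialEuclidean(2)   # the variable's manifold, dimension 3
M_measure  = Circle()              # the factor's (partial) measurement manifold

manifold_dimension(M_measure)      # 1, i.e. zDim for a bearing-only factor
manifold_dimension(M_variable)     # 3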

dehann avatar Jan 02 '23 07:01 dehann