1   /* Copyright 2002-2021 CS GROUP
2    * Licensed to CS GROUP (CS) under one or more
3    * contributor license agreements.  See the NOTICE file distributed with
4    * this work for additional information regarding copyright ownership.
5    * CS licenses this file to You under the Apache License, Version 2.0
6    * (the "License"); you may not use this file except in compliance with
7    * the License.  You may obtain a copy of the License at
8    *
9    *   http://www.apache.org/licenses/LICENSE-2.0
10   *
11   * Unless required by applicable law or agreed to in writing, software
12   * distributed under the License is distributed on an "AS IS" BASIS,
13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14   * See the License for the specific language governing permissions and
15   * limitations under the License.
16   */
17  package org.orekit.estimation.measurements.gnss;
18  
19  import java.util.SortedSet;
20  import java.util.TreeSet;
21  
22  import org.hipparchus.util.FastMath;
23  
24  /** Decorrelation/reduction engine for LAMBDA method.
25   * <p>
26   * This class implements PJG Teunissen Least Square Ambiguity Decorrelation
27   * Adjustment (LAMBDA) method, as described in both the 1996 paper <a
28   * href="https://www.researchgate.net/publication/2790708_The_LAMBDA_method_for_integer_ambiguity_estimation_implementation_aspects">
29   * The LAMBDA method for integer ambiguity estimation: implementation aspects</a> by
30   * Paul de Jonge and Christian Tiberius and on the 2005 paper <a
31   * href="https://www.researchgate.net/publication/225518977_MLAMBDA_a_modified_LAMBDA_method_for_integer_least-squares_estimation">
32   * A modified LAMBDA method for integer least-squares estimation</a> by X.-W Chang, X. Yang
33   * and T. Zhou, Journal of Geodesy 79(9):552-565, DOI: 10.1007/s00190-005-0004-x
34   * </p>
35   * <p>
 * It slightly departs from the original LAMBDA method as it implements
 * the following improvements proposed in the de Jonge and Tiberius 1996 paper
 * that vastly speed up the search:
39   * </p>
40   * <ul>
41   *   <li>alternate search starting from the middle and expanding outwards</li>
42   *   <li>automatic shrinking of ellipsoid during the search</li>
43   * </ul>
44   * @see AmbiguitySolver
45   * @author Luc Maisonobe
46   * @since 10.0
47   */
public class LambdaMethod extends AbstractLambdaMethod {

    /** Margin factor to apply to estimated search limit parameter χ²,
     * so the initial search ellipsoid is slightly larger than the strict
     * minimum enclosing the preset grid points. */
    private static final double CHI2_MARGIN_FACTOR = 1.1;

    /** {@inheritDoc} */
    @Override
    protected void ltdlDecomposition() {

        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();

        // perform Lᵀ.D.L decomposition
        // the factorization is performed in place, working upwards from the
        // last row: at each step the pivot diag[k] is used to eliminate the
        // contribution of row k from all rows above it, then row k of L is
        // scaled so the factor ends up with a unit diagonal
        for (int k = diag.length - 1; k >= 0; --k) {
            final double inv = 1.0 / diag[k];
            for (int i = 0; i < k; ++i) {
                final double lki = low[lIndex(k, i)];
                for (int j = 0; j < i; ++j) {
                    low[lIndex(i, j)] -= lki * low[lIndex(k, j)] * inv;
                }
                // update the remaining diagonal entry (Schur complement step)
                diag[i] -= lki * lki * inv;
            }
            // normalize row k of L by the pivot so L gets a unit diagonal
            for (int j = 0; j < k; ++j) {
                low[lIndex(k, j)] *= inv;
            }
        }

    }

    /** {@inheritDoc} */
    @Override
    protected void reduction() {

        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();
        final int n = diag.length;

        // decorrelation loop: alternate integer Gauss transformations
        // (which reduce the magnitude of off-diagonal entries) with symmetric
        // permutations of adjacent components (which sort the conditional
        // variances), restarting from the bottom whenever a permutation occurs
        int kSaved = n - 2;
        int k0 = kSaved;
        while (k0 >= 0) {
            final int k1 = k0 + 1;
            if (k0 <= kSaved) {
                // column k0 has not been processed since the last permutation,
                // apply the integer Gauss transformations to reduce it
                for (int i = k0 + 1; i < n; ++i) {
                    integerGaussTransformation(i, k0);
                }
            }
            // delta is the conditional variance component k1 would get if
            // components k0 and k1 were swapped; swap only when it decreases
            final double lk1k0 = low[lIndex(k1, k0)];
            final double delta = diag[k0] + lk1k0 * lk1k0 * diag[k1];
            if (delta < diag[k1]) {
                permutation(k0, delta);
                // remember the permutation position so already-reduced columns
                // above it are not needlessly re-processed, then restart the
                // sweep from the bottom of the matrix
                kSaved = k0;
                k0 = n - 2;
            } else {
                k0--;
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    protected void discreteSearch() {

        // get references to the diagonal part
        final double[] diag = getDiagReference();
        final int n = diag.length;

        // estimate search domain limit
        final long[]   fixed   = new long[n];   // current candidate integer ambiguities
        final double[] offsets = new double[n]; // offsets between candidates and float ambiguities
        final double[] left    = new double[n]; // squared distance of candidate to its conditional estimate
        final double[] right   = new double[n]; // remaining squared search radius for each component

        final int maxSolutions = getMaxSolution();
        final double[] decorrelated = getDecorrelatedReference();

        // one sampler per ambiguity, each enumerating integers alternately
        // around its conditional estimate (middle-out exploration)
        final AlternatingSampler[] samplers = new AlternatingSampler[n];

        // set up top level sampling for last ambiguity
        // the search is depth-first: index walks down from n-1 towards -1 as
        // candidates are selected, and back up when a range is exhausted
        double chi2 = estimateChi2(fixed, offsets);
        right[n - 1] = chi2 / diag[n - 1];
        int index = n - 1;
        while (index < n) {
            if (index == -1) {

                // all ambiguities have been fixed
                final double squaredNorm = chi2 - diag[0] * (right[0] - left[0]);
                addSolution(fixed, squaredNorm);
                final int size = getSolutionsSize();
                if (size >= maxSolutions) {

                    // shrink the ellipsoid so only candidates better than the
                    // solutions retained so far remain within the search domain
                    chi2 = squaredNorm;
                    right[n - 1] = chi2 / diag[n - 1];
                    for (int i = n - 1; i > 0; --i) {
                        samplers[i].setRadius(FastMath.sqrt(right[i]));
                        right[i - 1] = diag[i] / diag[i - 1] * (right[i] - left[i]);
                    }
                    samplers[0].setRadius(FastMath.sqrt(right[0]));

                    if (size > maxSolutions) {
                        // drop the worst solution, now outside the shrunk ellipsoid
                        removeSolution();
                    }

                }

                ++index;

            } else {

                if (samplers[index] == null) {
                    // we start exploring a new ambiguity: sample integers around
                    // the conditional estimate, alternating sides and expanding
                    // outwards, within the current search radius
                    samplers[index] = new AlternatingSampler(conditionalEstimate(index, offsets), FastMath.sqrt(right[index]));
                } else {
                    // continue exploring the same ambiguity
                    samplers[index].generateNext();
                }

                if (samplers[index].inRange()) {
                    fixed[index]       = samplers[index].getCurrent();
                    offsets[index]     = fixed[index] - decorrelated[index];
                    final double delta = fixed[index] - samplers[index].getMidPoint();
                    left[index]        = delta * delta;
                    if (index > 0) {
                        // propagate the remaining search budget to the next component
                        right[index - 1]   = diag[index] / diag[index - 1] * (right[index] - left[index]);
                    }

                    // go down one level
                    --index;

                } else {

                    // we have completed exploration of this ambiguity range
                    samplers[index] = null;

                    // go up one level
                    ++index;

                }

            }
        }

    }

    /** {@inheritDoc} */
    @Override
    protected void inverseDecomposition() {

        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();
        final int n = diag.length;

        // we rely on the following equation, where a low triangular
        // matrix L of dimension n is split into sub-matrices of dimensions
        // k, l and m with k + l + m = n
        //
        // [  A  |      |    ]  [        A⁻¹        |         |       ]   [  Iₖ  |      |     ]
        // [  B  |  Iₗ  |    ]  [       -BA⁻¹       |   Iₗ    |       ] = [      |  Iₗ  |     ]
        // [  C  |  D   |  E ]  [ E⁻¹ (DB - C) A⁻¹  | -E⁻¹D   |  E⁻¹  ]   [      |      |  Iₘ ]
        //
        // considering we have already computed A⁻¹ (i.e. inverted rows 0 to k-1
        // of L), and using l = 1 in the previous expression (i.e. the middle matrix
        // is only one row), we see that elements 0 to k-1 of row k are given by -BA⁻¹
        // and that element k is I₁ = 1. We can therefore invert L row by row and we
        // obtain an inverse matrix L⁻¹ which is a low triangular matrix with unit
        // diagonal. A⁻¹ is therefore also a low triangular matrix with unit diagonal.
        // This property is used in the loops below to speed up the computation of -BA⁻¹
        final double[] row = new double[n - 1];
        // the diagonal matrix D is trivially inverted entry by entry
        diag[0] = 1.0 / diag[0];
        for (int k = 1; k < n; ++k) {

            // compute row k of lower triangular part, by computing -BA⁻¹
            final int iK = lIndex(k, 0);
            System.arraycopy(low, iK, row, 0, k);
            for (int j = 0; j < k; ++j) {
                double sum = row[j];
                for (int l = j + 1; l < k; ++l) {
                    // exploit the unit diagonal of A⁻¹: term l = j is just row[j]
                    sum += row[l] * low[lIndex(l, j)];
                }
                low[iK + j] = -sum;
            }

            // diagonal part
            diag[k] = 1.0 / diag[k];

        }

    }

    /** Compute a safe estimate of search limit parameter χ².
     * <p>
     * The estimate is based on section 4.11 in de Jonge and Tiberius 1996,
     * computing χ² such that it includes at least a few preset grid points
     * </p>
     * @param fixed placeholder for test fixed ambiguities
     * @param offsets placeholder for offsets between fixed ambiguities and float ambiguities
     * @return safe estimate of search limit parameter χ²
     */
    private double estimateChi2(final long[] fixed, final double[] offsets) {

        // get references to the diagonal part
        final double[] diag = getDiagReference();
        final int n = diag.length;
        // maximum number of solutions sought
        final int maxSolutions = getMaxSolution();
        // get references to the decorrelated ambiguities
        final double[] decorrelated = getDecorrelatedReference();


        // initialize test points, assuming ambiguities have been fully decorrelated
        // (radius 0.0: each sampler just enumerates integers near its center)
        final AlternatingSampler[] samplers = new AlternatingSampler[n];
        for (int i = 0; i < n; ++i) {
            samplers[i] = new AlternatingSampler(decorrelated[i], 0.0);
        }

        // TreeSet both sorts the norms and de-duplicates identical values
        final SortedSet<Double> squaredNorms = new TreeSet<>();

        // first test point at center
        for (int i = 0; i < n; ++i) {
            fixed[i] = samplers[i].getCurrent();
        }
        squaredNorms.add(squaredNorm(fixed, offsets));

        while (squaredNorms.size() < maxSolutions) {
            // add a series of grid points, each shifted from center along a different axis
            final int remaining = FastMath.min(n, maxSolutions - squaredNorms.size());
            for (int i = 0; i < remaining; ++i) {
                final long saved = fixed[i];
                samplers[i].generateNext();
                fixed[i] = samplers[i].getCurrent();
                squaredNorms.add(squaredNorm(fixed, offsets));
                // restore the center coordinate so points differ along one axis only
                fixed[i] = saved;
            }
        }

        // select a limit ensuring at least the needed number of grid points are in the search domain
        int count = 0;
        for (final double s : squaredNorms) {
            if (++count == maxSolutions) {
                return s * CHI2_MARGIN_FACTOR;
            }
        }

        // never reached: the loop above always accumulates maxSolutions norms
        return Double.NaN;

    }

    /** Compute squared norm of a set of fixed ambiguities.
     * @param fixed fixed ambiguities
     * @param offsets placeholder for offsets between fixed ambiguities and float ambiguities
     * @return squared norm of a set of fixed ambiguities
     */
    private double squaredNorm(final long[] fixed, final double[] offsets) {
        // get references to the diagonal part and the decorrelated ambiguities
        final double[] diag = getDiagReference();
        final double[] decorrelated = getDecorrelatedReference();
        double n2 = 0;
        // accumulate from the last component down, since each conditional
        // estimate depends on the offsets of the components after it
        for (int i = diag.length - 1; i >= 0; --i) {
            offsets[i] = fixed[i] - decorrelated[i];
            final double delta = fixed[i] - conditionalEstimate(i, offsets);
            n2 += diag[i] * delta * delta;
        }
        return n2;
    }

    /** Compute conditional estimate of an ambiguity.
     * <p>
     * This corresponds to equation 4.4 in de Jonge and Tiberius 1996
     * </p>
     * @param i index of the ambiguity
     * @param offsets offsets between already fixed ambiguities and float ambiguities
     * @return conditional estimate of ambiguity â<sub>i|i+1...n</sub>
     */
    private double conditionalEstimate(final int i, final double[] offsets) {
        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();
        // get references to the decorrelated ambiguities
        final double[] decorrelated = getDecorrelatedReference();

        // correct the float ambiguity using the already-fixed components i+1 … n-1
        double conditional = decorrelated[i];
        for (int j = i + 1; j < diag.length; ++j) {
            conditional -= low[lIndex(j, i)] * offsets[j];
        }
        return conditional;
    }

}