RRTXstatic.cpp
1 /*********************************************************************
2 * Software License Agreement (BSD License)
3 *
4 * Copyright (c) 2016, Georgia Institute of Technology
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 * * Neither the name of the Rice University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *********************************************************************/
34 
35 /* Author: Florian Hauer */
36 
37 #include "ompl/geometric/planners/rrt/RRTXstatic.h"
38 #include <algorithm>
39 #include <boost/math/constants/constants.hpp>
40 #include <limits>
41 #include "ompl/base/Goal.h"
42 #include "ompl/base/goals/GoalSampleableRegion.h"
43 #include "ompl/base/goals/GoalState.h"
44 #include "ompl/base/objectives/PathLengthOptimizationObjective.h"
45 #include "ompl/base/samplers/InformedStateSampler.h"
46 #include "ompl/base/samplers/informed/RejectionInfSampler.h"
47 #include "ompl/tools/config/SelfConfig.h"
48 #include "ompl/util/GeometricEquations.h"
49 
50 ompl::geometric::RRTXstatic::RRTXstatic(const base::SpaceInformationPtr &si)
51  : base::Planner(si, "RRTXstatic")
52  , mc_(opt_, pdef_)
53  , q_(mc_)
54 {
55  specs_.approximateSolutions = true;
56  specs_.optimizingPaths = true;
57  specs_.canReportIntermediateSolutions = true;
58 
59  Planner::declareParam<double>("range", this, &RRTXstatic::setRange, &RRTXstatic::getRange, "0.:1.:10000.");
60  Planner::declareParam<double>("goal_bias", this, &RRTXstatic::setGoalBias, &RRTXstatic::getGoalBias, "0.:.05:1.");
61  Planner::declareParam<double>("epsilon", this, &RRTXstatic::setEpsilon, &RRTXstatic::getEpsilon, "0.:.01:10.");
62  Planner::declareParam<double>("rewire_factor", this, &RRTXstatic::setRewireFactor, &RRTXstatic::getRewireFactor,
63  "1.0:0.01:2."
64  "0");
65  Planner::declareParam<bool>("use_k_nearest", this, &RRTXstatic::setKNearest, &RRTXstatic::getKNearest, "0,1");
66  Planner::declareParam<bool>("update_children", this, &RRTXstatic::setUpdateChildren, &RRTXstatic::getUpdateChildren,
67  "0,1");
68  Planner::declareParam<int>("rejection_variant", this, &RRTXstatic::setVariant, &RRTXstatic::getVariant, "0:3");
69  Planner::declareParam<double>("rejection_variant_alpha", this, &RRTXstatic::setAlpha, &RRTXstatic::getAlpha, "0.:"
70  "1.");
71  Planner::declareParam<bool>("informed_sampling", this, &RRTXstatic::setInformedSampling,
72  &RRTXstatic::getInformedSampling, "0,"
73  "1");
74  Planner::declareParam<bool>("sample_rejection", this, &RRTXstatic::setSampleRejection,
75  &RRTXstatic::getSampleRejection, "0,1");
76  Planner::declareParam<unsigned int>("number_sampling_attempts", this, &RRTXstatic::setNumSamplingAttempts,
77  &RRTXstatic::getNumSamplingAttempts, "10:10:100000");
78 
79  addPlannerProgressProperty("iterations INTEGER", [this] { return numIterationsProperty(); });
80  addPlannerProgressProperty("motions INTEGER", [this] { return numMotionsProperty(); });
81  addPlannerProgressProperty("best cost REAL", [this] { return bestCostProperty(); });
82 }
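// The parameters declared above are exposed through the standard Planner::params()
// interface. A minimal tuning sketch (the pointer `rrtx` is assumed to be a
// std::shared_ptr<ompl::geometric::RRTXstatic> owned by user code):
//
//     rrtx->params().setParam("epsilon", "0.1");          // epsilon-consistency threshold
//     rrtx->params().setParam("rejection_variant", "3");  // enable rejection variant 3
//     rrtx->setRange(0.5);                                 // or call the setters directly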
83 
84 ompl::geometric::RRTXstatic::~RRTXstatic()
85 {
86  freeMemory();
87 }
88 
89 void ompl::geometric::RRTXstatic::setup()
90 {
91  Planner::setup();
92  tools::SelfConfig sc(si_, getName());
93  sc.configurePlannerRange(maxDistance_);
94  if (!si_->getStateSpace()->hasSymmetricDistance() || !si_->getStateSpace()->hasSymmetricInterpolate())
95  {
96  OMPL_WARN("%s requires a state space with symmetric distance and symmetric interpolation.", getName().c_str());
97  }
98 
99  if (!nn_)
100  nn_.reset(tools::SelfConfig::getDefaultNearestNeighbors<Motion *>(this));
101  nn_->setDistanceFunction([this](const Motion *a, const Motion *b) { return distanceFunction(a, b); });
102 
103  // Setup optimization objective
104  //
105  // If no optimization objective was specified, then default to
106  // optimizing path length as computed by the distance() function
107  // in the state space.
108  if (pdef_)
109  {
110  if (pdef_->hasOptimizationObjective())
111  opt_ = pdef_->getOptimizationObjective();
112  else
113  {
114  OMPL_INFORM("%s: No optimization objective specified. Defaulting to optimizing path length for the allowed "
115  "planning time.",
116  getName().c_str());
117  opt_ = std::make_shared<base::PathLengthOptimizationObjective>(si_);
118 
119  // Store the new objective in the problem def'n
120  pdef_->setOptimizationObjective(opt_);
121  }
122  // Set the bestCost_ as infinite
123  bestCost_ = opt_->infiniteCost();
124  mc_ = MotionCompare(opt_, pdef_);
125  q_ = BinaryHeap<Motion *, MotionCompare>(mc_);
126  }
127  else
128  {
129  OMPL_INFORM("%s: problem definition is not set, deferring setup completion...", getName().c_str());
130  setup_ = false;
131  }
132 
133  // Calculate some constants:
134  calculateRewiringLowerBounds();
135 }
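// setup() defaults to path length when the problem definition carries no objective.
// A minimal sketch of supplying an objective with a cost threshold instead, so that
// solve() can stop as soon as opt_->isSatisfied() holds; `si` and `pdef` are the
// user's SpaceInformationPtr and ProblemDefinitionPtr, and the threshold value is illustrative:
//
//     auto obj = std::make_shared<ompl::base::PathLengthOptimizationObjective>(si);
//     obj->setCostThreshold(ompl::base::Cost(1.5));
//     pdef->setOptimizationObjective(obj);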
136 
137 void ompl::geometric::RRTXstatic::clear()
138 {
139  setup_ = false;
140  Planner::clear();
141  sampler_.reset();
142  infSampler_.reset();
143  freeMemory();
144  if (nn_)
145  nn_->clear();
146 
147  lastGoalMotion_ = nullptr;
148  goalMotions_.clear();
149 
150  iterations_ = 0;
151  bestCost_ = base::Cost(std::numeric_limits<double>::quiet_NaN());
152 }
153 
154 ompl::base::PlannerStatus ompl::geometric::RRTXstatic::solve(const base::PlannerTerminationCondition &ptc)
155 {
156  checkValidity();
157  base::Goal *goal = pdef_->getGoal().get();
158  auto *goal_s = dynamic_cast<base::GoalSampleableRegion *>(goal);
159 
160  // Check if there are more starts
161  if (pis_.haveMoreStartStates() == true)
162  {
163  // There are, add them
164  while (const base::State *st = pis_.nextStart())
165  {
166  auto *motion = new Motion(si_);
167  si_->copyState(motion->state, st);
168  motion->cost = opt_->identityCost();
169  nn_->add(motion);
170  }
171 
172  // And ensure that, if we're using an informed sampler, it is reset
173  infSampler_.reset();
174  }
175  // No else
176 
177  if (nn_->size() == 0)
178  {
179  OMPL_ERROR("%s: There are no valid initial states!", getName().c_str());
180  return base::PlannerStatus::INVALID_START;
181  }
182 
183  // Allocate a sampler if necessary
184  if (!sampler_ && !infSampler_)
185  {
186  allocSampler();
187  }
188 
189  OMPL_INFORM("%s: Starting planning with %u states already in datastructure", getName().c_str(), nn_->size());
190 
191  if (!si_->getStateSpace()->isMetricSpace())
192  OMPL_WARN("%s: The state space (%s) is not metric and as a result the optimization objective may not satisfy "
193  "the triangle inequality. "
194  "You may need to disable rejection.",
195  getName().c_str(), si_->getStateSpace()->getName().c_str());
196 
197  const base::ReportIntermediateSolutionFn intermediateSolutionCallback = pdef_->getIntermediateSolutionCallback();
198 
199  Motion *solution = lastGoalMotion_;
200 
201  Motion *approximation = nullptr;
202  double approximatedist = std::numeric_limits<double>::infinity();
203  bool sufficientlyShort = false;
204 
205  auto *rmotion = new Motion(si_);
206  base::State *rstate = rmotion->state;
207  base::State *xstate = si_->allocState();
208  base::State *dstate;
209 
210  Motion *motion;
211  Motion *nmotion;
212  Motion *nb;
213  Motion *min;
214  Motion *c;
215  bool feas;
216 
217  unsigned int rewireTest = 0;
218  unsigned int statesGenerated = 0;
219 
220  base::Cost incCost, cost;
221 
222  if (solution)
223  OMPL_INFORM("%s: Starting planning with existing solution of cost %.5f", getName().c_str(),
224  solution->cost.value());
225 
226  if (useKNearest_)
227  OMPL_INFORM("%s: Initial k-nearest value of %u", getName().c_str(),
228  (unsigned int)std::ceil(k_rrt_ * log((double)(nn_->size() + 1u))));
229  else
230  OMPL_INFORM(
231  "%s: Initial rewiring radius of %.2f", getName().c_str(),
232  std::min(maxDistance_, r_rrt_ * std::pow(log((double)(nn_->size() + 1u)) / ((double)(nn_->size() + 1u)),
233  1 / (double)(si_->getStateDimension()))));
234 
235  while (ptc == false)
236  {
237  iterations_++;
238 
239  // Computes the RRG values for this iteration (number or radius of neighbors)
240  calculateRRG();
241 
242  // sample random state (with goal biasing)
243  // Goal samples are only sampled until maxSampleCount() goals are in the tree, to prohibit duplicate goal
244  // states.
245  if (goal_s && goalMotions_.size() < goal_s->maxSampleCount() && rng_.uniform01() < goalBias_ &&
246  goal_s->canSample())
247  goal_s->sampleGoal(rstate);
248  else
249  {
250  // Attempt to generate a sample; if we fail (e.g., too many rejection attempts), skip the remainder of this
251  // loop and return to try again
252  if (!sampleUniform(rstate))
253  continue;
254  }
255 
256  // find closest state in the tree
257  nmotion = nn_->nearest(rmotion);
258 
259  if (intermediateSolutionCallback && si_->equalStates(nmotion->state, rstate))
260  continue;
261 
262  dstate = rstate;
263 
264  // find state to add to the tree
265  double d = si_->distance(nmotion->state, rstate);
266  if (d > maxDistance_)
267  {
268  si_->getStateSpace()->interpolate(nmotion->state, rstate, maxDistance_ / d, xstate);
269  dstate = xstate;
270  }
271 
272  // Check if the motion between the nearest state and the state to add is valid
273  if (si_->checkMotion(nmotion->state, dstate))
274  {
275  // create a motion
276  motion = new Motion(si_);
277  si_->copyState(motion->state, dstate);
278  motion->parent = nmotion;
279  incCost = opt_->motionCost(nmotion->state, motion->state);
280  motion->cost = opt_->combineCosts(nmotion->cost, incCost);
281 
282  // Find nearby neighbors of the new motion
283  getNeighbors(motion);
284 
285  // find which one we connect the new state to
286  for (auto it = motion->nbh.begin(); it != motion->nbh.end();)
287  {
288  nb = it->first;
289  feas = it->second;
290 
291  // Compute cost using nb as a parent
292  incCost = opt_->motionCost(nb->state, motion->state);
293  cost = opt_->combineCosts(nb->cost, incCost);
294  if (opt_->isCostBetterThan(cost, motion->cost))
295  {
296  // Check range and feasibility
297  if ((!useKNearest_ || distanceFunction(motion, nb) < maxDistance_) &&
298  si_->checkMotion(nb->state, motion->state))
299  {
300  // mark that the motion has been checked as valid
301  it->second = true;
302 
303  motion->cost = cost;
304  motion->parent = nb;
305  ++it;
306  }
307  else
308  {
309  // Remove infeasible neighbor from the list of neighbors
310  it = motion->nbh.erase(it);
311  }
312  }
313  else
314  {
315  ++it;
316  }
317  }
318 
319  // Check if the vertex should be included
320  if (!includeVertex(motion))
321  {
322  si_->freeState(motion->state);
323  delete motion;
324  continue;
325  }
326 
327  // Register the new motion in its neighbors' neighbor lists
328  for (auto it = motion->nbh.begin(); it != motion->nbh.end(); ++it)
329  {
330  it->first->nbh.emplace_back(motion, it->second);
331  }
332 
333  // add motion to the tree
334  ++statesGenerated;
335  nn_->add(motion);
336  if (updateChildren_)
337  motion->parent->children.push_back(motion);
338 
339  // add the new motion to the queue to propagate the changes
340  updateQueue(motion);
341 
342  bool checkForSolution = false;
343 
344  // Add the new motion to the goalMotions_ list, if it satisfies the goal
345  double distanceFromGoal;
346  if (goal->isSatisfied(motion->state, &distanceFromGoal))
347  {
348  goalMotions_.push_back(motion);
349  checkForSolution = true;
350  }
351 
352  // Process the elements in the queue and rewire the tree until epsilon-optimality
353  while (!q_.empty())
354  {
355  // Get element to update
356  min = q_.top()->data;
357  // Remove element from the queue and NULL the handle so that we know it's not in the queue anymore
358  q_.pop();
359  min->handle = nullptr;
360 
361  // Stop cost propagation if it is not in the relevant region: the queue is ordered by cost-plus-heuristic, so no later element can improve on bestCost_ either
362  if (opt_->isCostBetterThan(bestCost_, mc_.costPlusHeuristic(min)))
363  break;
364 
365  // Try min as a parent to optimize each neighbor
366  for (auto it = min->nbh.begin(); it != min->nbh.end();)
367  {
368  nb = it->first;
369  feas = it->second;
370 
371  // Neighbor culling: removes neighbors farther than the neighbor radius
372  if ((!useKNearest_ || min->nbh.size() > rrg_k_) && distanceFunction(min, nb) > rrg_r_)
373  {
374  it = min->nbh.erase(it);
375  continue;
376  }
377 
378  // Calculate cost of nb using min as a parent
379  incCost = opt_->motionCost(min->state, nb->state);
380  cost = opt_->combineCosts(min->cost, incCost);
381 
382  // Rewire only if the cost improvement is larger than epsilon (epsilon-consistency)
383  if (opt_->isCostBetterThan(opt_->combineCosts(cost, epsilonCost_), nb->cost))
384  {
385  if (nb->parent != min)
386  {
387  // changing parent, check feasibility
388  if (!feas)
389  {
390  feas = si_->checkMotion(nb->state, min->state);
391  if (!feas)
392  {
393  // Remove infeasible neighbor from the list of neighbors
394  it = min->nbh.erase(it);
395  continue;
396  }
397  else
398  {
399  // mark that the motion has been checked as valid
400  it->second = true;
401  }
402  }
403  if (updateChildren_)
404  {
405  // Remove this node from its parent list
406  removeFromParent(nb);
407  // add it as a child of min
408  min->children.push_back(nb);
409  }
410  // Add this node to the new parent
411  nb->parent = min;
412  ++rewireTest;
413  }
414  nb->cost = cost;
415 
416  // Add to the queue for more improvements
417  updateQueue(nb);
418 
419  checkForSolution = true;
420  }
421  ++it;
422  }
423  if (updateChildren_)
424  {
425  // Propagation of the cost to the children
426  for (auto it = min->children.begin(), end = min->children.end(); it != end; ++it)
427  {
428  c = *it;
429  incCost = opt_->motionCost(min->state, c->state);
430  cost = opt_->combineCosts(min->cost, incCost);
431  c->cost = cost;
432  // Add to the queue for more improvements
433  updateQueue(c);
434 
435  checkForSolution = true;
436  }
437  }
438  }
439 
440  // empty q and reset handles
441  while (!q_.empty())
442  {
443  q_.top()->data->handle = nullptr;
444  q_.pop();
445  }
446  q_.clear();
447 
448  // Checking for solution or iterative improvement
449  if (checkForSolution)
450  {
451  bool updatedSolution = false;
452  for (auto &goalMotion : goalMotions_)
453  {
454  if (opt_->isCostBetterThan(goalMotion->cost, bestCost_))
455  {
456  if (opt_->isFinite(bestCost_) == false)
457  {
458  OMPL_INFORM("%s: Found an initial solution with a cost of %.2f in %u iterations (%u "
459  "vertices in the graph)",
460  getName().c_str(), goalMotion->cost.value(), iterations_, nn_->size());
461  }
462  bestCost_ = goalMotion->cost;
463  updatedSolution = true;
464  }
465 
466  sufficientlyShort = opt_->isSatisfied(goalMotion->cost);
467  if (sufficientlyShort)
468  {
469  solution = goalMotion;
470  break;
471  }
472  else if (!solution || opt_->isCostBetterThan(goalMotion->cost, solution->cost))
473  {
474  solution = goalMotion;
475  updatedSolution = true;
476  }
477  }
478 
479  if (updatedSolution)
480  {
481  if (intermediateSolutionCallback)
482  {
483  std::vector<const base::State *> spath;
484  Motion *intermediate_solution =
485  solution->parent; // Do not include goal state to simplify code.
486 
487  // Push back until we find the start, but not the start itself
488  while (intermediate_solution->parent != nullptr)
489  {
490  spath.push_back(intermediate_solution->state);
491  intermediate_solution = intermediate_solution->parent;
492  }
493 
494  intermediateSolutionCallback(this, spath, bestCost_);
495  }
496  }
497  }
498 
499  // Checking for approximate solution (closest state found to the goal)
500  if (goalMotions_.size() == 0 && distanceFromGoal < approximatedist)
501  {
502  approximation = motion;
503  approximatedist = distanceFromGoal;
504  }
505  }
506 
507  // terminate if a sufficient solution is found
508  if (solution && sufficientlyShort)
509  break;
510  }
511 
512  bool approximate = (solution == nullptr);
513  bool addedSolution = false;
514  if (approximate)
515  solution = approximation;
516  else
517  lastGoalMotion_ = solution;
518 
519  if (solution != nullptr)
520  {
521  ptc.terminate();
522  // construct the solution path
523  std::vector<Motion *> mpath;
524  while (solution != nullptr)
525  {
526  mpath.push_back(solution);
527  solution = solution->parent;
528  }
529 
530  // set the solution path
531  auto path = std::make_shared<PathGeometric>(si_);
532  for (int i = mpath.size() - 1; i >= 0; --i)
533  path->append(mpath[i]->state);
534 
535  // Add the solution path.
536  base::PlannerSolution psol(path);
537  psol.setPlannerName(getName());
538  if (approximate)
539  psol.setApproximate(approximatedist);
540  // Does the solution satisfy the optimization objective?
541  psol.setOptimized(opt_, bestCost_, sufficientlyShort);
542  pdef_->addSolutionPath(psol);
543 
544  addedSolution = true;
545  }
546 
547  si_->freeState(xstate);
548  if (rmotion->state)
549  si_->freeState(rmotion->state);
550  delete rmotion;
551 
552  OMPL_INFORM("%s: Created %u new states. Checked %u rewire options. %u goal states in tree. Final solution cost "
553  "%.3f",
554  getName().c_str(), statesGenerated, rewireTest, goalMotions_.size(), bestCost_.value());
555 
556  return {addedSolution, approximate};
557 }
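// A minimal usage sketch (not part of the planner), assuming a 2D RealVectorStateSpace,
// a user-supplied isStateValid() callback and ScopedState start/goal values; the
// SimpleSetup calls below are the standard OMPL API:
//
//     namespace ob = ompl::base;
//     namespace og = ompl::geometric;
//     auto space = std::make_shared<ob::RealVectorStateSpace>(2);
//     space->setBounds(0.0, 1.0);
//     og::SimpleSetup ss(space);
//     ss.setStateValidityChecker(isStateValid);
//     ss.setStartAndGoalStates(start, goal);
//     ss.setPlanner(std::make_shared<og::RRTXstatic>(ss.getSpaceInformation()));
//     if (ss.solve(1.0))
//         ss.getSolutionPath().print(std::cout);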
558 
559 void ompl::geometric::RRTXstatic::updateQueue(Motion *x)
560 {
561  // If x->handle is not NULL, x is already in the queue and needs to be updated; otherwise it is inserted
562  if (x->handle != nullptr)
563  {
564  q_.update(x->handle);
565  }
566  else
567  {
568  x->handle = q_.insert(x);
569  }
570 }
571 
572 void ompl::geometric::RRTXstatic::removeFromParent(Motion *m)
573 {
574  for (auto it = m->parent->children.begin(); it != m->parent->children.end(); ++it)
575  {
576  if (*it == m)
577  {
578  m->parent->children.erase(it);
579  break;
580  }
581  }
582 }
583 
584 void ompl::geometric::RRTXstatic::calculateRRG()
585 {
586  auto cardDbl = static_cast<double>(nn_->size() + 1u);
587  rrg_k_ = std::ceil(k_rrt_ * log(cardDbl));
588  rrg_r_ = std::min(maxDistance_,
589  r_rrt_ * std::pow(log(cardDbl) / cardDbl, 1 / static_cast<double>(si_->getStateDimension())));
590 }
591 
592 void ompl::geometric::RRTXstatic::getNeighbors(Motion *motion) const
593 {
594  if (motion->nbh.size() > 0)
595  {
596  return;
597  }
598 
599  std::vector<Motion *> nbh;
600  if (useKNearest_)
601  {
602  //- k-nearest RRT*
603  nn_->nearestK(motion, rrg_k_, nbh);
604  }
605  else
606  {
607  nn_->nearestR(motion, rrg_r_, nbh);
608  }
609 
610  motion->nbh.resize(nbh.size());
611  std::transform(nbh.begin(), nbh.end(), motion->nbh.begin(),
612  [](Motion *m) { return std::pair<Motion *, bool>(m, false); });
613 }
614 
615 bool ompl::geometric::RRTXstatic::includeVertex(const Motion *x) const
616 {
617  switch (variant_)
618  {
619  case 1:
620  return opt_->isCostBetterThan(mc_.alphaCostPlusHeuristic(x, alpha_), opt_->infiniteCost()); // Always true?
621  case 2:
622  return opt_->isCostBetterThan(mc_.alphaCostPlusHeuristic(x->parent, alpha_), bestCost_);
623  case 3:
624  return opt_->isCostBetterThan(mc_.alphaCostPlusHeuristic(x, alpha_), bestCost_);
625  default: // no rejection
626  return true;
627  }
628 }
629 
630 void ompl::geometric::RRTXstatic::freeMemory()
631 {
632  if (nn_)
633  {
634  std::vector<Motion *> motions;
635  nn_->list(motions);
636  for (auto &motion : motions)
637  {
638  if (motion->state)
639  si_->freeState(motion->state);
640  delete motion;
641  }
642  }
643 }
644 
645 void ompl::geometric::RRTXstatic::getPlannerData(base::PlannerData &data) const
646 {
647  Planner::getPlannerData(data);
648 
649  std::vector<Motion *> motions;
650  if (nn_)
651  nn_->list(motions);
652 
653  if (lastGoalMotion_)
654  data.addGoalVertex(base::PlannerDataVertex(lastGoalMotion_->state));
655 
656  for (auto &motion : motions)
657  {
658  if (motion->parent == nullptr)
659  data.addStartVertex(base::PlannerDataVertex(motion->state));
660  else
661  data.addEdge(base::PlannerDataVertex(motion->parent->state), base::PlannerDataVertex(motion->state));
662  }
663 }
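// A minimal sketch of retrieving the exploration tree after planning; `si` and `rrtx`
// are assumed to be the user's SpaceInformationPtr and planner instance, while
// PlannerData and printGraphviz are the standard OMPL API:
//
//     ompl::base::PlannerData pd(si);
//     rrtx->getPlannerData(pd);
//     pd.printGraphviz(std::cout);  // dump the tree as a Graphviz graph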
664 
665 void ompl::geometric::RRTXstatic::setInformedSampling(bool informedSampling)
666 {
667  if (static_cast<bool>(opt_) == true)
668  {
669  if (opt_->hasCostToGoHeuristic() == false)
670  {
671  OMPL_INFORM("%s: No cost-to-go heuristic set. Informed techniques will not work well.", getName().c_str());
672  }
673  }
674 
675  // This option is mutually exclusive with setSampleRejection, assert that:
676  if (informedSampling == true && useRejectionSampling_ == true)
677  {
678  OMPL_ERROR("%s: InformedSampling and SampleRejection are mutually exclusive options.", getName().c_str());
679  }
680 
681  // Check if we're changing the setting of informed sampling. If we are, we will need to create a new sampler, which
682  // we only want to do if one is already allocated.
683  if (informedSampling != useInformedSampling_)
684  {
685  // Store the value
686  useInformedSampling_ = informedSampling;
687 
688  // If we currently have a sampler, we need to make a new one
689  if (sampler_ || infSampler_)
690  {
691  // Reset the samplers
692  sampler_.reset();
693  infSampler_.reset();
694 
695  // Create the sampler
696  allocSampler();
697  }
698  }
699 }
700 
701 void ompl::geometric::RRTXstatic::setSampleRejection(const bool reject)
702 {
703  if (static_cast<bool>(opt_) == true)
704  {
705  if (opt_->hasCostToGoHeuristic() == false)
706  {
707  OMPL_INFORM("%s: No cost-to-go heuristic set. Informed techniques will not work well.", getName().c_str());
708  }
709  }
710 
711  // This option is mutually exclusive with setSampleRejection, assert that:
712  if (reject == true && useInformedSampling_ == true)
713  {
714  OMPL_ERROR("%s: InformedSampling and SampleRejection are mutually exclusive options.", getName().c_str());
715  }
716 
717  // Check if we're changing the setting of rejection sampling. If we are, we will need to create a new sampler, which
718  // we only want to do if one is already allocated.
719  if (reject != useRejectionSampling_)
720  {
721  // Store the setting
722  useRejectionSampling_ = reject;
723 
724  // If we currently have a sampler, we need to make a new one
725  if (sampler_ || infSampler_)
726  {
727  // Reset the samplers
728  sampler_.reset();
729  infSampler_.reset();
730 
731  // Create the sampler
732  allocSampler();
733  }
734  }
735 }
736 
737 void ompl::geometric::RRTXstatic::allocSampler()
738 {
739  // Allocate the appropriate type of sampler.
740  if (useInformedSampling_)
741  {
742  // We are using informed sampling; this can end up reverting to rejection sampling in some cases
743  OMPL_INFORM("%s: Using informed sampling.", getName().c_str());
744  infSampler_ = opt_->allocInformedStateSampler(pdef_, numSampleAttempts_);
745  }
746  else if (useRejectionSampling_)
747  {
748  // We are explicitly using rejection sampling.
749  OMPL_INFORM("%s: Using rejection sampling.", getName().c_str());
750  infSampler_ = std::make_shared<base::RejectionInfSampler>(pdef_, numSampleAttempts_);
751  }
752  else
753  {
754  // We are using a regular sampler
755  sampler_ = si_->allocStateSampler();
756  }
757 }
758 
759 bool ompl::geometric::RRTXstatic::sampleUniform(base::State *statePtr)
760 {
761  // Use the appropriate sampler
762  if (useInformedSampling_ || useRejectionSampling_)
763  {
764  // Attempt the focused sampler and return the result.
765  // If bestCost_ is changing a lot by small amounts, a coarser (less frequently
766  // updated) bound could be passed here instead to reduce the number of times the
767  // informed sampling transforms are recalculated.
768  return infSampler_->sampleUniform(statePtr, bestCost_);
769  }
770  else
771  {
772  // Simply return a state from the regular sampler
773  sampler_->sampleUniform(statePtr);
774 
775  // Always true
776  return true;
777  }
778 }
779 
780 void ompl::geometric::RRTXstatic::calculateRewiringLowerBounds()
781 {
782  auto dimDbl = static_cast<double>(si_->getStateDimension());
783 
784  // k_rrt > 2^(d + 1) * e * (1 + 1 / d). K-nearest RRT*
785  k_rrt_ = rewireFactor_ * (std::pow(2, dimDbl + 1) * boost::math::constants::e<double>() * (1.0 + 1.0 / dimDbl));
786 
787  // r_rrt > (2*(1+1/d))^(1/d)*(measure/ballvolume)^(1/d)
788  r_rrt_ = rewireFactor_ *
789  std::pow(2 * (1.0 + 1.0 / dimDbl) * (si_->getSpaceMeasure() / unitNBallMeasure(si_->getStateDimension())),
790  1.0 / dimDbl);
791 }
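// Illustrative numbers (not computed by the planner): with dimension d = 2 and, e.g.,
// rewireFactor_ = 1.1,
//   k_rrt_ = 1.1 * 2^3 * e * (1 + 1/2) ~= 35.9, so calculateRRG() considers about 35.9 * log(n) neighbors,
//   r_rrt_ = 1.1 * sqrt(3 * measure / pi), and the rewiring radius shrinks as (log(n) / n)^(1/2).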