TRRT.cpp
1 /*********************************************************************
2  * Software License Agreement (BSD License)
3  *
4  * Copyright (c) 2008, Willow Garage, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above
14  * copyright notice, this list of conditions and the following
15  * disclaimer in the documentation and/or other materials provided
16  * with the distribution.
17  * * Neither the name of the Willow Garage nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *********************************************************************/
34 
35 /* Author: Dave Coleman, Ryan Luna */
36 
37 #include "ompl/geometric/planners/rrt/TRRT.h"
38 #include "ompl/base/objectives/MechanicalWorkOptimizationObjective.h"
39 #include "ompl/base/goals/GoalSampleableRegion.h"
40 #include "ompl/tools/config/SelfConfig.h"
41 #include "ompl/tools/config/MagicConstants.h"
42 #include <limits>
43 
44 ompl::geometric::TRRT::TRRT(const base::SpaceInformationPtr &si) : base::Planner(si, "TRRT")
45 {
46  // Standard RRT Variables
47  specs_.approximateSolutions = true;
48  specs_.directed = true;
49 
50  Planner::declareParam<double>("range", this, &TRRT::setRange, &TRRT::getRange, "0.:1.:10000.");
51  Planner::declareParam<double>("goal_bias", this, &TRRT::setGoalBias, &TRRT::getGoalBias, "0.:.05:1.");
52 
53  // TRRT Specific Variables
54  frontierThreshold_ = 0.0; // set in setup()
55  setTempChangeFactor(0.1); // how much to increase the temp each time
56  costThreshold_ = base::Cost(std::numeric_limits<double>::infinity());
57  initTemperature_ = 100; // where the temperature starts out
58  frontierNodeRatio_ = 0.1; // 1/10, or 1 nonfrontier for every 10 frontier
59 
60  Planner::declareParam<double>("temp_change_factor", this, &TRRT::setTempChangeFactor, &TRRT::getTempChangeFactor,
61  "0.:.1:1.");
62  Planner::declareParam<double>("init_temperature", this, &TRRT::setInitTemperature, &TRRT::getInitTemperature);
63  Planner::declareParam<double>("frontier_threshold", this, &TRRT::setFrontierThreshold, &TRRT::getFrontierThreshold);
64  Planner::declareParam<double>("frontier_node_ratio", this, &TRRT::setFrontierNodeRatio, &TRRT::getFrontierNodeRatio);
65  Planner::declareParam<double>("cost_threshold", this, &TRRT::setCostThreshold, &TRRT::getCostThreshold);
66 }
67 
68 ompl::geometric::TRRT::~TRRT()
69 {
70  freeMemory();
71 }
72 
73 void ompl::geometric::TRRT::clear()
74 {
75  Planner::clear();
76  sampler_.reset();
77  freeMemory();
78  if (nearestNeighbors_)
79  nearestNeighbors_->clear();
80  lastGoalMotion_ = nullptr;
81 
82  // Clear TRRT specific variables ---------------------------------------------------------
83  temp_ = initTemperature_;
84  nonfrontierCount_ = 1;
85  frontierCount_ = 1; // init to 1 to prevent division by zero error
86  if (opt_)
87  bestCost_ = worstCost_ = opt_->identityCost();
88 }
89 
90 void ompl::geometric::TRRT::setup()
91 {
92  Planner::setup();
93  tools::SelfConfig selfConfig(si_, getName());
94 
95  if (!pdef_ || !pdef_->hasOptimizationObjective())
96  {
97  OMPL_INFORM("%s: No optimization objective specified. Defaulting to mechanical work minimization.",
98  getName().c_str());
99  opt_ = std::make_shared<base::MechanicalWorkOptimizationObjective>(si_);
100  }
101  else
102  opt_ = pdef_->getOptimizationObjective();
103 
104  // Set maximum distance a new node can be from its nearest neighbor
105  if (maxDistance_ < std::numeric_limits<double>::epsilon())
106  {
107  selfConfig.configurePlannerRange(maxDistance_);
 108  maxDistance_ *= magic::COST_MAX_MOTION_LENGTH_AS_SPACE_EXTENT_FRACTION;
 109  }
110 
111  // Set the threshold that decides if a new node is a frontier node or non-frontier node
112  if (frontierThreshold_ < std::numeric_limits<double>::epsilon())
113  {
114  frontierThreshold_ = si_->getMaximumExtent() * 0.01;
115  OMPL_DEBUG("%s: Frontier threshold detected to be %lf", getName().c_str(), frontierThreshold_);
116  }
117 
118  // Create the nearest neighbor function the first time setup is run
119  if (!nearestNeighbors_)
120  nearestNeighbors_.reset(tools::SelfConfig::getDefaultNearestNeighbors<Motion *>(this));
121 
122  // Set the distance function
123  nearestNeighbors_->setDistanceFunction([this](const Motion *a, const Motion *b)
124  {
125  return distanceFunction(a, b);
126  });
127 
128  // Setup TRRT specific variables ---------------------------------------------------------
129  temp_ = initTemperature_;
130  nonfrontierCount_ = 1;
131  frontierCount_ = 1; // init to 1 to prevent division by zero error
132  bestCost_ = worstCost_ = opt_->identityCost();
133 }
134 
135 void ompl::geometric::TRRT::freeMemory()
136 {
137  // Delete all motions, states and the nearest neighbors data structure
138  if (nearestNeighbors_)
139  {
140  std::vector<Motion *> motions;
141  nearestNeighbors_->list(motions);
142  for (auto &motion : motions)
143  {
144  if (motion->state)
145  si_->freeState(motion->state);
146  delete motion;
147  }
148  }
149 }
150 
151 ompl::base::PlannerStatus
152 ompl::geometric::TRRT::solve(const base::PlannerTerminationCondition &plannerTerminationCondition)
153 {
154  // Basic error checking
155  checkValidity();
156 
157  // Goal information
158  base::Goal *goal = pdef_->getGoal().get();
159  auto *goalRegion = dynamic_cast<base::GoalSampleableRegion *>(goal);
160 
161  // Input States ---------------------------------------------------------------------------------
162 
163  // Loop through valid input states and add to tree
164  while (const base::State *state = pis_.nextStart())
165  {
166  // Allocate memory for a new start state motion based on the "space-information"-size
167  auto *motion = new Motion(si_);
168 
169  // Copy destination <= source
170  si_->copyState(motion->state, state);
171 
172  // Set cost for this start state
173  motion->cost = opt_->stateCost(motion->state);
174 
175  if (nearestNeighbors_->size() == 0) // do not overwrite best/worst from previous call to solve
176  worstCost_ = bestCost_ = motion->cost;
177 
178  // Add start motion to the tree
179  nearestNeighbors_->add(motion);
180  }
181 
182  // Check that input states exist
183  if (nearestNeighbors_->size() == 0)
184  {
185  OMPL_ERROR("%s: There are no valid initial states!", getName().c_str());
 186  return base::PlannerStatus::INVALID_START;
 187  }
188 
189  // Create state sampler if this is TRRT's first run
190  if (!sampler_)
191  sampler_ = si_->allocStateSampler();
192 
193  // Debug
194  OMPL_INFORM("%s: Starting planning with %u states already in datastructure", getName().c_str(),
195  nearestNeighbors_->size());
196 
197  // Solver variables ------------------------------------------------------------------------------------
198 
199  // the final solution
200  Motion *solution = nullptr;
201  // the approximate solution, returned if no final solution found
202  Motion *approxSolution = nullptr;
203  // track the distance from goal to closest solution yet found
204  double approxDifference = std::numeric_limits<double>::infinity();
205 
 206  // distance between states - the initial state and the interpolated state (may be the same)
207  double randMotionDistance;
208 
209  // Create random motion and a pointer (for optimization) to its state
210  auto *randMotion = new Motion(si_);
211  Motion *nearMotion;
212 
213  // STATES
214  // The random state
215  base::State *randState = randMotion->state;
216  // The new state that is generated between states *to* and *from*
217  base::State *interpolatedState = si_->allocState(); // Allocates "space information"-sized memory for a state
 218  // The chosen state: either rand_state or interpolated_state
219  base::State *newState;
220 
221  // Begin sampling --------------------------------------------------------------------------------------
222  while (plannerTerminationCondition() == false)
223  {
224  // I.
225 
226  // Sample random state (with goal biasing probability)
227  if (goalRegion && rng_.uniform01() < goalBias_ && goalRegion->canSample())
228  {
229  // Bias sample towards goal
230  goalRegion->sampleGoal(randState);
231  }
232  else
233  {
234  // Uniformly Sample
235  sampler_->sampleUniform(randState);
236  }
237 
238  // II.
239 
240  // Find closest state in the tree
241  nearMotion = nearestNeighbors_->nearest(randMotion);
242 
243  // III.
244 
245  // Distance from near state q_n to a random state
246  randMotionDistance = si_->distance(nearMotion->state, randState);
247 
248  // Check if the rand_state is too far away
249  if (randMotionDistance > maxDistance_)
250  {
251  // Computes the state that lies at time t in [0, 1] on the segment that connects *from* state to *to* state.
252  // The memory location of *state* is not required to be different from the memory of either *from* or *to*.
253  si_->getStateSpace()->interpolate(nearMotion->state, randState, maxDistance_ / randMotionDistance,
254  interpolatedState);
255 
256  // Update the distance between near and new with the interpolated_state
257  randMotionDistance = si_->distance(nearMotion->state, interpolatedState);
258 
259  // Use the interpolated state as the new state
260  newState = interpolatedState;
261  }
262  else // Random state is close enough
263  newState = randState;
264 
265  // IV.
 266  // check that the motion between the near state and the new state is collision-free
267  if (!si_->checkMotion(nearMotion->state, newState))
268  continue; // try a new sample
269 
270  // Minimum Expansion Control
271  // A possible side effect may appear when the tree expansion toward unexplored regions remains slow, and the
272  // new nodes contribute only to refine already explored regions.
273  if (!minExpansionControl(randMotionDistance))
274  continue; // give up on this one and try a new sample
275 
276  base::Cost childCost = opt_->stateCost(newState);
277 
278  // Only add this motion to the tree if the transition test accepts it
279  if (!transitionTest(opt_->motionCost(nearMotion->state, newState)))
280  continue; // give up on this one and try a new sample
281 
282  // V.
283 
284  // Create a motion
285  auto *motion = new Motion(si_);
286  si_->copyState(motion->state, newState);
287  motion->parent = nearMotion; // link q_new to q_near as an edge
288  motion->cost = childCost;
289 
290  // Add motion to data structure
291  nearestNeighbors_->add(motion);
292 
293  if (opt_->isCostBetterThan(motion->cost, bestCost_)) // motion->cost is better than the existing best
294  bestCost_ = motion->cost;
295  if (opt_->isCostBetterThan(worstCost_, motion->cost)) // motion->cost is worse than the existing worst
296  worstCost_ = motion->cost;
297 
298  // VI.
299 
300  // Check if this motion is the goal
301  double distToGoal = 0.0;
302  bool isSatisfied = goal->isSatisfied(motion->state, &distToGoal);
303  if (isSatisfied)
304  {
 305  approxDifference = distToGoal; // the tolerated error distance between state and goal
306  solution = motion; // set the final solution
307  break;
308  }
309 
310  // Is this the closest solution we've found so far
311  if (distToGoal < approxDifference)
312  {
313  approxDifference = distToGoal;
314  approxSolution = motion;
315  }
316 
317  } // end of solver sampling loop
318 
319  // Finish solution processing --------------------------------------------------------------------
320 
321  bool solved = false;
322  bool approximate = false;
323 
324  // Substitute an empty solution with the best approximation
325  if (solution == nullptr)
326  {
327  solution = approxSolution;
328  approximate = true;
329  }
330 
331  // Generate solution path for real/approx solution
332  if (solution != nullptr)
333  {
334  lastGoalMotion_ = solution;
335 
336  // construct the solution path
337  std::vector<Motion *> mpath;
338  while (solution != nullptr)
339  {
340  mpath.push_back(solution);
341  solution = solution->parent;
342  }
343 
344  // set the solution path
345  auto path(std::make_shared<PathGeometric>(si_));
346  for (int i = mpath.size() - 1; i >= 0; --i)
347  path->append(mpath[i]->state);
348 
349  pdef_->addSolutionPath(path, approximate, approxDifference, getName());
350  solved = true;
351  }
352 
353  // Clean up ---------------------------------------------------------------------------------------
354 
355  si_->freeState(interpolatedState);
356  if (randMotion->state)
357  si_->freeState(randMotion->state);
358  delete randMotion;
359 
360  OMPL_INFORM("%s: Created %u states", getName().c_str(), nearestNeighbors_->size());
361 
362  return {solved, approximate};
363 }
364 
365 void ompl::geometric::TRRT::getPlannerData(base::PlannerData &data) const
366 {
367  Planner::getPlannerData(data);
368 
369  std::vector<Motion *> motions;
370  if (nearestNeighbors_)
371  nearestNeighbors_->list(motions);
372 
373  if (lastGoalMotion_)
374  data.addGoalVertex(base::PlannerDataVertex(lastGoalMotion_->state));
375 
376  for (auto &motion : motions)
377  {
378  if (motion->parent == nullptr)
379  data.addStartVertex(base::PlannerDataVertex(motion->state));
380  else
381  data.addEdge(base::PlannerDataVertex(motion->parent->state), base::PlannerDataVertex(motion->state));
382  }
383 }
384 
385 bool ompl::geometric::TRRT::transitionTest(const base::Cost &motionCost)
386 {
387  // Disallow any cost that is not better than the cost threshold
388  if (!opt_->isCostBetterThan(motionCost, costThreshold_))
389  return false;
390 
391  // Always accept if the cost is near or below zero
392  if (motionCost.value() < 1e-4)
393  return true;
394 
395  double dCost = motionCost.value();
396  double transitionProbability = exp(-dCost / temp_);
397  if (transitionProbability > 0.5)
398  {
399  double costRange = worstCost_.value() - bestCost_.value();
400  if (fabs(costRange) > 1e-4) // Do not divide by zero
401  // Successful transition test. Decrease the temperature slightly
402  temp_ /= exp(dCost / (0.1 * costRange));
403 
404  return true;
405  }
406 
407  // The transition failed. Increase the temperature (slightly)
408  temp_ *= tempChangeFactor_;
409  return false;
410 }
411 
412 bool ompl::geometric::TRRT::minExpansionControl(double randMotionDistance)
413 {
414  if (randMotionDistance > frontierThreshold_)
415  {
416  // participates in the tree expansion
417  ++frontierCount_;
418 
419  return true;
420  }
421  else
422  {
423  // participates in the tree refinement
424 
425  // check our ratio first before accepting it
426  if ((double)nonfrontierCount_ / (double)frontierCount_ > frontierNodeRatio_)
427  // reject this node as being too much refinement
428  return false;
429 
430  ++nonfrontierCount_;
431  return true;
432  }
433 }
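
The listing above only defines the planner. For reference, the following is a minimal, hypothetical usage sketch (not part of TRRT.cpp): it plugs TRRT into ompl::geometric::SimpleSetup, using a 2D RealVectorStateSpace and an always-valid state checker chosen purely for illustration. The parameter names passed to setParam() are the ones declared in the constructor above.

// Hypothetical usage sketch -- not part of TRRT.cpp. The 2D space and the
// always-valid checker are placeholders chosen only for illustration.
#include <iostream>
#include <memory>

#include <ompl/base/spaces/RealVectorStateSpace.h>
#include <ompl/geometric/SimpleSetup.h>
#include <ompl/geometric/planners/rrt/TRRT.h>

int main()
{
    namespace ob = ompl::base;
    namespace og = ompl::geometric;

    // A 2D real-vector state space bounded to [-1, 1] in each dimension
    auto space = std::make_shared<ob::RealVectorStateSpace>(2);
    space->setBounds(-1.0, 1.0);

    og::SimpleSetup ss(space);
    // Placeholder validity checker; a real application tests for collisions here
    ss.setStateValidityChecker([](const ob::State * /*state*/) { return true; });

    // Use TRRT; tune it through the parameters declared in the constructor above
    auto planner = std::make_shared<og::TRRT>(ss.getSpaceInformation());
    planner->params().setParam("goal_bias", "0.05");
    planner->params().setParam("temp_change_factor", "0.1");
    ss.setPlanner(planner);

    // Start and goal states at opposite corners of the space
    ob::ScopedState<> start(space), goal(space);
    start[0] = -0.9;
    start[1] = -0.9;
    goal[0] = 0.9;
    goal[1] = 0.9;
    ss.setStartAndGoalStates(start, goal);

    // This invokes TRRT::solve() above with a 1-second termination condition
    if (ss.solve(1.0))
        ss.getSolutionPath().printAsMatrix(std::cout);

    return 0;
}

Since no optimization objective is set in this sketch, setup() above falls back to MechanicalWorkOptimizationObjective, as logged by the OMPL_INFORM call in that function.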