TRRT.cpp
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 * Copyright (c) 2008, Willow Garage, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *  * Neither the name of the Willow Garage nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

/* Author: Dave Coleman, Ryan Luna */

#include "ompl/geometric/planners/rrt/TRRT.h"
#include "ompl/base/objectives/MechanicalWorkOptimizationObjective.h"
#include "ompl/base/goals/GoalSampleableRegion.h"
#include "ompl/tools/config/SelfConfig.h"
#include "ompl/tools/config/MagicConstants.h"
#include <limits>

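// T-RRT (Transition-based RRT) grows a regular RRT over a state-cost map, but every
// candidate extension must pass a transition test: exp(-cost / temperature) is compared
// against a fixed acceptance level, and the temperature is lowered after accepted
// transitions and raised after rejected ones. A minimum expansion control additionally
// bounds the ratio of short "refinement" motions to long "frontier" motions so the tree
// keeps exploring. See "Transition-based RRT for path planning in continuous cost
// spaces" (Jaillet, Cortes, Simeon) for the original formulation.
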
ompl::geometric::TRRT::TRRT(const base::SpaceInformationPtr &si) : base::Planner(si, "TRRT")
{
    // Standard RRT Variables
    specs_.approximateSolutions = true;
    specs_.directed = true;

    goalBias_ = 0.05;
    maxDistance_ = 0.0;  // set in setup()
    lastGoalMotion_ = nullptr;

    Planner::declareParam<double>("range", this, &TRRT::setRange, &TRRT::getRange, "0.:1.:10000.");
    Planner::declareParam<double>("goal_bias", this, &TRRT::setGoalBias, &TRRT::getGoalBias, "0.:.05:1.");

    // TRRT Specific Variables
    frontierThreshold_ = 0.0;  // set in setup()
    setTempChangeFactor(0.1);  // how much to increase the temp each time
    costThreshold_ = base::Cost(std::numeric_limits<double>::infinity());
    initTemperature_ = 100;    // where the temperature starts out
    frontierNodeRatio_ = 0.1;  // 1/10, or 1 nonfrontier for every 10 frontier

    Planner::declareParam<double>("temp_change_factor", this, &TRRT::setTempChangeFactor, &TRRT::getTempChangeFactor,
                                  "0.:.1:1.");
    Planner::declareParam<double>("init_temperature", this, &TRRT::setInitTemperature, &TRRT::getInitTemperature);
    Planner::declareParam<double>("frontier_threshold", this, &TRRT::setFrontierThreshold, &TRRT::getFrontierThreshold);
    Planner::declareParam<double>("frontierNodeRatio", this, &TRRT::setFrontierNodeRatio, &TRRT::getFrontierNodeRatio);
    Planner::declareParam<double>("cost_threshold", this, &TRRT::setCostThreshold, &TRRT::getCostThreshold);
}
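
// Example (illustrative sketch, not part of the OMPL sources): the parameters declared
// above can also be set directly through the corresponding setters, e.g. from user code
// that already owns a configured SpaceInformationPtr 'si':
//
//   auto planner = std::make_shared<ompl::geometric::TRRT>(si);
//   planner->setRange(0.5);               // "range"
//   planner->setGoalBias(0.05);           // "goal_bias"
//   planner->setInitTemperature(100.0);   // "init_temperature"
//   planner->setCostThreshold(25.0);      // "cost_threshold" (problem-specific value)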

ompl::geometric::TRRT::~TRRT()
{
    freeMemory();
}

void ompl::geometric::TRRT::clear()
{
    Planner::clear();
    sampler_.reset();
    freeMemory();
    if (nearestNeighbors_)
        nearestNeighbors_->clear();
    lastGoalMotion_ = nullptr;

    // Clear TRRT specific variables ---------------------------------------------------------
    temp_ = initTemperature_;
    nonfrontierCount_ = 1;
    frontierCount_ = 1;  // init to 1 to prevent division by zero error
    if (opt_)
        bestCost_ = worstCost_ = opt_->identityCost();
}

void ompl::geometric::TRRT::setup()
{
    Planner::setup();
    tools::SelfConfig selfConfig(si_, getName());

    if (!pdef_ || !pdef_->hasOptimizationObjective())
    {
        OMPL_INFORM("%s: No optimization objective specified. Defaulting to mechanical work minimization.",
                    getName().c_str());
        opt_ = std::make_shared<base::MechanicalWorkOptimizationObjective>(si_);
    }
    else
        opt_ = pdef_->getOptimizationObjective();

    // Set maximum distance a new node can be from its nearest neighbor
    if (maxDistance_ < std::numeric_limits<double>::epsilon())
    {
        selfConfig.configurePlannerRange(maxDistance_);
        maxDistance_ *= magic::COST_MAX_MOTION_LENGTH_AS_SPACE_EXTENT_FRACTION;
    }
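    // Note: the self-configured range above is scaled down by
    // magic::COST_MAX_MOTION_LENGTH_AS_SPACE_EXTENT_FRACTION because smaller motion
    // segments have been observed to work better for cost-aware planners.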

    // Set the threshold that decides if a new node is a frontier node or non-frontier node
    if (frontierThreshold_ < std::numeric_limits<double>::epsilon())
    {
        frontierThreshold_ = si_->getMaximumExtent() * 0.01;
        OMPL_DEBUG("%s: Frontier threshold detected to be %lf", getName().c_str(), frontierThreshold_);
    }

    // Create the nearest neighbor function the first time setup is run
    if (!nearestNeighbors_)
        nearestNeighbors_.reset(tools::SelfConfig::getDefaultNearestNeighbors<Motion *>(this));

    // Set the distance function
    nearestNeighbors_->setDistanceFunction([this](const Motion *a, const Motion *b)
                                           {
                                               return distanceFunction(a, b);
                                           });

    // Setup TRRT specific variables ---------------------------------------------------------
    temp_ = initTemperature_;
    nonfrontierCount_ = 1;
    frontierCount_ = 1;  // init to 1 to prevent division by zero error
    bestCost_ = worstCost_ = opt_->identityCost();
}

void ompl::geometric::TRRT::freeMemory()
{
    // Delete all motions, states and the nearest neighbors data structure
    if (nearestNeighbors_)
    {
        std::vector<Motion *> motions;
        nearestNeighbors_->list(motions);
        for (auto &motion : motions)
        {
            if (motion->state)
                si_->freeState(motion->state);
            delete motion;
        }
    }
}

ompl::base::PlannerStatus
ompl::geometric::TRRT::solve(const base::PlannerTerminationCondition &plannerTerminationCondition)
{
    // Basic error checking
    checkValidity();

    // Goal information
    base::Goal *goal = pdef_->getGoal().get();
    base::GoalSampleableRegion *goalRegion = dynamic_cast<base::GoalSampleableRegion *>(goal);

    // Input States ---------------------------------------------------------------------------------

    // Loop through valid input states and add to tree
    while (const base::State *state = pis_.nextStart())
    {
        // Allocate memory for a new start state motion based on the "space-information"-size
        auto *motion = new Motion(si_);

        // Copy destination <= source
        si_->copyState(motion->state, state);

        // Set cost for this start state
        motion->cost = opt_->stateCost(motion->state);

        if (nearestNeighbors_->size() == 0)  // do not overwrite best/worst from previous call to solve
            worstCost_ = bestCost_ = motion->cost;

        // Add start motion to the tree
        nearestNeighbors_->add(motion);
    }

    // Check that input states exist
    if (nearestNeighbors_->size() == 0)
    {
        OMPL_ERROR("%s: There are no valid initial states!", getName().c_str());
        return base::PlannerStatus::INVALID_START;
    }

    // Create state sampler if this is TRRT's first run
    if (!sampler_)
        sampler_ = si_->allocStateSampler();

    // Debug
    OMPL_INFORM("%s: Starting planning with %u states already in datastructure", getName().c_str(),
                nearestNeighbors_->size());

    // Solver variables ------------------------------------------------------------------------------------

    // the final solution
    Motion *solution = nullptr;
    // the approximate solution, returned if no final solution found
    Motion *approxSolution = nullptr;
    // track the distance from goal to closest solution yet found
    double approxDifference = std::numeric_limits<double>::infinity();

    // distance between states - the initial state and the interpolated state (may be the same)
    double randMotionDistance;

    // Create random motion and a pointer (for optimization) to its state
    auto *randMotion = new Motion(si_);
    Motion *nearMotion;

    // STATES
    // The random state
    base::State *randState = randMotion->state;
    // The new state that is generated between states *to* and *from*
    base::State *interpolatedState = si_->allocState();  // Allocates "space information"-sized memory for a state
    // The chosen state btw rand_state and interpolated_state
    base::State *newState;

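    // One iteration of the T-RRT loop (stages I-VI below):
    //   I.   sample a random state, biased towards the goal with probability goalBias_
    //   II.  find the nearest existing tree node
    //   III. if the sample is farther than maxDistance_, interpolate a closer new state
    //   IV.  reject the candidate motion if it collides; then apply the minimum
    //        expansion control and the cost-based transition test
    //   V.   add the accepted motion to the tree and update bestCost_ / worstCost_
    //   VI.  check whether the new motion reaches the goal (exactly or approximately)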
    // Begin sampling --------------------------------------------------------------------------------------
    while (plannerTerminationCondition() == false)
    {
        // I.

        // Sample random state (with goal biasing probability)
        if (goalRegion && rng_.uniform01() < goalBias_ && goalRegion->canSample())
        {
            // Bias sample towards goal
            goalRegion->sampleGoal(randState);
        }
        else
        {
            // Uniformly Sample
            sampler_->sampleUniform(randState);
        }

        // II.

        // Find closest state in the tree
        nearMotion = nearestNeighbors_->nearest(randMotion);

        // III.

        // Distance from near state q_n to a random state
        randMotionDistance = si_->distance(nearMotion->state, randState);

        // Check if the rand_state is too far away
        if (randMotionDistance > maxDistance_)
        {
            // Computes the state that lies at time t in [0, 1] on the segment that connects *from* state to *to* state.
            // The memory location of *state* is not required to be different from the memory of either *from* or *to*.
            si_->getStateSpace()->interpolate(nearMotion->state, randState, maxDistance_ / randMotionDistance,
                                              interpolatedState);

            // Update the distance between near and new with the interpolated_state
            randMotionDistance = si_->distance(nearMotion->state, interpolatedState);

            // Use the interpolated state as the new state
            newState = interpolatedState;
        }
        else  // Random state is close enough
            newState = randState;

        // IV.
        // Check the candidate motion for collisions with obstacles
        if (!si_->checkMotion(nearMotion->state, newState))
            continue;  // try a new sample

        // Minimum Expansion Control
        // A possible side effect may appear when the tree expansion toward unexplored regions remains slow, and the
        // new nodes contribute only to refine already explored regions.
        if (!minExpansionControl(randMotionDistance))
            continue;  // give up on this one and try a new sample

        base::Cost childCost = opt_->stateCost(newState);

        // Only add this motion to the tree if the transition test accepts it
        if (!transitionTest(opt_->motionCost(nearMotion->state, newState)))
            continue;  // give up on this one and try a new sample

        // V.

        // Create a motion
        auto *motion = new Motion(si_);
        si_->copyState(motion->state, newState);
        motion->parent = nearMotion;  // link q_new to q_near as an edge
        motion->cost = childCost;

        // Add motion to data structure
        nearestNeighbors_->add(motion);

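        // bestCost_ and worstCost_ track the extreme state costs seen so far; their
        // difference is the cost range used by transitionTest() to decide how quickly
        // the temperature is lowered after an accepted transition.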
        if (opt_->isCostBetterThan(motion->cost, bestCost_))  // motion->cost is better than the existing best
            bestCost_ = motion->cost;
        if (opt_->isCostBetterThan(worstCost_, motion->cost))  // motion->cost is worse than the existing worst
            worstCost_ = motion->cost;

        // VI.

        // Check if this motion is the goal
        double distToGoal = 0.0;
        bool isSatisfied = goal->isSatisfied(motion->state, &distToGoal);
        if (isSatisfied)
        {
            approxDifference = distToGoal;  // the tolerated error distance btw state and goal
            solution = motion;              // set the final solution
            break;
        }

        // Is this the closest solution we've found so far
        if (distToGoal < approxDifference)
        {
            approxDifference = distToGoal;
            approxSolution = motion;
        }

    }  // end of solver sampling loop

    // Finish solution processing --------------------------------------------------------------------

    bool solved = false;
    bool approximate = false;

    // Substitute an empty solution with the best approximation
    if (solution == nullptr)
    {
        solution = approxSolution;
        approximate = true;
    }

    // Generate solution path for real/approx solution
    if (solution != nullptr)
    {
        lastGoalMotion_ = solution;

        // construct the solution path
        std::vector<Motion *> mpath;
        while (solution != nullptr)
        {
            mpath.push_back(solution);
            solution = solution->parent;
        }

        // set the solution path
        auto path(std::make_shared<PathGeometric>(si_));
        for (int i = mpath.size() - 1; i >= 0; --i)
            path->append(mpath[i]->state);

        pdef_->addSolutionPath(path, approximate, approxDifference, getName());
        solved = true;
    }

    // Clean up ---------------------------------------------------------------------------------------

    si_->freeState(interpolatedState);
    if (randMotion->state)
        si_->freeState(randMotion->state);
    delete randMotion;

    OMPL_INFORM("%s: Created %u states", getName().c_str(), nearestNeighbors_->size());

    return base::PlannerStatus(solved, approximate);
}

void ompl::geometric::TRRT::getPlannerData(base::PlannerData &data) const
{
    Planner::getPlannerData(data);

    std::vector<Motion *> motions;
    if (nearestNeighbors_)
        nearestNeighbors_->list(motions);

    if (lastGoalMotion_)
        data.addGoalVertex(base::PlannerDataVertex(lastGoalMotion_->state));

    for (auto &motion : motions)
    {
        if (motion->parent == nullptr)
            data.addStartVertex(base::PlannerDataVertex(motion->state));
        else
            data.addEdge(base::PlannerDataVertex(motion->parent->state), base::PlannerDataVertex(motion->state));
    }
}

bool ompl::geometric::TRRT::transitionTest(const base::Cost &motionCost)
{
    // Disallow any cost that is not better than the cost threshold
    if (!opt_->isCostBetterThan(motionCost, costThreshold_))
        return false;

    // Always accept if the cost is near or below zero
    if (motionCost.value() < 1e-4)
        return true;

    double dCost = motionCost.value();
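    // Metropolis-like acceptance rule: the motion is kept while exp(-cost / temp_)
    // stays above 0.5. A high temperature makes costly motions easy to accept; as
    // the temperature drops, only low-cost transitions pass.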
    double transitionProbability = exp(-dCost / temp_);
    if (transitionProbability > 0.5)
    {
        double costRange = worstCost_.value() - bestCost_.value();
        if (fabs(costRange) > 1e-4)  // Do not divide by zero
            // Successful transition test. Decrease the temperature slightly
            temp_ /= exp(dCost / (0.1 * costRange));

        return true;
    }

    // The transition failed. Increase the temperature (slightly)
    temp_ *= tempChangeFactor_;
    return false;
}

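// Minimum expansion control: motions longer than frontierThreshold_ count as
// "frontier" (exploring) nodes and are always accepted here; shorter motions only
// refine already explored regions and are rejected once the ratio of non-frontier
// to frontier nodes exceeds frontierNodeRatio_.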
bool ompl::geometric::TRRT::minExpansionControl(double randMotionDistance)
{
    if (randMotionDistance > frontierThreshold_)
    {
        // participates in the tree expansion
        ++frontierCount_;

        return true;
    }
    else
    {
        // participates in the tree refinement

        // check our ratio first before accepting it
        if ((double)nonfrontierCount_ / (double)frontierCount_ > frontierNodeRatio_)
            // reject this node as being too much refinement
            return false;

        ++nonfrontierCount_;
        return true;
    }
}