55 for (auto &state : states_)
62 for (auto state : states_)
64 double d = si_->distance(st, state);
73 out << states_.size() << " goal states, threshold = " << threshold_ << ", memory address = " << this << std::endl;
74 for (auto state : states_)
76 si_->printState(state, out);
83 if (states_.empty())
84 throw Exception("There are no goals to sample");
87 samplePosition_ = samplePosition_ % states_.size();
90 // Increment the counter. Do NOT roll over in case a new state is added before sampleGoal is called again.
96 return states_.size();
111 if (index >= states_.size())
112 throw Exception("Index " + std::to_string(index) + " out of range. Only " + std::to_string(states_.size()) +
114 return states_[index];
119 return states_.size();
124 return !states_.empty();
virtual void clear()
Clear all goal states.
std::vector< State * > states_
The goal states. Only ones that are valid are considered by the motion planner.
virtual void addState(const State *st)
Add a goal state.
virtual const State * getState(unsigned int index) const
Return a pointer to the state at the given index in the state list.
StateType * get()
Returns a pointer to the contained state.
unsigned int maxSampleCount() const override
Return the maximum number of samples that can be asked for before repeating.
void sampleGoal(State *st) const override
Sample a state in the goal region.
The space information for this goal.
void print(std::ostream &out=std::cout) const override
Print information about the goal data structure to a stream.
virtual bool hasStates() const
Check if there are any states in this goal region.
double distanceGoal(const State *st) const override
Compute the distance to the goal (heuristic). This function is the one used in computing the distance to the goal in a call to isSatisfied().
virtual std::size_t getStateCount() const
Return the number of valid goal states.