update irace/fastga to follow recent IOH refactoring

This commit is contained in:
Johann Dreo 2021-07-07 12:59:41 +02:00
commit 8e960b23f3
6 changed files with 423 additions and 434 deletions

View file

@ -1,5 +1,6 @@
# ParadiseO
######################################################################################
### 0) Check the CMake version
######################################################################################

View file

@ -45,46 +45,23 @@ set(PARADISEO_LIBRARIES ga eoutils eo)
# IOH
set(IOH_ROOT "~/code/IOHexperimenter/" CACHE PATH "Where to find IOHexperimenter")
find_path(IOH_PROBLEM_H "IOHprofiler_problem.h" PATHS ${IOH_ROOT}/src/Template/)
find_library(IOH_LIBRARY "IOH" PATHS ${IOH_ROOT} PATH_SUFFIXES release Release debug Debug build Build)
find_path(IOH_HPP "ioh.hpp" PATHS ${IOH_ROOT}/include/)
# find_library(IOH_LIBRARY "IOH" PATHS ${IOH_ROOT} PATH_SUFFIXES release Release debug Debug build Build)
if(EXISTS ${IOH_PROBLEM_H} AND EXISTS ${IOH_LIBRARY})
if(EXISTS ${IOH_HPP}) # AND EXISTS ${IOH_LIBRARY})
message(STATUS "Found IOH in ${IOH_ROOT}")
include_directories(${IOH_ROOT}/build/Cpp/src/)
link_directories(${IOH_ROOT}/build/Cpp/bin/)
# Workaround IOH's poorly designed headers inclusion scheme.
SET(PROBLEMS_BBOB_DIR "src/Problems/BBOB")
SET(PROBLEMS_BBOB_COMMON_DIR "src/Problems/BBOB/bbob_common_used_functions")
SET(PROBLEMS_COMMON_DIR "src/Problems/common_used_functions")
SET(PROBLEMS_PBO_DIR "src/Problems/PBO")
SET(PROBLEMS_WMODEL_DIR "src/Problems/WModel")
SET(PROBLEMS_PYTHON_DIR "src/Problems/Python")
SET(SUITES_DIR "src/Suites")
SET(TEMPLATE_DIR "src/Template")
SET(TEMPLATE_EXPERIMENTS_DIR "src/Template/Experiments")
SET(TEMPLATE_LOGGERS_DIR "src/Template/Loggers")
SET(IOHEXPERIMENTER_DIR
"${IOH_ROOT}/${PROBLEMS_COMMON_DIR}"
"${IOH_ROOT}/${PROBLEMS_BBOB_DIR}"
"${IOH_ROOT}/${PROBLEMS_BBOB_COMMON_DIR}"
"${IOH_ROOT}/${PROBLEMS_PBO_DIR}"
"${IOH_ROOT}/${PROBLEMS_WMODEL_DIR}"
"${IOH_ROOT}/${PROBLEMS_PYTHON_DIR}"
"${IOH_ROOT}/${SUITES_DIR}"
"${IOH_ROOT}/${TEMPLATE_DIR}"
"${IOH_ROOT}/${TEMPLATE_EXPERIMENTS_DIR}"
"${IOH_ROOT}/${TEMPLATE_LOGGERS_DIR}"
)
include_directories(${IOHEXPERIMENTER_DIR})
include_directories(${IOH_ROOT}/include/)
include_directories(${IOH_ROOT}/external/fmt/include/)
include_directories(${IOH_ROOT}/external/clutchlog/)
link_directories(${IOH_ROOT}/release/external/fmt/)
else()
if(NOT EXISTS ${IOH_PROBLEM_H})
message(FATAL_ERROR "Could not find `IOHprofiler_problem.h` in: ${IOH_ROOT}/src/Template/ (did you forget to compile it?)")
endif()
if(NOT EXISTS ${IOH_LIBRARIES})
message(FATAL_ERROR "Could not find `libIOH` in: ${IOH_ROOT}/[release|debug|build] (did you forget to compile it?)")
if(NOT EXISTS ${IOH_HPP})
message(FATAL_ERROR "Could not find `ioh.hpp` in: ${IOH_ROOT}/include/")
endif()
# if(NOT EXISTS ${IOH_LIBRARIES})
# message(FATAL_ERROR "Could not find `libIOH` in: ${IOH_ROOT}/[release|debug|build] (did you forget to compile it?)")
# endif()
endif()
@ -93,5 +70,6 @@ endif()
######################################################################################
add_executable(fastga fastga.cpp)
target_link_libraries(fastga ${PARADISEO_LIBRARIES} ${IOH_LIBRARY} stdc++fs)
# target_link_libraries(fastga ${PARADISEO_LIBRARIES} ${IOH_LIBRARY} stdc++fs)
target_link_libraries(fastga ${PARADISEO_LIBRARIES} stdc++fs fmt)

Binary file not shown.

View file

@ -2,16 +2,19 @@
#include <iostream>
#include <cstdlib>
#include <string>
#include <memory>
#include <eo>
#include <ga.h>
#include <utils/checkpointing>
#include <eoInt.h>
#include <problems/eval/eoEvalIOH.h>
#include <IOHprofiler_ecdf_logger.h>
#include <IOHprofiler_csv_logger.h>
#include <IOHprofiler_observer_combine.h>
#include <f_w_model_one_max.hpp>
#include <ioh.hpp>
/*****************************************************************************
* ParadisEO algorithmic grammar definition.
*****************************************************************************/
// using Particle = eoRealParticle<eoMaximizingFitness>;
using Ints = eoInt<eoMaximizingFitnessT<int>, size_t>;
@ -110,6 +113,10 @@ eoAlgoFoundryFastGA<Bits>& make_foundry(
return foundry;
}
/*****************************************************************************
* irace helper functions.
*****************************************************************************/
Bits::Fitness fake_func(const Bits&) { return 0; }
void print_irace_full(const eoParam& param, const size_t slot_size, std::string type="i", std::ostream& out = std::cout)
@ -210,12 +217,16 @@ std::ostream& operator<<(std::ostream& os, const Problem& pb)
return os;
}
/*****************************************************************************
* Command line interface.
*****************************************************************************/
int main(int argc, char* argv[])
{
/***** Global parameters. *****/
enum { NO_ERROR = 0, ERROR_USAGE = 100 };
std::map<size_t, Problem> problem_config_mapping {
std::map<size_t, Problem> benchmark {
/* ┌ problem index in the map
* problem ID in IOH experimenter
* dummy
@ -249,16 +260,22 @@ int main(int argc, char* argv[])
eoParser parser(argc, argv, "FastGA interface for iRace");
/***** Problem parameters *****/
auto problem_p = parser.getORcreateParam<size_t>(0,
"problem", "Problem ID",
'p', "Problem", /*required=*/true);
const size_t problem = problem_p.value();
assert(0 <= problem and problem < problem_config_mapping.size());
assert(0 <= problem and problem < benchmark.size());
// const size_t dimension = parser.getORcreateParam<size_t>(1000,
// "dimension", "Dimension size",
// 'd', "Problem").value();
const size_t dimension = problem_config_mapping[problem].dimension;
const size_t dimension = benchmark[problem].dimension;
auto instance_p = parser.getORcreateParam<size_t>(0,
"instance", "Instance ID",
'i', "Instance", /*required=*/false);
const size_t instance = instance_p.value();
const size_t max_evals = parser.getORcreateParam<size_t>(5 * dimension,
"max-evals", "Maximum number of evaluations",
@ -268,6 +285,7 @@ int main(int argc, char* argv[])
"buckets", "Number of buckets for discretizing the ECDF",
'b', "Performance estimation").value();
/***** Generic options *****/
uint32_t seed =
parser.getORcreateParam<uint32_t>(0,
"seed", "Random number seed (0 = epoch)",
@ -280,25 +298,31 @@ int main(int argc, char* argv[])
bool full_log =
parser.getORcreateParam<bool>(0,
"full-log", "Log the full search in CSV files (using the IOH profiler format)",
"full-log", "Log the full search in CSV files"/* (using the IOH profiler format)"*/,
'F').value();
bool output_mat =
parser.getORcreateParam<bool>(0,
"output-mat", "Output the aggregated attainment matrix instead of its scalar sum.",
"output-mat", "Output the aggregated attainment matrix instead of its scalar sum (fancy colormap on stderr, parsable CSV on stdout).",
'A').value();
/***** populations sizes *****/
auto pop_size_p = parser.getORcreateParam<size_t>(5,
"pop-size", "Population size",
'P', "Operator Choice", /*required=*/false);
const size_t pop_size = pop_size_p.value();
auto instance_p = parser.getORcreateParam<size_t>(0,
"instance", "Instance ID",
'i', "Instance", /*required=*/false);
const size_t instance = instance_p.value();
auto offspring_size_p = parser.getORcreateParam<size_t>(0,
"offspring-size", "Offsprings size (0 = same size than the parents pop, see --pop-size)",
'O', "Operator Choice", /*required=*/false); // Single alternative, not required.
const size_t offspring_size = offspring_size_p.value();
const size_t generations = static_cast<size_t>(std::floor(
static_cast<double>(max_evals) / static_cast<double>(pop_size)));
// const size_t generations = std::numeric_limits<size_t>::max();
eo::log << eo::debug << "Number of generations: " << generations << std::endl;
/***** operators / parameters *****/
auto continuator_p = parser.getORcreateParam<size_t>(0,
"continuator", "Stopping criterion",
'o', "Operator Choice", /*required=*/false); // Single alternative, not required.
@ -344,12 +368,6 @@ int main(int argc, char* argv[])
'r', "Operator Choice", /*required=*/true);
const size_t replacement = replacement_p.value();
auto offspring_size_p = parser.getORcreateParam<size_t>(0,
"offspring-size", "Offsprings size (0 = same size than the parents pop, see --pop-size)",
'O', "Operator Choice", /*required=*/false); // Single alternative, not required.
const size_t offspring_size = offspring_size_p.value();
// Help + Verbose routines
make_verbose(parser);
make_help(parser, /*exit_after*/false, std::clog);
@ -410,28 +428,19 @@ int main(int argc, char* argv[])
exit(NO_ERROR);
}
const size_t generations = static_cast<size_t>(std::floor(
static_cast<double>(max_evals) / static_cast<double>(pop_size)));
// const size_t generations = std::numeric_limits<size_t>::max();
eo::log << eo::debug << "Number of generations: " << generations << std::endl;
/*****************************************************************************
* IOH stuff.
*****************************************************************************/
/***** IOH logger *****/
auto max_target = benchmark[problem].max_target;
ioh::logger::eah::Log10Scale<double> target_range(0, max_target, buckets);
ioh::logger::eah::Log10Scale<size_t> budget_range(0, max_evals, buckets);
ioh::logger::EAH eah_logger(target_range, budget_range);
ioh::logger::Combine loggers(eah_logger);
auto max_target_para = problem_config_mapping[problem].max_target;
IOHprofiler_RangeLinear<size_t> target_range(0, max_target_para, buckets);
IOHprofiler_RangeLinear<size_t> budget_range(0, max_evals, buckets);
IOHprofiler_ecdf_logger<int, size_t, size_t> ecdf_logger(
target_range, budget_range,
/*use_known_optimum*/false);
// ecdf_logger.set_complete_flag(true);
// ecdf_logger.set_interval(0);
ecdf_logger.activate_logger();
IOHprofiler_observer_combine<int> loggers(ecdf_logger);
std::shared_ptr<IOHprofiler_csv_logger<int>> csv_logger;
std::shared_ptr<ioh::logger::FlatFile> csv_logger = nullptr;
if(full_log) {
// Build up an algorithm name from main parameters.
std::ostringstream name;
@ -453,57 +462,54 @@ int main(int argc, char* argv[])
// Build up a problem description.
std::ostringstream desc;
desc << "pb=" << problem << "_";
desc << problem_config_mapping[problem]; // Use the `operator<<` above.
desc << benchmark[problem]; // Use the `operator<<` above.
std::clog << desc.str() << std::endl;
std::string dir(name.str());
std::filesystem::path d = name.str();
std::filesystem::create_directory(d);
std::filesystem::path folder = desc.str();
std::filesystem::create_directories(folder);
std::string folder(desc.str());
std::filesystem::path f = desc.str();
std::filesystem::create_directory(d);
std::filesystem::create_directory(d/f);
csv_logger = std::make_shared<IOHprofiler_csv_logger<int>>(dir, folder, d, f);
loggers.add(*csv_logger);
ioh::trigger::OnImprovement on_improvement;
ioh::watch::Evaluations evaluations;
ioh::watch::TransformedYBest transformed_y_best;
std::vector<std::reference_wrapper<ioh::logger::Trigger >> t = {std::ref(on_improvement)};
std::vector<std::reference_wrapper<ioh::logger::Property>> w = {std::ref(evaluations),std::ref(transformed_y_best)};
csv_logger = std::make_shared<ioh::logger::FlatFile>(
// {std::ref(on_improvement)},
// {std::ref(evaluations),std::ref(transformed_y_best)},
t, w,
name.str(),
folder
);
loggers.append(*csv_logger);
}
/***** IOH problem *****/
double w_model_suite_dummy_para = problem_config_mapping[problem].dummy;
int w_model_suite_epitasis_para = problem_config_mapping[problem].epistasis;
int w_model_suite_neutrality_para = problem_config_mapping[problem].neutrality;
int w_model_suite_ruggedness_para = problem_config_mapping[problem].ruggedness;
double w_dummy = benchmark[problem].dummy;
int w_epitasis = benchmark[problem].epistasis;
int w_neutrality = benchmark[problem].neutrality;
int w_ruggedness = benchmark[problem].ruggedness;
W_Model_OneMax w_model_om;
std::string problem_name = "OneMax";
problem_name = problem_name
+ "_D" + std::to_string((int)(w_model_suite_dummy_para * dimension))
+ "_E" + std::to_string(w_model_suite_epitasis_para)
+ "_N" + std::to_string(w_model_suite_neutrality_para)
+ "_R" + std::to_string(w_model_suite_ruggedness_para);
// std::string problem_name = "OneMax";
// problem_name = problem_name
// + "_D" + std::to_string((int)(w_dummy * dimension))
// + "_E" + std::to_string(w_epitasis)
// + "_N" + std::to_string(w_neutrality)
// + "_R" + std::to_string(w_ruggedness);
/// This must be called to configure the w-model to be tested.
w_model_om.set_w_setting(w_model_suite_dummy_para,w_model_suite_epitasis_para,
w_model_suite_neutrality_para,w_model_suite_ruggedness_para);
/// Set problem_name based on the configuration.
w_model_om.IOHprofiler_set_problem_name(problem_name);
/// Set problem_id as 1
w_model_om.IOHprofiler_set_problem_id(problem); // FIXME check what that means
// w_model_om.IOHprofiler_set_instance_id(instance); // FIXME changing the instance seems to change the target upper bound.
/// Set dimension.
w_model_om.IOHprofiler_set_number_of_variables(dimension);
ioh::problem::wmodel::WModelOneMax w_model_om(
instance,
dimension,
w_dummy,
w_epitasis,
w_neutrality,
w_ruggedness);
/***** Bindings *****/
ecdf_logger.track_problem(w_model_om);
if(full_log) {
csv_logger->track_problem(w_model_om);
}
w_model_om.attach_logger(loggers);
/*****************************************************************************
* Binding everything together.
*****************************************************************************/
eoEvalIOHproblem<Bits> onemax_pb(w_model_om, loggers);
@ -545,40 +551,45 @@ int main(int argc, char* argv[])
// // Actually instanciate and run the algorithm.
// eval_foundry(encoded_algo);
/*****************************************************************************
* Run and output results.
*****************************************************************************/
eoPop<Bits> pop;
pop.append(pop_size, onemax_init);
onemax_eval(pop,pop);
foundry(pop); // Actually run the selected algorithm.
/***** IOH perf stats *****/
IOHprofiler_ecdf_sum ecdf_sum;
// iRace expects minimization
long perf = ecdf_sum(ecdf_logger.data());
double perf = ioh::logger::eah::stat::under_curve::volume(eah_logger);
// assert(0 < perf and perf <= buckets*buckets);
if(perf <= 0 or buckets*buckets < perf) {
std::cerr << "WARNING: illogical performance: " << perf
<< ", check the bounds or the algorithm." << std::endl;
if(perf == 0 or perf > max_target * max_evals * 1.0) {
std::cerr << "WARNING: illogical performance? " << perf
<< " Check the bounds or the algorithm." << std::endl;
}
// std::clog << "After " << eval_count.getValue() << " / " << max_evals << " evaluations" << std::endl;
if(output_mat) {
std::vector<std::vector<double>> mat = ioh::logger::eah::stat::distribution(eah_logger);
IOHprofiler_ecdf_aggregate agg;
IOHprofiler_ecdf_aggregate::Mat mat = agg(ecdf_logger.data());
std::clog << "Attainment matrix sum: " << std::endl;
// Fancy color map on clog.
std::clog << ioh::logger::eah::colormap(mat) << std::endl;
// Parsable CSV on cout.
std::clog << "Attainment matrix distribution: " << std::endl;
assert(mat.size() > 0);
assert(mat[0].size() > 1);
for(int i = mat.size()-1; i >= 0; --i) {
for(size_t i = mat.size()-1; i >= 0; --i) {
std::cout << mat[i][0];
for(int j = 1; j < mat[i].size(); ++j) {
for(size_t j = 1; j < mat[i].size(); ++j) {
std::cout << "," << mat[i][j];
}
std::cout << std::endl;
}
} else {
// iRace expects minimization
std::cout << -1 * perf << std::endl;
}
}

Binary file not shown.

View file

@ -2,10 +2,7 @@
#ifndef _eoEvalIOH_h
#define _eoEvalIOH_h
#include <IOHprofiler_problem.h>
#include <IOHprofiler_suite.h>
#include <IOHprofiler_observer.h>
#include <IOHprofiler_ecdf_logger.h>
#include <ioh.hpp>
/** Wrap an IOHexperimenter's problem class within an eoEvalFunc.
*
@ -25,18 +22,19 @@ class eoEvalIOHproblem : public eoEvalFunc<EOT>
using Fitness = typename EOT::Fitness;
using ScalarType = typename Fitness::ScalarType;
eoEvalIOHproblem(IOHprofiler_problem<ScalarType> & pb) :
eoEvalIOHproblem(ioh::problem::Problem<ScalarType> & pb) :
_ioh_pb(&pb),
_has_log(false),
_ioh_log(nullptr)
{ }
eoEvalIOHproblem(IOHprofiler_problem<ScalarType> & pb, IOHprofiler_observer<ScalarType> & log ) :
eoEvalIOHproblem(ioh::problem::Problem<ScalarType> & pb, ioh::Logger & log ) :
_ioh_pb(&pb),
_has_log(true),
_ioh_log(&log)
{
_ioh_log->track_problem(*_ioh_pb);
// _ioh_log->track_problem(*_ioh_pb);
pb.attach_logger(log);
}
virtual void operator()(EOT& sol)
@ -55,322 +53,323 @@ class eoEvalIOHproblem : public eoEvalFunc<EOT>
* Instead of re-assembling your algorithm,
* just update the problem pointer.
*/
void problem(IOHprofiler_problem<ScalarType> & pb )
void problem(ioh::problem::Problem<ScalarType> & pb )
{
_ioh_pb = &pb;
_ioh_log->track_problem(pb);
// _ioh_log->track_problem(pb);
_ioh_pb->attach_logger(_ioh_log);
}
bool has_logger() const {return _has_log;}
IOHprofiler_observer<ScalarType> & observer() {return *_ioh_log;}
ioh::Logger & logger() {return *_ioh_log;}
protected:
IOHprofiler_problem<ScalarType> * _ioh_pb;
ioh::problem::Problem<ScalarType> * _ioh_pb;
bool _has_log;
IOHprofiler_observer<ScalarType> * _ioh_log;
ioh::Logger * _ioh_log;
virtual Fitness call(EOT& sol)
{
Fitness f = _ioh_pb->evaluate(sol);
Fitness f = (*_ioh_pb)(sol);
if(_has_log) {
_ioh_log->do_log(_ioh_pb->loggerInfo());
_ioh_log->log(_ioh_pb->log_info());
}
return f;
}
};
/** Wrap an IOHexperimenter's suite class within an eoEvalFunc. Useful for algorithm selection.
*
* WARNING: only handle a suite of problems of A UNIQUE, SINGLE DIMENSION.
* Because a given eoAlgo is bound to an instantiated eoInit (most probably an eoInitWithDim)
* which is parametrized with a given dimension.
*
* The idea is to run the given algorithm on a whole suite of problems
* and output its aggregated performance.
*
* See https://github.com/IOHprofiler/IOHexperimenter
*
* The main template EOT defines the interface of this functor,
* that is how the algorithm instance is encoded
* (e.g. an eoAlgoFoundry's integer vector).
* The SUBEOT template defines the encoding of the sub-problem,
* which the encoded algorithm have to solve
* (e.g. a OneMax problem).
*
* @note: This will not reset the given pop between two calls
* of the given algorithm on new problems.
* You most probably want to wrap your algorithm
* in an eoAlgoRestart to do that for you.
*
* Handle only IOHprofiler `stat` classes whose template type STAT
* is explicitly convertible to the given fitness.
* Any scalar is most probably already convertible, but compound classes
* (i.e. for multi-objective problems) are most probably not.
*
* @note: You're responsible of adding a conversion operator
* to the given STAT type, if necessary
* (this is checked by a static assert in the constructor).
*
* @note: You're also responsible of matching the fitness' encoding scalar type
* (IOH handle double and int, as of 2020-03-09).
*
* You will need to pass the IOH include directory to your compiler
* (e.g. IOHexperimenter/build/Cpp/src/).
*/
template<class EOT, class SUBEOT, class STAT>
class eoEvalIOHsuiteSingleDim : public eoEvalFunc<EOT>
{
public:
using EOType = EOT;
using Fitness = typename EOType::Fitness;
using ScalarType = typename Fitness::ScalarType;
// /** Wrap an IOHexperimenter's suite class within an eoEvalFunc. Useful for algorithm selection.
// *
// * WARNING: only handle a suite of problems of A UNIQUE, SINGLE DIMENSION.
// * Because a given eoAlgo is bound to an instantiated eoInit (most probably an eoInitWithDim)
// * which is parametrized with a given dimension.
// *
// * The idea is to run the given algorithm on a whole suite of problems
// * and output its aggregated performance.
// *
// * See https://github.com/IOHprofiler/IOHexperimenter
// *
// * The main template EOT defines the interface of this functor,
// * that is how the algorithm instance is encoded
// * (e.g. an eoAlgoFoundry's integer vector).
// * The SUBEOT template defines the encoding of the sub-problem,
// * which the encoded algorithm have to solve
// * (e.g. a OneMax problem).
// *
// * @note: This will not reset the given pop between two calls
// * of the given algorithm on new problems.
// * You most probably want to wrap your algorithm
// * in an eoAlgoRestart to do that for you.
// *
// * Handle only IOH experimenter `stat` classes whose template type STAT
// * is explicitly convertible to the given fitness.
// * Any scalar is most probably already convertible, but compound classes
// * (i.e. for multi-objective problems) are most probably not.
// *
// * @note: You're responsible of adding a conversion operator
// * to the given STAT type, if necessary
// * (this is checked by a static assert in the constructor).
// *
// * @note: You're also responsible of matching the fitness' encoding scalar type
// * (IOH handle double and int, as of 2020-03-09).
// *
// * You will need to pass the IOH include directory to your compiler
// * (e.g. IOHexperimenter/build/Cpp/src/).
// */
// template<class EOT, class SUBEOT, class STAT>
// class eoEvalIOHsuiteSingleDim : public eoEvalFunc<EOT>
// {
// public:
// using EOType = EOT;
// using Fitness = typename EOType::Fitness;
// using ScalarType = typename Fitness::ScalarType;
/** Takes an ecdf_logger that computes the base data structure
* on which a ecdf_stat will be called to compute an
* aggregated performance measure, which will be the evaluated fitness.
*
* As such, the logger and the stat are mandatory.
*
* @note: The given logger should be at least embedded
* in the logger bound with the given eval.
*/
eoEvalIOHsuiteSingleDim(
eoEvalIOHproblem<SUBEOT>& eval,
eoAlgoFoundry<SUBEOT>& algo,
eoPop<SUBEOT>& pop,
IOHprofiler_suite<ScalarType>& suite,
IOHprofiler_ecdf_logger<ScalarType>& log,
IOHprofiler_ecdf_stat<STAT>& stat
) :
_eval(eval),
_algo(algo),
_pop(pop),
_ioh_suite(&suite),
_ioh_log(log),
_ioh_stat(stat)
{
static_assert(std::is_convertible<STAT,Fitness>::value);
assert(eval.has_log());
_ioh_log.target_suite(suite);
}
// /** Takes an ecdf_logger that computes the base data structure
// * on which a ecdf_stat will be called to compute an
// * aggregated performance measure, which will be the evaluated fitness.
// *
// * As such, the logger and the stat are mandatory.
// *
// * @note: The given logger should be at least embedded
// * in the logger bound with the given eval.
// */
// eoEvalIOHsuiteSingleDim(
// eoEvalIOHproblem<SUBEOT>& eval,
// eoAlgoFoundry<SUBEOT>& algo,
// eoPop<SUBEOT>& pop,
// ioh::suite::Suite<ScalarType>& suite,
// ioh::logger::ECDF<ScalarType>& log,
// ioh::logger::ECDFStat<STAT>& stat
// ) :
// _eval(eval),
// _algo(algo),
// _pop(pop),
// _ioh_suite(&suite),
// _ioh_log(log),
// _ioh_stat(stat)
// {
// static_assert(std::is_convertible<STAT,Fitness>::value);
// assert(eval.has_log());
// _ioh_log.target_suite(suite);
// }
virtual void operator()(EOType& sol)
{
if(not sol.invalid()) {
return;
}
// virtual void operator()(EOType& sol)
// {
// if(not sol.invalid()) {
// return;
// }
sol.fitness( call( sol ) );
}
// sol.fitness( call( sol ) );
// }
/** Update the suite pointer for a new one.
*
* This is useful if you assembled a ParadisEO algorithm
* and call it several times in an IOHexperimenter's loop across several suites.
* Instead of re-assembling your algorithm,
* just update the suite pointer.
*/
void suite( IOHprofiler_suite<ScalarType> & suite )
{
_ioh_suite = &suite;
_ioh_log.target_suite(suite);
}
// /** Update the suite pointer for a new one.
// *
// * This is useful if you assembled a ParadisEO algorithm
// * and call it several times in an IOHexperimenter's loop across several suites.
// * Instead of re-assembling your algorithm,
// * just update the suite pointer.
// */
// void suite( ioh::suite::Suite<ScalarType> & suite )
// {
// _ioh_suite = &suite;
// _ioh_log.target_suite(suite);
// }
protected:
//! Sub-problem @{
eoEvalIOHproblem<SUBEOT>& _eval;
eoAlgoFoundry<SUBEOT>& _algo;
eoPop<SUBEOT>& _pop;
//! @}
// protected:
// //! Sub-problem @{
// eoEvalIOHproblem<SUBEOT>& _eval;
// eoAlgoFoundry<SUBEOT>& _algo;
// eoPop<SUBEOT>& _pop;
// //! @}
//! IOH @{
IOHprofiler_suite<ScalarType> * _ioh_suite;
IOHprofiler_observer<ScalarType> & _ioh_log;
IOHprofiler_ecdf_stat<STAT>& _ioh_stat;
//! @}
// //! IOH @{
// ioh::suite::Suite<ScalarType> * _ioh_suite;
// ioh::logger::Observer<ScalarType> & _ioh_log;
// ioh::logger::ECDFStat<STAT>& _ioh_stat;
// //! @}
virtual Fitness call(EOType& sol)
{
// Decode the algorithm encoded in sol.
_algo = sol;
// virtual Fitness call(EOType& sol)
// {
// // Decode the algorithm encoded in sol.
// _algo = sol;
// Evaluate the performance of the encoded algo instance
// on a whole IOH suite benchmark.
typename IOHprofiler_suite<ScalarType>::Problem_ptr pb;
while( (pb = _ioh_suite->get_next_problem()) ) {
// // Evaluate the performance of the encoded algo instance
// // on a whole IOH suite benchmark.
// typename ioh::suite::Suite<ScalarType>::Problem_ptr pb;
// while( (pb = _ioh_suite->get_next_problem()) ) {
// Consider a new problem.
_eval.problem(*pb); // Will call logger's target_problem.
// // Consider a new problem.
// _eval.problem(*pb); // Will call logger's target_problem.
// Actually solve it.
_algo(_pop); // Will call the logger's write_line.
// There's no need to get back the best fitness from ParadisEO,
// because everything is captured on-the-fly by IOHprofiler.
}
// // Actually solve it.
// _algo(_pop); // Will call the logger's write_line.
// // There's no need to get back the best fitness from ParadisEO,
// // because everything is captured on-the-fly by IOH experimenter.
// }
// Get back the evaluated performance.
// The explicit cast from STAT to Fitness which should exists.
return static_cast<Fitness>(_ioh_stat(_ioh_log.data()));
}
};
// // Get back the evaluated performance.
// // The explicit cast from STAT to Fitness which should exists.
// return static_cast<Fitness>(_ioh_stat(_ioh_log.data()));
// }
// };
/** Operator that is called before search for each problem within an IOH suite.
*
* You most probably need to reinstanciate some operators within your algorithm:
* at least the operators depending on the dimension,
* as it will change between two calls.
*
* By providing an operator using this interface,
* you can have access to all the information needed to do so.
*/
template<class EOT>
class eoIOHSetup : public eoFunctorBase
{
public:
using AtomType = typename EOT::AtomType;
virtual void operator()(eoPop<EOT>& pop, typename IOHprofiler_suite<AtomType>::Problem_ptr pb) = 0;
};
// /** Operator that is called before search for each problem within an IOH suite.
// *
// * You most probably need to reinstanciate some operators within your algorithm:
// * at least the operators depending on the dimension,
// * as it will change between two calls.
// *
// * By providing an operator using this interface,
// * you can have access to all the information needed to do so.
// */
// template<class EOT>
// class eoIOHSetup : public eoFunctorBase
// {
// public:
// using AtomType = typename EOT::AtomType;
// virtual void operator()(eoPop<EOT>& pop, typename ioh::suite::Suite<AtomType>::Problem_ptr pb) = 0;
// };
/** Wrap an IOHexperimenter's suite class within an eoEvalFunc. Useful for algorithm selection.
*
* The idea is to run the given algorithm on a whole suite of problems
* and output its aggregated performance.
*
* See https://github.com/IOHprofiler/IOHexperimenter
*
* The main template EOT defines the interface of this functor,
* that is how the algorithm instance is encoded
* (e.g. an eoAlgoFoundry's integer vector).
* The SUBEOT template defines the encoding of the sub-problem,
* which the encoded algorithm have to solve
* (e.g. a OneMax problem).
*
* @note: This will not reset the given pop between two calls
* of the given algorithm on new problems.
* You most probably want to wrap your algorithm
* in an eoAlgoRestart to do that for you.
*
* Handle only IOHprofiler `stat` classes whose template type STAT
* is explicitly convertible to the given fitness.
* Any scalar is most probably already convertible, but compound classes
* (i.e. for multi-objective problems) are most probably not.
*
* @note: You're responsible of adding a conversion operator
* to the given STAT type, if necessary
* (this is checked by a static assert in the constructor).
*
* @note: You're also responsible of matching the fitness' encoding scalar type
* (IOH handle double and int, as of 2020-03-09).
*
* You will need to pass the IOH include directory to your compiler
* (e.g. IOHexperimenter/build/Cpp/src/).
*/
template<class EOT, class SUBEOT, class STAT>
class eoEvalIOHsuite : public eoEvalFunc<EOT>
{
public:
using Fitness = typename EOT::Fitness;
using ScalarType = typename Fitness::ScalarType;
using SubAtomType = typename SUBEOT::AtomType;
// /** Wrap an IOHexperimenter's suite class within an eoEvalFunc. Useful for algorithm selection.
// *
// * The idea is to run the given algorithm on a whole suite of problems
// * and output its aggregated performance.
// *
// * See https://github.com/IOHprofiler/IOHexperimenter
// *
// * The main template EOT defines the interface of this functor,
// * that is how the algorithm instance is encoded
// * (e.g. an eoAlgoFoundry's integer vector).
// * The SUBEOT template defines the encoding of the sub-problem,
// * which the encoded algorithm have to solve
// * (e.g. a OneMax problem).
// *
// * @note: This will not reset the given pop between two calls
// * of the given algorithm on new problems.
// * You most probably want to wrap your algorithm
// * in an eoAlgoRestart to do that for you.
// *
// * Handle only IOHprofiler `stat` classes whose template type STAT
// * is explicitly convertible to the given fitness.
// * Any scalar is most probably already convertible, but compound classes
// * (i.e. for multi-objective problems) are most probably not.
// *
// * @note: You're responsible of adding a conversion operator
// * to the given STAT type, if necessary
// * (this is checked by a static assert in the constructor).
// *
// * @note: You're also responsible of matching the fitness' encoding scalar type
// * (IOH handle double and int, as of 2020-03-09).
// *
// * You will need to pass the IOH include directory to your compiler
// * (e.g. IOHexperimenter/build/Cpp/src/).
// */
// template<class EOT, class SUBEOT, class STAT>
// class eoEvalIOHsuite : public eoEvalFunc<EOT>
// {
// public:
// using Fitness = typename EOT::Fitness;
// using ScalarType = typename Fitness::ScalarType;
// using SubAtomType = typename SUBEOT::AtomType;
/** Takes an ecdf_logger that computes the base data structure
* on which a ecdf_stat will be called to compute an
* aggregated performance measure, which will be the evaluated fitness.
*
* As such, the logger and the stat are mandatory.
*
* @note: The given logger should be at least embedded
* in the logger that is bound with the given eval.
*/
eoEvalIOHsuite(
eoEvalIOHproblem<SUBEOT>& eval,
eoAlgoFoundry<SUBEOT>& foundry,
eoPop<SUBEOT>& pop,
eoIOHSetup<SUBEOT>& setup,
IOHprofiler_suite<SubAtomType>& suite,
IOHprofiler_ecdf_logger<SubAtomType>& log,
IOHprofiler_ecdf_stat<STAT>& stat
) :
_eval(eval),
_foundry(foundry),
_pop(pop),
_setup(setup),
_ioh_suite(&suite),
_ioh_log(log),
_ioh_stat(stat)
{
static_assert(std::is_convertible<STAT,Fitness>::value);
assert(_eval.has_logger());
_ioh_log.track_suite(suite);
}
// /** Takes an ecdf_logger that computes the base data structure
// * on which a ecdf_stat will be called to compute an
// * aggregated performance measure, which will be the evaluated fitness.
// *
// * As such, the logger and the stat are mandatory.
// *
// * @note: The given logger should be at least embedded
// * in the logger that is bound with the given eval.
// */
// eoEvalIOHsuite(
// eoEvalIOHproblem<SUBEOT>& eval,
// eoAlgoFoundry<SUBEOT>& foundry,
// eoPop<SUBEOT>& pop,
// eoIOHSetup<SUBEOT>& setup,
// ioh::suite::Suite<SubAtomType>& suite,
// ioh::logger::ECDF<SubAtomType>& log,
// ioh::logger::ECDFStat<STAT>& stat
// ) :
// _eval(eval),
// _foundry(foundry),
// _pop(pop),
// _setup(setup),
// _ioh_suite(&suite),
// _ioh_log(log),
// _ioh_stat(stat)
// {
// static_assert(std::is_convertible<STAT,Fitness>::value);
// assert(_eval.has_logger());
// _ioh_log.track_suite(suite);
// }
virtual void operator()(EOT& sol)
{
if(not sol.invalid()) {
return;
}
// virtual void operator()(EOT& sol)
// {
// if(not sol.invalid()) {
// return;
// }
sol.fitness( call( sol ) );
}
// sol.fitness( call( sol ) );
// }
/** Update the suite pointer for a new one.
*
* This is useful if you assembled a ParadisEO algorithm
* and call it several times in an IOHexperimenter's loop across several suites.
* Instead of re-assembling your algorithm,
* just update the suite pointer.
*/
void suite( IOHprofiler_suite<SubAtomType> & suite )
{
_ioh_suite = &suite;
_ioh_log.target_suite(suite);
}
// /** Update the suite pointer for a new one.
// *
// * This is useful if you assembled a ParadisEO algorithm
// * and call it several times in an IOHexperimenter's loop across several suites.
// * Instead of re-assembling your algorithm,
// * just update the suite pointer.
// */
// void suite( ioh::suite::Suite<SubAtomType> & suite )
// {
// _ioh_suite = &suite;
// _ioh_log.target_suite(suite);
// }
protected:
eoEvalIOHproblem<SUBEOT>& _eval;
eoAlgoFoundry<SUBEOT>& _foundry;
eoPop<SUBEOT>& _pop;
eoIOHSetup<SUBEOT>& _setup;
// protected:
// eoEvalIOHproblem<SUBEOT>& _eval;
// eoAlgoFoundry<SUBEOT>& _foundry;
// eoPop<SUBEOT>& _pop;
// eoIOHSetup<SUBEOT>& _setup;
IOHprofiler_suite<SubAtomType> * _ioh_suite;
IOHprofiler_ecdf_logger<SubAtomType> & _ioh_log;
IOHprofiler_ecdf_stat<STAT>& _ioh_stat;
// ioh::suite::Suite<SubAtomType> * _ioh_suite;
// ioh::logger::ECDF<SubAtomType> & _ioh_log;
// ioh::logger::ECDFStat<STAT>& _ioh_stat;
virtual Fitness call(EOT& sol)
{
// Select an algorithm in the foundry
// from the given encoded solution.
std::vector<size_t> encoding;
std::transform(std::begin(sol), std::end(sol), std::back_inserter(encoding),
[](const SubAtomType& v) -> size_t {return static_cast<size_t>(std::floor(v));} );
_foundry.select(encoding);
// virtual Fitness call(EOT& sol)
// {
// // Select an algorithm in the foundry
// // from the given encoded solution.
// std::vector<size_t> encoding;
// std::transform(std::begin(sol), std::end(sol), std::back_inserter(encoding),
// [](const SubAtomType& v) -> size_t {return static_cast<size_t>(std::floor(v));} );
// _foundry.select(encoding);
// Evaluate the performance of the encoded algo instance
// on a whole IOH suite benchmark.
typename IOHprofiler_suite<SubAtomType>::Problem_ptr pb;
while( (pb = _ioh_suite->get_next_problem()) ) {
// // Evaluate the performance of the encoded algo instance
// // on a whole IOH suite benchmark.
// typename ioh::suite::Suite<SubAtomType>::Problem_ptr pb;
// while( (pb = _ioh_suite->get_next_problem()) ) {
// Setup selected operators.
_setup(_pop, pb);
// // Setup selected operators.
// _setup(_pop, pb);
// Consider a new problem.
_eval.problem(*pb); // Will call logger's target_problem.
// // Consider a new problem.
// _eval.problem(*pb); // Will call logger's target_problem.
// Actually solve it.
_foundry(_pop); // Will call the logger's write_line.
// There's no need to get back the best fitness from ParadisEO,
// because everything is captured on-the-fly by IOHprofiler.
}
// // Actually solve it.
// _foundry(_pop); // Will call the logger's write_line.
// // There's no need to get back the best fitness from ParadisEO,
// // because everything is captured on-the-fly by IOH experimenter.
// }
// Get back the evaluated performance.
// The explicit cast from STAT to Fitness which should exists.
return static_cast<Fitness>(_ioh_stat(_ioh_log.data()));
}
};
// // Get back the evaluated performance.
// // The explicit cast from STAT to Fitness which should exists.
// return static_cast<Fitness>(_ioh_stat(_ioh_log.data()));
// }
// };
#endif // _eoEvalIOH_h