Iterated race is an extension of the Iterated F-race method for the automatic configuration of optimization algorithms, that is, (offline) tuning their parameters by finding the most appropriate settings given a set of instances of an optimization problem. M. López-Ibáñez, J. Dubois-Lacoste, L. Pérez Cáceres, T. Stützle, and M. Birattari (2016) <doi:10.1016/j.orp.2016.09.002>.

Details

License: GPL (>= 2)

References

Manuel López-Ibáñez, Jérémie Dubois-Lacoste, Leslie Pérez Cáceres, Thomas Stützle, and Mauro Birattari. The irace package: Iterated Racing for Automatic Algorithm Configuration. Operations Research Perspectives, 2016. doi:10.1016/j.orp.2016.09.002

Manuel López-Ibáñez, Jérémie Dubois-Lacoste, Thomas Stützle, and Mauro Birattari. The irace package, Iterated Race for Automatic Algorithm Configuration. Technical Report TR/IRIDIA/2011-004, IRIDIA, Université Libre de Bruxelles, Belgium, 2011.

Manuel López-Ibáñez and Thomas Stützle. The Automatic Design of Multi-Objective Ant Colony Optimization Algorithms. IEEE Transactions on Evolutionary Computation, 2012.

See also

irace.main to start irace with a given scenario.
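
For example, a minimal sketch (assuming a scenario file "scenario.txt" exists in the working directory):

 scenario <- readScenario(filename = "scenario.txt")
 irace.main(scenario = scenario)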

Author

Maintainers: Manuel López-Ibáñez and Leslie Pérez Cáceres <irace-package@googlegroups.com>

Examples

 #######################################################################
 # This example illustrates how to tune the parameters of the simulated
 # annealing algorithm (SANN) provided by the optim() function in the
 # R base package.  The goal in this example is to optimize instances of
 # the following family:
 # f(x) = lambda * f_rastrigin(x) + (1 - lambda) * f_rosenbrock(x)
 # where lambda follows a normal distribution whose mean is 0.9 and
 # standard deviation is 0.02. f_rastrigin and f_rosenbrock are the
 # well-known Rastrigin and Rosenbrock benchmark functions (taken from
 # the cmaes package). In this scenario, different instances are given
 # by different values of lambda.
 #######################################################################
 ## First we provide an implementation of the functions to be optimized:
 f_rosenbrock <- function (x) {
   d <- length(x)
   z <- x + 1
   hz <- z[1:(d - 1)]
   tz <- z[2:d]
   s <- sum(100 * (hz^2 - tz)^2 + (hz - 1)^2)
   return(s)
 }
 f_rastrigin <- function (x) {
   sum(x * x - 10 * cos(2 * pi * x) + 10)
 }
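
 ## Quick sanity check (not part of the original example): both benchmark
 ## functions attain their minimum of 0 at the origin; the Rosenbrock
 ## implementation above is shifted (z <- x + 1) so that its optimum lies
 ## at x = 0 rather than at x = 1.
 f_rastrigin(rep(0, 3))   # 0
 f_rosenbrock(rep(0, 3))  # 0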
 
 ## We generate 20 instances (in this case, weights):
 weights <- rnorm(20, mean = 0.9, sd = 0.02)
 
 ## On this set of instances, we are interested in optimizing two
 ## parameters of the SANN algorithm: tmax and temp. We set up the
 ## parameter space as follows (tmax is an integer sampled on a
 ## logarithmic scale within [1, 5000]; temp is a real value in [0, 100]):
 parameters_table <- '
 tmax "" i,log (1, 5000)
 temp "" r (0, 100)
 '
 
 ## We use the irace function readParameters to read this table:
 parameters <- readParameters(text = parameters_table)
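
 ## Optional check (not part of the original example): inspect the object
 ## returned by readParameters. Its exact internal structure may differ
 ## across irace versions, so we only look at the top level.
 str(parameters, max.level = 1)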
 
 ## Next, we define the function that will evaluate each candidate
 ## configuration on a single instance. For simplicity, we restrict the
 ## functions to three dimensions and set the maximum number of SANN
 ## iterations to 1000.
 target_runner <- function(experiment, scenario)
 {
   instance <- experiment$instance
   configuration <- experiment$configuration
 
   D <- 3
   par <- runif(D, min=-1, max=1)
   fn <- function(x) {
     weight <- instance
     return(weight * f_rastrigin(x) + (1 - weight) * f_rosenbrock(x))
   }
  res <- stats::optim(par, fn, method = "SANN",
                      control = list(maxit = 1000,
                                     tmax = as.numeric(configuration[["tmax"]]),
                                     temp = as.numeric(configuration[["temp"]])))
  ## The return value must be a list with at least the element 'cost'
  ## (the output interface introduced in irace 2.0). The list may also contain:
  ## - 'time': the execution time, required if irace is called with 'maxTime'.
  ## - 'error': a string used to report an error.
  ## - 'outputRaw': a string used to report the raw output of calls to
  ##   an external program or function.
  ## - 'call': a string used to report how target_runner called the
  ##   external program or function.
   return(list(cost = res$value))
 }
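
 ## Quick illustration (not part of the original example; checkIraceScenario()
 ## below performs a similar test automatically): target_runner can be called
 ## directly on a single instance with a manually chosen configuration and
 ## should return a list with a numeric element 'cost'.
 target_runner(experiment = list(instance = weights[1],
                                 configuration = data.frame(tmax = 10, temp = 10)),
               scenario = list())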
 
 ## We define a configuration scenario by setting targetRunner to the
 ## function defined above, instances to the first 10 random weights, and
 ## a maximum budget of 'maxExperiments' calls to targetRunner.
 scenario <- list(targetRunner = target_runner,
                  instances = weights[1:10],
                  maxExperiments = 500,
                  # Do not create a logFile
                  logFile = "")
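
 ## For illustration only (not part of the original example): further scenario
 ## options, such as the number of parallel workers ('parallel') or the random
 ## seed ('seed'), could be added in the same way. 'scenario_par' is just a
 ## hypothetical variant and is not used below.
 scenario_par <- modifyList(scenario, list(parallel = 2, seed = 42))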
 
 ## We check that the scenario is valid. This will also try to execute
 ## target_runner.
 checkIraceScenario(scenario, parameters = parameters)
#> # 2023-01-21 23:37:27 UTC: Checking scenario
#> ## irace scenario:
#> scenarioFile = "./scenario.txt"
#> execDir = "/tmp/RtmpuB671g/file4ced5b2a27bb/reference"
#> parameterFile = "/tmp/RtmpuB671g/file4ced5b2a27bb/reference/parameters.txt"
#> initConfigurations = NULL
#> configurationsFile = ""
#> logFile = ""
#> recoveryFile = ""
#> instances = c(0.917520159863561, 0.901408317909986, 0.859593506103837, 0.901559247301797, 0.900660659011413, 0.886717480915505, 0.91330308553313, 0.862832906492732, 0.924682694955241, 0.909503158662737)
#> trainInstancesDir = "./Instances"
#> trainInstancesFile = ""
#> sampleInstances = TRUE
#> testInstancesDir = ""
#> testInstancesFile = ""
#> testInstances = NULL
#> testNbElites = 1L
#> testIterationElites = FALSE
#> testType = "friedman"
#> firstTest = 5L
#> blockSize = 1L
#> eachTest = 1L
#> targetRunner = function (experiment, scenario) {    instance <- experiment$instance    configuration <- experiment$configuration    D <- 3    par <- runif(D, min = -1, max = 1)    fn <- function(x) {        weight <- instance        return(weight * f_rastrigin(x) + (1 - weight) * f_rosenbrock(x))    }    res <- stats::optim(par, fn, method = "SANN", control = list(maxit = 1000,         tmax = as.numeric(configuration[["tmax"]]), temp = as.numeric(configuration[["temp"]])))    return(list(cost = res$value))}
#> targetRunnerLauncher = ""
#> targetCmdline = "{configurationID} {instanceID} {seed} {instance} {bound} {targetRunnerArgs}"
#> targetRunnerRetries = 0L
#> targetRunnerData = ""
#> targetRunnerParallel = NULL
#> targetEvaluator = NULL
#> deterministic = FALSE
#> maxExperiments = 500L
#> maxTime = 0L
#> budgetEstimation = 0.02
#> minMeasurableTime = 0.01
#> parallel = 0L
#> loadBalancing = TRUE
#> mpi = FALSE
#> batchmode = "0"
#> digits = 4L
#> quiet = FALSE
#> debugLevel = 2L
#> seed = NA_character_
#> softRestart = TRUE
#> softRestartThreshold = 1e-04
#> elitist = TRUE
#> elitistNewInstances = 1L
#> elitistLimit = 2L
#> repairConfiguration = NULL
#> capping = FALSE
#> cappingType = "median"
#> boundType = "candidate"
#> boundMax = NULL
#> boundDigits = 0L
#> boundPar = 1L
#> boundAsTimeout = TRUE
#> postselection = 0
#> aclib = FALSE
#> nbIterations = 0L
#> nbExperimentsPerIteration = 0L
#> minNbSurvival = 0L
#> nbConfigurations = 0L
#> mu = 5L
#> confidence = 0.95
#> ## end of irace scenario
#> # checkIraceScenario(): 'parameters' provided by user. Parameter file '/tmp/RtmpuB671g/file4ced5b2a27bb/reference/parameters.txt' will be ignored
#> # 2023-01-21 23:37:27 UTC: Checking target runner.
#> # Executing targetRunner ( 2 times)...
#> # targetRunner returned:
#> [[1]]
#> [[1]]$cost
#> [1] 4.57627945191252
#> 
#> [[1]]$time
#> [1] NA
#> 
#> 
#> [[2]]
#> [[2]]$cost
#> [1] 3.1421277904998
#> 
#> [[2]]$time
#> [1] NA
#> 
#> 
#> # 2023-01-21 23:37:27 UTC: Check successful.
#> [1] TRUE
 
 # \donttest{
 ## We are now ready to launch irace by calling the irace() function,
 ## which prints information about its progress. This may take a few
 ## minutes, so it is not run by default.
 tuned_confs <- irace(scenario = scenario, parameters = parameters)
#> # 2023-01-21 23:37:27 UTC: Initialization
#> # Elitist race
#> # Elitist new instances: 1
#> # Elitist limit: 2
#> # nbIterations: 3
#> # minNbSurvival: 3
#> # nbParameters: 2
#> # seed: 1221309612
#> # confidence level: 0.95
#> # budget: 500
#> # mu: 5
#> # deterministic: FALSE
#> 
#> # 2023-01-21 23:37:27 UTC: Iteration 1 of 3
#> # experimentsUsedSoFar: 0
#> # remainingBudget: 500
#> # currentBudget: 166
#> # nbConfigurations: 27
#> # Markers:
#>      x No test is performed.
#>      c Configurations are discarded only due to capping.
#>      - The test is performed and some configurations are discarded.
#>      = The test is performed but no configuration is discarded.
#>      ! The test is performed and configurations could be discarded but elite configurations are preserved.
#>      . All alive configurations are elite and nothing is discarded
#> 
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> | |   Instance|      Alive|       Best|       Mean best| Exp so far|  W time|  rho|KenW|  Qvar|
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> |x|          1|         27|         19|   0.02933603183|         27|00:00:00|   NA|  NA|    NA|
#> |x|          2|         27|         19|    0.1507706290|         54|00:00:00|+0.01|0.51|1.1286|
#> |x|          3|         27|         19|    0.5140751819|         81|00:00:00|+0.18|0.45|0.9997|
#> |x|          4|         27|         19|    0.6710436798|        108|00:00:00|+0.21|0.41|0.8923|
#> |-|          5|         17|          1|     1.763213119|        135|00:00:00|-0.09|0.13|1.0261|
#> |=|          6|         17|         27|     2.788778965|        152|00:00:00|-0.02|0.15|0.9708|
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> Best-so-far configuration:          27    mean value:      2.788778965
#> Description of the best-so-far configuration:
#>    .ID. tmax    temp .PARENT.
#> 27   27   96 59.9639       NA
#> 
#> # 2023-01-21 23:37:28 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks):
#>    tmax    temp
#> 27   96 59.9639
#> 1     4 44.3640
#> 16    3 59.1126
#> # 2023-01-21 23:37:28 UTC: Iteration 2 of 3
#> # experimentsUsedSoFar: 152
#> # remainingBudget: 348
#> # currentBudget: 174
#> # nbConfigurations: 27
#> # Markers:
#>      x No test is performed.
#>      c Configurations are discarded only due to capping.
#>      - The test is performed and some configurations are discarded.
#>      = The test is performed but no configuration is discarded.
#>      ! The test is performed and configurations could be discarded but elite configurations are preserved.
#>      . All alive configurations are elite and nothing is discarded
#> 
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> | |   Instance|      Alive|       Best|       Mean best| Exp so far|  W time|  rho|KenW|  Qvar|
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> |x|          7|         27|         43|    0.2682418934|         27|00:00:00|   NA|  NA|    NA|
#> |x|          6|         27|         27|    0.6539579548|         51|00:00:00|+0.08|0.54|1.0522|
#> |x|          2|         27|         27|    0.4976524621|         75|00:00:00|+0.07|0.38|1.0198|
#> |x|          4|         27|         27|     1.141577769|         99|00:00:00|+0.05|0.28|0.9691|
#> |=|          1|         27|          1|     1.311740919|        123|00:00:00|+0.03|0.22|0.9511|
#> |=|          3|         27|          1|     1.099488650|        147|00:00:00|+0.07|0.23|0.8879|
#> |-|          5|         21|         27|     2.503686301|        171|00:00:00|-0.08|0.08|1.0475|
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> Best-so-far configuration:          27    mean value:      2.503686301
#> Description of the best-so-far configuration:
#>    .ID. tmax    temp .PARENT.
#> 27   27   96 59.9639       NA
#> 
#> # 2023-01-21 23:37:30 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks):
#>    tmax    temp
#> 27   96 59.9639
#> 1     4 44.3640
#> 43   33 66.1013
#> # 2023-01-21 23:37:30 UTC: Iteration 3 of 3
#> # experimentsUsedSoFar: 323
#> # remainingBudget: 177
#> # currentBudget: 177
#> # nbConfigurations: 24
#> # Markers:
#>      x No test is performed.
#>      c Configurations are discarded only due to capping.
#>      - The test is performed and some configurations are discarded.
#>      = The test is performed but no configuration is discarded.
#>      ! The test is performed and configurations could be discarded but elite configurations are preserved.
#>      . All alive configurations are elite and nothing is discarded
#> 
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> | |   Instance|      Alive|       Best|       Mean best| Exp so far|  W time|  rho|KenW|  Qvar|
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> |x|          8|         24|         54|   0.02906690170|         24|00:00:00|   NA|  NA|    NA|
#> |x|          7|         24|         66|    0.2476010661|         45|00:00:00|+0.11|0.55|0.9091|
#> |x|          3|         24|         66|    0.3507787801|         66|00:00:00|+0.04|0.36|0.9850|
#> |x|          4|         24|         66|     1.075658782|         87|00:00:00|+0.07|0.31|0.9069|
#> |=|          6|         24|         66|    0.9152750112|        108|00:00:00|+0.13|0.30|0.9038|
#> |-|          1|         14|         72|     1.002482246|        129|00:00:00|-0.10|0.09|0.9961|
#> |=|          2|         14|         72|     1.287705078|        140|00:00:00|-0.06|0.09|0.9820|
#> |=|          5|         14|         72|     1.169620957|        151|00:00:00|-0.04|0.09|0.9882|
#> |=|          9|         14|         72|     1.074675168|        165|00:00:00|-0.03|0.08|0.9823|
#> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+
#> Best-so-far configuration:          72    mean value:      1.074675168
#> Description of the best-so-far configuration:
#>    .ID. tmax    temp .PARENT.
#> 72   72    4 37.7034        1
#> 
#> # 2023-01-21 23:37:31 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks):
#>    tmax    temp
#> 72    4 37.7034
#> 70   44 64.2534
#> 43   33 66.1013
#> # 2023-01-21 23:37:31 UTC: Stopped because there is not enough budget left to race more than the minimum (3)
#> # You may either increase the budget or set 'minNbSurvival' to a lower value
#> # Iteration: 4
#> # nbIterations: 4
#> # experimentsUsedSoFar: 488
#> # timeUsed: 0
#> # remainingBudget: 12
#> # currentBudget: 12
#> # number of elites: 3
#> # nbConfigurations: 3
#> # Total CPU user time: 4.492, CPU sys time: 0, Wall-clock time: 4.493
 
 ## We can print the best configurations found by irace as follows:
 configurations.print(tuned_confs)
#>    tmax    temp
#> 72    4 37.7034
#> 70   44 64.2534
#> 43   33 66.1013
 
 ## We can evaluate the quality of the best configuration found by
 ## irace versus the default configuration of the SANN algorithm on
 ## the other 10 instances previously generated.
 ## To do so, first we apply the default configuration of the SANN
 ## algorithm to these instances:
 test <- function(configuration)
 {
   res <- lapply(weights[11:20],
                 function(x) target_runner(
                               experiment = list(instance = x,
                                                 configuration = configuration),
                               scenario = scenario))
   return (sapply(res, getElement, name = "cost"))
 }
 default <- test(data.frame(tmax = 10, temp = 10))
 ## We extract and apply the winning configuration found by irace
 ## to these instances:
 tuned <- test(removeConfigurationsMetaData(tuned_confs[1,]))
 
 ## Finally, we can use a boxplot to compare the quality obtained with
 ## the default parametrization of SANN against the quality obtained with
 ## the best configuration found by irace.
 boxplot(list(default = default, tuned = tuned))
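
 ## Optionally (not part of the original example), a paired test from base R
 ## can complement the visual comparison, since both configurations were run
 ## on the same 10 test instances:
 wilcox.test(default, tuned, paired = TRUE)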

# }