/** @file
 *  @brief Test various boosting ECOC algorithms.
 *
 *  $Id: runecoc.cpp 2637 2006-02-13 01:59:50Z ling $
 */

// To compile, set VERBOSE_OUTPUT to 0 in learnmodel.h (to avoid
// too much output), and BOOSTING_OUTPUT_CACHE to 0 in boosting.h
// (to avoid using too much memory).
// Probably also increase the cache size in SVM to 256M.

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// header paths below assume the usual <lemga/...> install layout
#include <lemga/random.h>
#include <lemga/datafeeder.h>
#include <lemga/stump.h>
#include <lemga/perceptron.h>
#include <lemga/svm.h>
#include <lemga/crossval.h>
#include <lemga/adaboost.h>
#include <lemga/adaboost_ecoc.h>
#include <lemga/adaboost_erp.h>

// give out individual errors on test examples so that
// we may plot how the hypotheses classify examples
#define OUTPUT_TEST_CLASS 0

#define CLASS_TYPE(lm,str)  ((lm).id().find(str) != std::string::npos)

int main (int argc, char* argv[]) {
    if (argc < 6) {
        std::cerr << "Usage: " << argv[0]
                  << " data perm n_train #ite output [base] [multi]\n";
        return -1;
    }

    // open the data file and the permutation file
    std::ifstream fd(argv[1]);
    if (!fd.is_open()) {
        std::cerr << "Data file (" << argv[1] << ") open error\n";
        return -2;
    }
    lemga::DataFeeder df(fd);

    std::ifstream pd(argv[2]);
    UINT n_perm = 99999999;
    if (pd.is_open())
        df.set_permutation(pd);
    else {
        // no permutation file: fall back to treating argv[2] as the
        // number of (random) permutations to run
        std::cerr << "Permutation file (" << argv[2] << ") open error\n";
        n_perm = std::atoi(argv[2]);
    }

    const UINT dsize = df.size();
    const UINT n_tr = std::min(dsize, (UINT) std::atoi(argv[3]));
    std::cout << dsize << " data examples loaded.\n";
    std::cout << "  " << n_tr << " examples for training, "
              << (dsize - n_tr) << " examples for testing.\n";
    df.set_train_size(n_tr);
    df.do_normalize();
    const UINT n_in = df.data()->x(0).size();

    const UINT n_agg = std::atoi(argv[4]);
    int base = -1, mult = -1;
    if (argc > 6) base = std::atoi(argv[6]);
    if (argc > 7) mult = std::atoi(argv[7]);

    set_seed(0);

    // set up base models: pbase[category][models]
    std::vector< std::vector<lemga::LearnModel*> > pbase;
#define NEW_BASE_CAT     pbase.push_back(std::vector<lemga::LearnModel*>())
#define NEW_BASE(lm,var) lm* var = new lm; pbase.back().push_back(var)

    NEW_BASE_CAT;
    NEW_BASE(lemga::Stump, st);

    NEW_BASE_CAT;
    NEW_BASE(lemga::Perceptron, perc);
    perc->set_parameter(0.002, 0, 200);
    perc->set_train_method(lemga::Perceptron::RCD_BIAS);
    perc->set_weight(lemga::Input(n_in+1, 0));  // zero initial weight

    NEW_BASE_CAT;
    for (UINT ite = 10; ite < 51; ite += 40) {  // 10 and 50 iterations
        NEW_BASE(lemga::AdaBoost, ab);
        ab->set_base_model(*st);
        ab->set_max_models(ite);
    }

    NEW_BASE_CAT;
    for (REAL c = 1.0/8; c < (1<<11); c *= 8) {
        NEW_BASE(lemga::SVM, svm);
        svm->set_kernel(lemga::kernel::Stump());
        svm->set_C(c);
    }

    NEW_BASE_CAT;
    for (REAL c = 1.0/8; c < (1<<11); c *= 8) {
        NEW_BASE(lemga::SVM, svm);
        svm->set_kernel(lemga::kernel::Perceptron());
        svm->set_C(c);
    }

    // set up multiclass aggregation methods
    std::vector<lemga::LearnModel*> pmult;
#define NEW_AGG(lm,var)  lm* var = new lm; pmult.push_back(var)

    NEW_AGG(lemga::MultiClass_ECOC, ovo);
    ovo->set_ECOC_table(lemga::ONE_VS_ONE);
    NEW_AGG(lemga::MultiClass_ECOC, ova);
    ova->set_ECOC_table(lemga::ONE_VS_ALL);
    NEW_AGG(lemga::AdaBoost_ECOC, ada_rand);
    NEW_AGG(lemga::AdaBoost_ECOC, ada_maxc);
    ada_rand->set_partition_method(lemga::AdaBoost_ECOC::RANDOM_HALF);
    ada_maxc->set_partition_method(lemga::AdaBoost_ECOC::MAX_CUT);
    for (UINT l = 2; l < 5; ++l) {
        NEW_AGG(lemga::AdaBoost_ERP, p);
        p->set_partition_method(lemga::AdaBoost_ERP::MAX_2);
        p->set_lr_step(l);
    }
    for (UINT l = 2; l < 5; ++l) {
        NEW_AGG(lemga::AdaBoost_ERP, p);
        p->set_partition_method(lemga::AdaBoost_ERP::RANDOM_2);
        p->set_lr_step(l);
    }
    for (UINT l = 2; l < 5; ++l) {
        NEW_AGG(lemga::AdaBoost_ERP, p);
        p->set_partition_method(lemga::AdaBoost_ERP::RANDOM_HALF);
        p->set_lr_step(l);
    }
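    // For reference, the optional [base] and [multi] arguments pick one
    // category from the lists built above; the mapping below is inferred
    // from the construction order, not stated in the original source:
    //   base:  0 = stump, 1 = perceptron,
    //          2 = AdaBoost of stumps (10 and 50 rounds),
    //          3 = SVM with stump kernel, 4 = SVM with perceptron kernel
    //          (each SVM category sweeps C over 1/8, 1, 8, 64, 512)
    //   multi: 0 = one-vs-one, 1 = one-vs-all,
    //          2 = AdaBoost.ECC (random half), 3 = AdaBoost.ECC (max-cut),
    //          4-6 = AdaBoost.ERP max-2 (lr_step 2,3,4),
    //          7-9 = AdaBoost.ERP random-2, 10-12 = AdaBoost.ERP random-half
    // The two-character "serial numbers" below encode these indices as
    // hex digits, so multi 10-12 appear as A-C in output file names.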
    // set up the combinations & cross-validation
    std::vector<lemga::LearnModel*> plm;  // combined models
    std::vector<std::string> nlm;         // the model "serial numbers"
    for (UINT b = 0; b < pbase.size(); ++b) {
        if (base >= 0 && b != (UINT) base) continue;
        for (UINT m = 0; m < pmult.size(); ++m) {
            if (mult >= 0 && m != (UINT) mult) continue;

            char ser[1024];
            std::sprintf(ser, "%lX%lX", b, m);
            assert(std::strlen(ser) == 2 && b < 16 && m < 16);

            // if we have both training and test sets, we may do CV
            bool do_cv = (n_tr < 0.85 * dsize) && (pbase[b].size() > 1)
                && CLASS_TYPE(*pbase[b][0], "SVM");
            lemga::HoldoutCrossVal* cv = 0;
            if (do_cv) cv = new lemga::HoldoutCrossVal(0.3, 10);

            lemga::MultiClass_ECOC& agg = (lemga::MultiClass_ECOC&) *pmult[m];
            for (UINT i = 0; i < pbase[b].size(); ++i) {
                agg.set_base_model(*pbase[b][i]);
                agg.set_max_models(n_agg);
                if (do_cv)
                    cv->add_model(agg);  // will be clone()'d
                else {
                    plm.push_back(agg.clone());
                    if (pbase[b].size() > 1)
                        std::sprintf(ser+2, "_%lu", i+1);
                    nlm.push_back(ser);
                }
            }
            if (do_cv) {
                plm.push_back(cv);
                std::sprintf(ser+2, "_cv");
                nlm.push_back(ser);
            }
        }
    }
    pbase.clear(); pmult.clear();

    for (UINT perm_idx = 1; perm_idx <= n_perm; ++perm_idx) {
        lemga::pDataSet p_tr, p_te;
        if (!df.next_train_test(p_tr, p_te)) return 0;
        assert(n_tr == p_tr->size() && dsize-n_tr == p_te->size());

        // create the output file
        char oname[1024];
        std::sprintf(oname, "%s_%lu", argv[5], perm_idx);
        std::ofstream fout(oname);
        if (!fout.is_open()) {
            std::cerr << "Output file (" << oname << ") create error\n";
            return -7;
        }

        // to speed up, combine training and test sets for error output
        lemga::DataSet* ds = new lemga::DataSet(*p_tr);
        *ds += *p_te;
        lemga::pDataSet p_ds = ds;
        assert(p_ds->size() == dsize);

        // let training begin
        for (UINT i = 0; i < plm.size(); ++i) {
            lemga::pLearnModel p = plm[i]->clone();
            //p->initialize();  // don't want to initialize perceptrons
            p->set_train_data(p_tr);
            // On Linux 2.6, the clock() precision is 0.01 seconds.
            std::clock_t clk = std::clock();
            p->train();
            REAL elapsed = (std::clock() - clk) / (REAL) CLOCKS_PER_SEC;

            // for cross-validation wrappers, report on the best model found
            lemga::MultiClass_ECOC& agg = (lemga::MultiClass_ECOC&)
                (CLASS_TYPE(*p, "CrossVal")?
                 ((lemga::CrossVal&) *p).best_model() : *p);

#if OUTPUT_TEST_CLASS
            std::string cname = std::string(oname) + '_' + nlm[i];
            std::ofstream fcls(cname.c_str());
            if (!fcls.is_open()) {
                std::cerr << "Class file (" << cname << ") create error\n";
                return -8;
            }
#endif

            // record the cost at every aggregation size
            const UINT ite = agg.size();
            std::vector<REAL> trc(ite+1);
            for (UINT j = 1; j <= ite; ++j) {
                agg.set_aggregation_size(j);
                trc[j] = agg.cost();
            }

            // output errors
            agg.set_train_data(p_ds);
            for (UINT j = 1; j <= ite; ++j) {
                agg.set_aggregation_size(j);
#if OUTPUT_TEST_CLASS
                {
                    for (UINT c = 0; c < agg.n_class(); ++c)
                        fcls << agg.ECOC_table()[c][j-1] << ' ';
                    fcls << "nan";
                    for (UINT k = n_tr; k < dsize; ++k)
                        fcls << ' ' << agg.get_output(k)[0];
                    fcls << '\n';
                }
#endif
                UINT tre = 0, tee = 0;
                for (UINT k = 0; k < n_tr; ++k)
                    tre += (agg.c_error(agg.get_output(k), p_ds->y(k)) > 0);
                for (UINT k = n_tr; k < dsize; ++k)
                    tee += (agg.c_error(agg.get_output(k), p_ds->y(k)) > 0);
                fout << tre << ' ' << tee << ' ' << trc[j] << ' '
                     << agg.model_weight(j-1) << ' ';
            }
            fout << p_tr->size() << ' ' << p_te->size() << ' '
                 << agg.n_class() << ' ' << elapsed << '\n';
        }
    }
}
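// Example invocation (file names are hypothetical):
//   ./runecoc abalone.data abalone.perm 3000 200 result
// trains every base/multiclass combination on the first 3000 examples of
// each permutation, runs each aggregation for 200 iterations, and writes
// the per-iteration train/test errors to result_1, result_2, ... (one
// file per permutation).  Appending, e.g., "3 2" restricts the run to
// base category 3 (stump-kernel SVMs) and aggregation method 2
// (AdaBoost.ECC with random-half partitions).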