Scaling.cpp

00001 /*          
00002  *             Copyright (C) 2005 Maarten Keijzer
00003  *
00004  *          This program is free software; you can redistribute it and/or modify
00005  *          it under the terms of version 2 of the GNU General Public License as 
00006  *          published by the Free Software Foundation. 
00007  *
00008  *          This program is distributed in the hope that it will be useful,
00009  *          but WITHOUT ANY WARRANTY; without even the implied warranty of
00010  *          MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00011  *          GNU General Public License for more details.
00012  *
00013  *          You should have received a copy of the GNU General Public License
00014  *          along with this program; if not, write to the Free Software
00015  *          Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
00016  */
00017 
#include "Scaling.h"
#include "TargetInfo.h"

#include <cmath>
#include <iostream>
00020 
00021 using namespace std;
00022 
00023 Scaling slope(const std::valarray<double>& x, const TargetInfo& targets) {
00024     
00025     double xx = 0.0;
00026     double xy = 0.0;
00027     
00028     const valarray<double>& y = targets.targets();
00029     
00030     for (unsigned i = 0; i < x.size(); ++i) {
00031         xx += x[i] * x[i];
00032         xy += x[i] * y[i];
00033     }
00034     
00035     if (xx < 1e-7) return Scaling(new LinearScaling(0.0,0.0));
00036     
00037     double b = xy / xx;
00038    
00039     return Scaling(new LinearScaling(0.0, b));
00040     
00041 }
00042 
00043 // Still needs proper testing with non-trivial lambda
00044 Scaling regularized_least_squares(const std::valarray<double>& inputs, const TargetInfo& targets, double lambda) {
00045     
00046     double n = inputs.size();
00047     
00048     valarray<double> x = inputs;
00049 
00050     double a,b,d;
00051     a=b=d=0;
00052 
00053     for (unsigned i = 0; i < n; ++i) {
00054         a += 1 + lambda;
00055         b += x[i];
00056         d += x[i] * x[i] + lambda;
00057     }
00058     
00059     //invert
00060     
00061     double ad_bc = a*d - b * b;
00062     // if ad_bc equals zero there's a problem
00063    
00064     if (ad_bc < 1e-17) return Scaling(new LinearScaling);
00065     
00066     double ai = d/ad_bc;
00067     double bi = -b/ad_bc;
00068     double di = a/ad_bc;
00069     double ci = bi;
00070     
00071     // Now multiply this inverted covariance matrix (C^-1) with x' * t
00072     
00073     std::valarray<double> ones = x;
00074     
00075     // calculate C^-1 * x' )
00076     for (unsigned i = 0; i < n; ++i) 
00077     {
00078         ones[i] = (ai + bi * x[i]);
00079         x[i]    = (ci + di * x[i]);
00080     }
00081 
00082     // results are in [ones, x], now multiply with y
00083 
00084     a = 0.0; // intercept
00085     b = 0.0; // slope
00086     
00087     const valarray<double>& t = targets.targets();
00088     
00089     for (unsigned i = 0; i < n; ++i)
00090     {
00091         a += ones[i] * t[i];
00092         b += x[i] * t[i];
00093     }
00094     
00095     return Scaling(new LinearScaling(a,b));
00096 }
00097 
00098 Scaling ols(const std::valarray<double>& y, const std::valarray<double>& t) {
00099     double n = y.size();
00100     
00101     double y_mean = y.sum() / n;
00102     double t_mean = t.sum() / n;
00103    
00104     std::valarray<double> y_var = (y - y_mean);
00105     std::valarray<double> t_var = (t - t_mean);
00106     std::valarray<double> cov = t_var * y_var;
00107     
00108     y_var *= y_var;
00109     t_var *= t_var;
00110     
00111     double sumvar = y_var.sum();
00112     
00113     if (sumvar == 0. || sumvar/n < 1e-7 || sumvar/n > 1e+7) // breakout when numerical problems are likely
00114         return Scaling(new LinearScaling(t_mean,0.));
00115 
00116     
00117     double b = cov.sum() / sumvar;
00118     double a = t_mean - b * y_mean;
00119     
00120     Scaling s = Scaling(new LinearScaling(a,b));
00121 
00122     return s;
00123 }
00124 
00125 Scaling ols(const std::valarray<double>& y, const TargetInfo& targets) {
00126     double n = y.size();
00127     
00128     double y_mean = y.sum() / n;
00129     
00130     std::valarray<double> y_var = (y - y_mean);
00131     std::valarray<double> cov = targets.tcov_part() * y_var;
00132     
00133     y_var *= y_var;
00134 
00135     double sumvar = y_var.sum();
00136     
00137     if (sumvar == 0. || sumvar/n < 1e-7 || sumvar/n > 1e+7) // breakout when numerical problems are likely
00138         return Scaling(new LinearScaling(targets.tmean(),0.));
00139 
00140     
00141     double b = cov.sum() / sumvar;
00142     double a = targets.tmean() - b * y_mean;
00143     
00144     if (!finite(b)) {
00145         
00146         cout << a << ' ' << b << endl;
00147         cout << sumvar << endl;
00148         cout << y_mean << endl;
00149         cout << cov.sum() << endl;
00150         exit(1);
00151     }
00152         
00153     Scaling s = Scaling(new LinearScaling(a,b));
00154 
00155     return s;
00156 }
00157 
00158 
00159 Scaling wls(const std::valarray<double>& inputs, const TargetInfo& targets) {
00160     
00161     std::valarray<double> x = inputs;
00162     const std::valarray<double>& w = targets.weights();
00163     
00164     unsigned n = x.size();
00165     // First calculate x'*W (as W is a diagonal matrix it's simply elementwise multiplication
00166     std::valarray<double> wx = targets.weights() * x;
00167     
00168     // Now x'*W is contained in [w,wx], calculate x' * W * x (the covariance)
00169     double a,b,d;
00170     a=b=d=0.0;
00171     
00172     for (unsigned i = 0; i < n; ++i)
00173     {
00174         a += w[i];
00175         b += wx[i];
00176         d += x[i] * wx[i];
00177     }
00178  
00179     //invert
00180     
00181     double ad_bc = a*d - b * b;
00182     // if ad_bc equals zero there's a problem
00183    
00184     if (ad_bc < 1e-17) return Scaling(new LinearScaling);
00185     
00186     double ai = d/ad_bc;
00187     double bi = -b/ad_bc;
00188     double di = a/ad_bc;
00189     double ci = bi;
00190     
00191     // Now multiply this inverted covariance matrix (C^-1) with x' * W * y
00192     
00193     // create alias to reuse the wx we do not need anymore
00194     std::valarray<double>& ones = wx;
00195     
00196     // calculate C^-1 * x' * W (using the fact that W is diagonal)
00197     for (unsigned i = 0; i < n; ++i) 
00198     {
00199         ones[i] = w[i]*(ai + bi * x[i]);
00200         x[i]    = w[i]*(ci + di * x[i]);
00201     }
00202 
00203     // results are in [ones, x], now multiply with y
00204 
00205     a = 0.0; // intercept
00206     b = 0.0; // slope
00207     
00208     const valarray<double>& t = targets.targets();
00209     
00210     for (unsigned i = 0; i < n; ++i)
00211     {
00212         a += ones[i] * t[i];
00213         b += x[i] * t[i];
00214     }
00215     
00216     return Scaling(new LinearScaling(a,b));
00217 }
00218 
00219 
00220 //Scaling med(const std::valarray<double>& inputs, const TargetInfo& targets);
00221 
00222 double mse(const std::valarray<double>& y, const TargetInfo& t) {
00223 
00224     valarray<double> residuals = t.targets()-y;
00225     residuals *= residuals;
00226     double sz = residuals.size();
00227     if (t.has_weights()) {
00228         residuals *= t.weights();
00229         sz = 1.0;
00230     }
00231         
00232     return residuals.sum() / sz;
00233 }
00234 
00235 double rms(const std::valarray<double>& y, const TargetInfo& t) {
00236     return sqrt(mse(y,t));
00237 }
00238     
00239 double mae(const std::valarray<double>& y, const TargetInfo& t) {
00240     valarray<double> residuals = abs(t.targets()-y);
00241     if (t.has_weights()) residuals *= t.weights();
00242     return residuals.sum() / residuals.size();
00243 }
00244 
00245 
00246 /*
00247     double standard_error(const std::valarray<double>& y, const std::pair<double,double>& scaling) {
00248         double a = scaling.first;
00249         double b = scaling.second;
00250         double n = y.size();
00251         double se = sqrt( pow(a+b*y-current_set->targets,2.0).sum() / (n-2));
00252         
00253         double mean_y = y.sum() / n;
00254         double sxx = pow( y - mean_y, 2.0).sum();
00255 
00256         return se / sqrt(sxx);
00257     }
00258   
00259     double scaled_mse(const std::valarray<double>& y){
00260         std::pair<double,double> scaling;
00261         return scaled_mse(y,scaling);
00262     }
00263     
00264     double scaled_mse(const std::valarray<double>& y, std::pair<double, double>& scaling)
00265     {
00266         scaling = scale(y);
00267         
00268         double a = scaling.first;
00269         double b = scaling.second;
00270         
00271         std::valarray<double> tmp = current_set->targets - a - b * y;
00272         tmp *= tmp;
00273         
00274         if (weights.size())
00275             return (weights * tmp).sum();
00276 
00277         return tmp.sum() / tmp.size();
00278     }
00279    
00280     double robust_mse(const std::valarray<double>& ny, std::pair<double, double>& scaling) {
00281         
00282         double smse = scaled_mse(ny,scaling);
00283 
00284         std::valarray<double> y = ny;
00285         // find maximum covariance case 
00286         double n = y.size();
00287 
00288         int largest = 0;
00289         
00290         {
00291             double y_mean = y.sum() / n;
00292         
00293             std::valarray<double> y_var = (y - y_mean);
00294             std::valarray<double> cov = tcov * y_var;
00295         
00296             std::valarray<bool> maxcov = cov == cov.max();
00297         
00298             for (unsigned i = 0; i < maxcov.size(); ++i) {
00299                 if (maxcov[i]) {
00300                     largest = i;
00301                     break;
00302                 }
00303             }
00304         }
00305         
00306         double y_mean = (y.sum() - y[largest]) / (n-1);
00308         y[largest] = y_mean; // disappears from covariance calculation
00308         
00309         std::valarray<double> y_var = (y - y_mean);
00310         std::valarray<double> cov = tcov * y_var;
00311         y_var *= y_var;
00312 
00313         double sumvar = y_var.sum();
00314         
00315         if (sumvar == 0. || sumvar/n < 1e-7 || sumvar/n > 1e+7) // breakout when numerical problems are likely
00316             return worst_performance();
00317         
00318         double b = cov.sum() / sumvar;
00319         double a = tmean - b * y_mean;
00320         
00321         std::valarray<double> tmp = current_set->targets - a - b * y;
00322         tmp[largest] = 0.0;
00323         tmp *= tmp;
00324         
00325         double smse2 = tmp.sum() / (tmp.size()-1);
00326         
00327         static std::ofstream os("smse.txt");
00328         os << smse << ' ' << smse2 << '\n';
00329         
00330         if (smse2 > smse) {
00331             return worst_performance();
00332             //std::cerr << "overfit? " << smse << ' ' << smse2 << '\n';
00333         }
00334         
00335         scaling.first = a;
00336         scaling.second = b;
00337         
00338         return smse2;
00339     }
00340     
00341     class Sorter {
00342         const std::valarray<double>& scores;
00343         public:
00344             Sorter(const std::valarray<double>& _scores) : scores(_scores) {}
00345             
00346             bool operator()(unsigned i, unsigned j) const {
00347                 return scores[i] < scores[j];
00348             }
00349     };
00350     
00351     double coc(const std::valarray<double>& y) {
00352         std::vector<unsigned> indices(y.size());
00353         for (unsigned i = 0; i < y.size(); ++i) indices[i] = i;
00354         std::sort(indices.begin(), indices.end(), Sorter(y));
00355         
00356         const std::valarray<double>& targets = current_set->targets;
00357         
00358         double neg = 1.0 - targets[indices[0]];
00359         double pos = targets[indices[0]];
00360         
00361         double cumpos = 0;
00362         double cumneg = 0;
00363         double sum=0;
00364         
00365         double last_score = y[indices[0]];
00366         
00367         for(unsigned i = 1; i < targets.size(); ++i) {
00368                 
00369             if (fabs(y[indices[i]] - last_score) < 1e-9) { // we call it tied
00370                 pos += targets[indices[i]];
00371                 neg += 1.0 - targets[indices[i]];
00372                 
00373                 if (i < targets.size()-1)
00374                     continue;
00375             }
00376             sum += pos * cumneg + (pos * neg) * 0.5;
00377             cumneg += neg;
00378             cumpos += pos;
00379             pos = targets[indices[i]];
00380             neg = 1.0 - targets[indices[i]];
00381             last_score = y[indices[i]];
00382         }
00383         
00384         return sum / (cumneg * cumpos);
00385     }
00386    
00387     // iterative re-weighted least squares (for parameters.classification)
00388     double irls(const std::valarray<double>& scores, std::pair<double,double>& scaling) {
00389         const std::valarray<double>& t = current_set->targets;
00390         
00391         std::valarray<double> e(scores.size());
00392         std::valarray<double> u(scores.size()); 
00393         std::valarray<double> w(scores.size());
00394         std::valarray<double> z(scores.size());
00395         
00396         parameters.use_irls = false; parameters.classification=false;
00397         scaling = scale(scores);
00398         parameters.use_irls=true;parameters.classification=true;
00399         
00400         if (scaling.second == 0.0) return worst_performance(); 
00401         
00402         for (unsigned i = 0; i < 10; ++i) {
00403             e = exp(scaling.first + scaling.second*scores);
00404             u = e / (e + exp(-(scaling.first + scaling.second * scores)));
00405             w = u*(1.-u);
00406             z = (t-u)/w;
00407             scaling = wls(scores, u, w);
00408             //double ll = (log(u)*t + (1.-log(u))*(1.-t)).sum();
00409             //std::cout << "Scale " << i << ' ' << scaling.first << " " << scaling.second << " LL " << 2*ll << std::endl;
00410         }
00411 
00412         // log-likelihood
00413         u = exp(scaling.first + scaling.second*scores) / (1 + exp(scaling.first + scaling.second*scores));
00414         double ll = (log(u)*t + (1.-log(u))*(1.-t)).sum();
00415         return 2*ll;
00416     }
00417 */

Generated on Thu Oct 19 05:06:42 2006 for EO by  doxygen 1.3.9.1