// Add two binary numbers given as digit strings ('0'/'1', most-significant
// digit first) and return their sum as a binary string.
//
// a, b    binary digit strings; either may be empty and contributes 0.
// returns the binary representation of a + b (returns "0" when both
//         inputs are empty, rather than an empty string).
//
// A single loop walks both strings from the least-significant end, which
// covers unequal lengths and the final carry without the separate tail
// loops (and the non-standard _atoi/_itoa helpers) of the old version.
string addBinary(string a, string b) {
    string result;
    int i = (int)a.size() - 1;  // next digit of a (from the right)
    int j = (int)b.size() - 1;  // next digit of b (from the right)
    int carry = 0;
    while (i >= 0 || j >= 0 || carry) {
        int sum = carry;
        if (i >= 0) sum += a[i--] - '0';
        if (j >= 0) sum += b[j--] - '0';
        carry = sum >> 1;                      // sum / 2
        result.push_back(char('0' + (sum & 1))); // sum % 2
    }
    if (result.empty())
        result.push_back('0'); // both inputs empty -> canonical "0"
    // Digits were emitted least-significant first; reverse into place.
    std::reverse(result.begin(), result.end());
    return result;
}
// Compute the gradient of a^T K b void Permutohedral::gradient ( float* df, const float * a, const float* b, int value_size ) const { // Shift all values by 1 such that -1 -> 0 (used for blurring) float * values = new float[ (M_+2)*value_size ]; float * new_values = new float[ (M_+2)*value_size ]; // Set the results to 0 std::fill( df, df+N_*d_, 0.f ); // Initialize some constants std::vector<float> scale_factor( d_ ); float inv_std_dev = sqrt(2.0 / 3.0)*(d_+1); for( int i=0; i<d_; i++ ) scale_factor[i] = 1.0 / sqrt( double((i+2)*(i+1)) ) * inv_std_dev; // Alpha is a magic scaling constant multiplied by down_factor float alpha = 1.0f / (1+powf(2, -d_)) / (d_+1); for( int dir=0; dir<2; dir++ ) { for( int i=0; i<(M_+2)*value_size; i++ ) values[i] = new_values[i] = 0; // Splatting for( int i=0; i<N_; i++ ){ for( int j=0; j<=d_; j++ ){ int o = offset_[i*(d_+1)+j]+1; float w = barycentric_[i*(d_+1)+j]; for( int k=0; k<value_size; k++ ) values[ o*value_size+k ] += w * (dir?b:a)[ i*value_size+k ]; } } // BLUR for( int j=dir?d_:0; j<=d_ && j>=0; dir?j--:j++ ){ for( int i=0; i<M_; i++ ){ float * old_val = values + (i+1)*value_size; float * new_val = new_values + (i+1)*value_size; int n1 = blur_neighbors_[j*M_+i].n1+1; int n2 = blur_neighbors_[j*M_+i].n2+1; float * n1_val = values + n1*value_size; float * n2_val = values + n2*value_size; for( int k=0; k<value_size; k++ ) new_val[k] = old_val[k]+0.5*(n1_val[k] + n2_val[k]); } std::swap( values, new_values ); } // Slicing gradient computation std::vector<float> r_a( (d_+1)*value_size ), sm( value_size ); for( int i=0; i<N_; i++ ){ // Rotate a std::fill( r_a.begin(), r_a.end(), 0.f ); for( int j=0; j<=d_; j++ ){ int r0 = d_ - rank_[i*(d_+1)+j]; int r1 = r0+1>d_?0:r0+1; int o0 = offset_[i*(d_+1)+r0]+1; int o1 = offset_[i*(d_+1)+r1]+1; for( int k=0; k<value_size; k++ ) { r_a[ j*value_size+k ] += alpha*values[ o0*value_size+k ]; r_a[ j*value_size+k ] -= alpha*values[ o1*value_size+k ]; } } // Multiply by the elevation matrix 
std::copy( r_a.begin(), r_a.begin()+value_size, sm.begin() ); for( int j=1; j<=d_; j++ ) { float grad = 0; for( int k=0; k<value_size; k++ ) { // Elevate ... float v = scale_factor[j-1]*(sm[k]-j*r_a[j*value_size+k]); // ... and add grad += (dir?a:b)[ i*value_size+k ]*v; sm[k] += r_a[j*value_size+k]; } // Store the gradient df[i*d_+j-1] += grad; } } } delete[] values; delete[] new_values; }