// Computes top-k classification accuracy over a batch.
// bottom[0]: predicted scores, `dim` class scores per sample.
// bottom[1]: ground-truth labels, one per sample.
// top[0]:    single scalar — the fraction of samples whose true label is
//            among the top_k_ highest-scoring classes.
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int num = bottom[0]->num();
  const int dim = bottom[0]->count() / bottom[0]->num();
  // Reused across samples to avoid one allocation per iteration.
  std::vector<std::pair<Dtype, int> > bottom_data_vector;
  bottom_data_vector.reserve(dim);
  for (int i = 0; i < num; ++i) {
    // Pair each class score with its class index so the predicted labels
    // survive the sort below.
    bottom_data_vector.clear();
    for (int j = 0; j < dim; ++j) {
      bottom_data_vector.push_back(
          std::make_pair(bottom_data[i * dim + j], j));
    }
    // Only the first top_k_ entries need to be ordered.
    std::partial_sort(
        bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
        bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
    // Check if the true label is among the top k predictions.
    for (int k = 0; k < top_k_; k++) {
      if (bottom_data_vector[k].second == static_cast<int>(bottom_label[i])) {
        ++accuracy;
        break;
      }
    }
  }

  // LOG(INFO) << "Accuracy: " << accuracy;
  // Guard against an empty batch to avoid 0/0.
  top[0]->mutable_cpu_data()[0] = (num == 0) ? Dtype(0) : accuracy / num;
  // Accuracy layer should not be used as a loss function.
}
// Example #2
void
PointSamplerBase::finalize()
{
  // Cache our own rank once; it is compared against repeatedly below.
  unsigned int my_pid = processor_id();

  /*
   * Decide which processor actually gets to "claim" each point.  When
   * several processors located the same point and computed values there,
   * maxloc reports the smallest such PID for each entry of max_id.
   */
  std::vector<unsigned int> max_id(_found_points.size());

  _communicator.maxloc(_found_points, max_id);

  for (unsigned int idx = 0; idx < max_id.size(); ++idx)
  {
    // Run the sanity check only on proc zero — the data is identical on
    // every processor.  At this stage _found_points should be all 1's
    // (i.e. every point was found by some processor).
    if (my_pid == 0 && !_found_points[idx])
      mooseError("In " << name() << ", sample point not found: " << _points[idx]);

    // Only the claiming processor records the sample.
    if (max_id[idx] == my_pid)
      SamplerBase::addSample(_points[idx], _ids[idx], _values[idx]);
  }

  SamplerBase::finalize();
}
// Top-k accuracy with optional score threshold and optional per-class output.
// bottom[0]: predicted scores, num_labels scores per (outer, inner) item.
// bottom[1]: integer ground-truth labels; items equal to ignore_label_
//            (when configured) are skipped.
// top[0]:    overall accuracy over the non-ignored items.
// top[1]:    (optional) per-class accuracy, normalized by per-class counts.
// A hit only counts when threshold_ <= 0 or the matching score >= threshold_.
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  if (top.size() > 1) {
    // Reset the per-class counters before accumulating this batch.
    caffe_set(nums_buffer_.count(), Dtype(0), nums_buffer_.mutable_cpu_data());
    caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data());
  }
  int count = 0;
  // Reused across items to avoid one allocation per iteration.
  std::vector<std::pair<Dtype, int> > bottom_data_vector;
  bottom_data_vector.reserve(num_labels);
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      // Validate the label BEFORE using it as an index; the original code
      // incremented nums_buffer_[label_value] first, so an out-of-range
      // label could write out of bounds before the checks fired.
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value];
      // Top-k accuracy
      bottom_data_vector.clear();
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k * inner_num_ + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // Check if the true label is in the top k predictions and, when a
      // threshold is configured, whether the matching score reaches it.
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value &&
            (threshold_ <= 0 || bottom_data_vector[k].first >= threshold_)) {
          ++accuracy;
          if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value];
          break;
        }
      }
      ++count;
    }
  }

  // LOG(INFO) << "Accuracy: " << accuracy;
  // Guard against an all-ignored batch (count == 0 would divide by zero).
  top[0]->mutable_cpu_data()[0] = (count == 0) ? Dtype(0) : accuracy / count;
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      // Per-class accuracy; classes never seen in this batch report 0.
      top[1]->mutable_cpu_data()[i] =
          nums_buffer_.cpu_data()[i] == 0 ? 0
          : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i];
    }
  }
  // Accuracy layer should not be used as a loss function.
}
// Pixel-wise top-k accuracy for dense (e.g. segmentation) predictions.
// bottom[0]: scores with shape (num, dim, height, width) — one score per
//            class channel per pixel.
// bottom[1]: per-pixel ground-truth labels; the value 255 marks "unknown"
//            pixels, which are excluded from the statistic.
// top[0]:    fraction of known pixels whose true label is among the top_k_
//            highest-scoring channels.
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int num = bottom[0]->num();
  const int dim = bottom[0]->channels();
  const int height = bottom[0]->height();
  const int width = bottom[0]->width();
  int nKnownPixels = 0;
  // Reused across pixels to avoid one allocation per pixel.
  std::vector<std::pair<Dtype, int> > bottom_data_vector;
  bottom_data_vector.reserve(dim);

  for (int i = 0; i < num; ++i) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        const int gt_label =
            static_cast<int>(bottom_label[(i * height + h) * width + w]);
        // 255 is the "unknown / don't care" label; skip such pixels.
        if (gt_label == 255)
          continue;

        ++nKnownPixels;
        // Pair each channel score with its class index for this pixel.
        bottom_data_vector.clear();
        for (int j = 0; j < dim; ++j) {
          bottom_data_vector.push_back(std::make_pair(
              bottom_data[((i * dim + j) * height + h) * width + w], j));
        }
        std::partial_sort(
            bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
            bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
        // Check if the true label is among the top k predictions.
        for (int k = 0; k < top_k_; k++) {
          if (bottom_data_vector[k].second == gt_label) {
            ++accuracy;
            break;
          }
        }
      }
    }
  }
  // LOG(INFO) << "Accuracy: " << accuracy;
  // Guard against every pixel being unknown (would otherwise be 0/0).
  top[0]->mutable_cpu_data()[0] =
      (nKnownPixels == 0) ? Dtype(0) : accuracy / Dtype(nKnownPixels);
  // Accuracy layer should not be used as a loss function.
}
// Element-wise (per-pixel) top-k accuracy with ignore-label support.
// bottom[0]: scores laid out as (num, channels, height, width).
// bottom[1]: per-pixel ground-truth labels; pixels equal to ignore_label_
//            (when configured) are excluded.
// top[0]:    fraction of non-ignored pixels whose true label is among the
//            top_k_ highest-scoring channels.
void EltwiseAccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int num = bottom[0]->num();
  const int dim = bottom[0]->count() / bottom[0]->num();
  const int spatial_dim = bottom[0]->height() * bottom[0]->width();
  const int channels = bottom[0]->channels();
  int ignored_pixel_num = 0;
  // Reused across pixels to avoid one allocation per pixel.
  std::vector<std::pair<Dtype, int> > bottom_data_vector;
  bottom_data_vector.reserve(channels);
  for (int i = 0; i < num; ++i) {
    for (int j = 0; j < spatial_dim; j++) {
      const int label_value = static_cast<int>(bottom_label[i * spatial_dim + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        ignored_pixel_num++;
        continue;
      }
      // Top-k accuracy: pair each channel score with its class index.
      bottom_data_vector.clear();
      for (int k = 0; k < channels; ++k) {
        bottom_data_vector.push_back(
          std::make_pair(bottom_data[i * dim + k * spatial_dim + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // Check if the true label is among the top k predictions.
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value) {
          ++accuracy;
          break;
        }
      }
    }
  }
  // LOG(INFO) << "EltwiseAccuracy: " << eltwise_accuracy;
  // Guard against every pixel being ignored (would otherwise divide by 0).
  const int valid_pixel_num = num * spatial_dim - ignored_pixel_num;
  top[0]->mutable_cpu_data()[0] =
      (valid_pixel_num == 0) ? Dtype(0) : accuracy / valid_pixel_num;
  // Accuracy layer should not be used as a loss function.
}
// Prints, for every sample in the batch, the true label followed by the
// top_k_ (score, label) pairs — prefixed "[>>]" in TEST phase, "[<<]"
// otherwise.  Produces no blob output.
void TestStatisticLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  phase_ = Caffe::phase();

  const Dtype* scores = bottom[0]->cpu_data();
  const Dtype* labels = bottom[1]->cpu_data();
  const int batch_size = bottom[0]->num();
  const int num_classes = bottom[0]->count() / bottom[0]->num();
  for (int sample = 0; sample < batch_size; ++sample) {
    // Rank every class by score; only the first top_k_ need full ordering.
    std::vector<std::pair<Dtype, int> > ranked;
    ranked.reserve(num_classes);
    for (int c = 0; c < num_classes; ++c) {
      ranked.push_back(std::make_pair(scores[sample * num_classes + c], c));
    }
    std::partial_sort(ranked.begin(), ranked.begin() + top_k_, ranked.end(),
                      std::greater<std::pair<Dtype, int> >());

    // layout of line
    // true_label prob_0 label_0 prob_1 label_1 prob_2 label_2 prob_3 label_3
    std::cout << (phase_ == Caffe::TEST ? "[>>] " : "[<<] ");
    std::cout << static_cast<float>(labels[sample]) << "; ";
    for (int k = 0; k < top_k_; k++) {
      std::cout << ranked[k].first << "; " << ranked[k].second << "; ";
    }
    std::cout << std::endl;
  }
}
// Binary-classification statistics pass: class 0 is treated as "negative"
// and any non-zero label as "positive".  Writes sensitivity/specificity/
// precision-style metrics into top[0] (indices 0..6).  Also sweeps a bias
// over the class-1 score to accumulate ROC operating points (the auc_*
// vectors), though every output path that uses them is commented out.
// NOTE(review): assumes top[0] holds at least 7 elements — TODO confirm
// the layer's Reshape sets that up.
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // NOTE(review): hard-coded absolute output path, opened in append mode;
  // all writes to `of` below are commented out, so this only creates/opens
  // the file without writing to it.
  std::ofstream of;
  of.open("/media/DATA/BigVision/NLTK/caffe/tags_classifier/hierarchical_classification/result.txt", ios::app);
  // Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();   // predicted scores
  const Dtype* bottom_label = bottom[1]->cpu_data();  // ground-truth labels
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  // maxval / max_id are unused by this implementation.
  vector<Dtype> maxval(top_k_+1);
  vector<int> max_id(top_k_+1);
  int count = 0;
  // Confusion-matrix tallies at the default (unbiased) decision point.
  int true_positive = 0;
  int true_negative = 0;
  int false_positive = 0;
  int false_negative = 0;
  // 2*auc_pts+1 ROC operating points, one per bias step in the sweep below.
  const int auc_pts = 20;
  vector<int> auc_tp(2 * auc_pts + 1, 0);
  vector<int> auc_tn(2 * auc_pts + 1, 0);
  vector<int> auc_fp(2 * auc_pts + 1, 0);
  vector<int> auc_fn(2 * auc_pts + 1, 0);
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      std::vector<std::pair<Dtype, int> > bottom_data_vector;
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k * inner_num_ + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // check if true label is in top k predictions
      count++;
      // After the sort, bottom_data_vector[0].second is the argmax class;
      // "predicted negative" means that argmax is class 0.
      if (label_value == 0) {
        if (bottom_data_vector[0].second == 0) {
          true_negative++;
        } else {
          false_positive++;
        }
      } else {
        if (bottom_data_vector[0].second == 0) {
          false_negative++;
        } else {
          true_positive++;
        }
      }
      //for (int k = 0; k < 1; k++) {//top_k_ modified for binary classifier
      //}
      // Bias sweep for ROC points: inc = (1-e^-p)/(1+e^-p) = tanh(p/2),
      // ranging over (-1, 1) as p runs from -auc_pts to +auc_pts.
      for (int k = 0; k < 2 * auc_pts + 1; k++) {
          int p = k - auc_pts;
          Dtype inc = (1 - exp(-p)) / (1 + exp(-p));
          // Rebuild the unsorted score list, then bias the class-1 score.
          bottom_data_vector.clear();
          for (int l = 0; l < num_labels; l++) {
            bottom_data_vector.push_back(std::make_pair(
                bottom_data[i * dim + l * inner_num_ + j], l));
          }
          bottom_data_vector[1].first += inc;
          // LOG(INFO) << "first: " << bottom_data_vector[0].first << ", second: " << bottom_data_vector[1].first;
          std::partial_sort(
              bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
              bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
          // Same confusion tally as above, at this biased decision point.
          if (label_value == 0) {
            if (bottom_data_vector[0].second == 0) {
              auc_tn[k]++;
            } else {
              auc_fp[k]++;
            }
          } else {
            if (bottom_data_vector[0].second == 0) {
              auc_fn[k]++;
            } else {
              auc_tp[k]++;
            }
          }
      }
    }
    if (i==0) {
      //LOG(INFO) << "correct: " << correct << ", error: " << error;
      //LOG(INFO) << "positive1: " << positive1 << ", negative1: " <<negative1;
      //LOG(INFO) << "positive2: " << positive2 << ", negative2: " <<negative2;      
    }
  }
  //LOG(INFO) << "accuracy: " << accuracy << ", count: " <<count;
  //LOG(INFO) << "accuracy rate: " << accuracy / count;
  // LOG(INFO) << "Accuracy: " << accuracy;
  // NOTE(review): each ratio below divides by a class total that can be
  // zero for a batch with no positives (or no negatives), yielding NaN —
  // TODO confirm downstream consumers tolerate that.
  top[0]->mutable_cpu_data()[0] = Dtype(true_positive) / (true_positive + false_negative);   // TPR (recall)
  top[0]->mutable_cpu_data()[1] = Dtype(true_negative) / (true_negative + false_positive);   // TNR (specificity)
  top[0]->mutable_cpu_data()[2] = Dtype(false_positive) / (true_negative + false_positive);  // FPR
  top[0]->mutable_cpu_data()[3] = Dtype(false_negative) / (true_positive + false_negative);  // FNR
  top[0]->mutable_cpu_data()[4] = Dtype(true_positive) / (true_positive + false_positive);   // precision
  top[0]->mutable_cpu_data()[5] = Dtype(true_negative) / (true_negative + false_negative);   // NPV
  // NOTE(review): `l` is only referenced by the commented-out blocks below;
  // it is unused in live code.
  int l = auc_pts;
  // Geometric mean of recall and specificity.
  top[0]->mutable_cpu_data()[6] = Dtype(sqrt(Dtype(true_positive * true_negative) / 
      ((true_positive + false_negative) * (true_negative + false_positive))));
  //int l = auc_pts / 2;
  //of << Dtype(sqrt(Dtype(auc_tp[l] * auc_tn[l]) / ((auc_tp[l] + auc_fn[l]) * (auc_tn[l] + auc_fp[l])))) << std::endl;
  /*for(int i = 0; i < 2 * auc_pts + 1; i++) {
    of << Dtype(auc_tp[i]) / (auc_tp[i] + auc_fn[i]) << " ";
    of << Dtype(auc_fp[i]) / (auc_fp[i] + auc_tn[i]) << " ";
    of << std::endl;
  }
  of << std::endl;*/
  //LOG(INFO) << "Write in result.txt";
  /*for (int i = 0; i < auc_pts; i++) {
    of << auc_tp[i] << " " << auc_fn[i] << " " << auc_tn[i] << " " << auc_fp[i] << std::endl;
  }
  of << std::endl;*/
  // Accuracy layer should not be used as a loss function.
}
// Example #8
// Accuracy forward pass with two modes selected by accuracy_param().type():
//   type == 1: standard top-k accuracy over per-class scores.
//   otherwise: per-class sign agreement for hinge-style +/- scores, where
//              label_value == k+1 marks class k as the positive class; the
//              reported value is num_labels minus the mean number of
//              correctly-signed classes, so LOWER is better.
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  int count = 0;
  if (this->layer_param_.accuracy_param().type() == 1) {
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value =
            static_cast<int>(bottom_label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          continue;
        }
        DCHECK_GE(label_value, 0);
        DCHECK_LT(label_value, num_labels);
        // Top-k accuracy: pair each class score with its index so labels
        // survive the partial sort.
        std::vector<std::pair<Dtype, int> > bottom_data_vector;
        bottom_data_vector.reserve(num_labels);
        for (int k = 0; k < num_labels; ++k) {
          bottom_data_vector.push_back(std::make_pair(
              bottom_data[i * dim + k * inner_num_ + j], k));
        }
        std::partial_sort(
            bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
            bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
        // Check if the true label is among the top k predictions.
        for (int k = 0; k < top_k_; k++) {
          if (bottom_data_vector[k].second == label_value) {
            ++accuracy;
            break;
          }
        }
        ++count;
      }
    }

    // LOG(INFO) << "Accuracy: " << accuracy;
    // Guard against an all-ignored batch (count == 0 would divide by zero).
    top[0]->mutable_cpu_data()[0] = (count == 0) ? Dtype(0) : accuracy / count;
    // Accuracy layer should not be used as a loss function.
  } else {
    // NOTE(review): POS_W is unused in this function; kept because the
    // macro leaks into the rest of the translation unit — TODO confirm it
    // can be removed.
    #define POS_W 2
    // dim == num_labels when inner_num_ == 1; outer_num_ is the batch size.
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value =
            static_cast<int>(bottom_label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          continue;
        }
        DCHECK_GE(label_value, 0);
        DCHECK_LT(label_value, num_labels);
        for (int k = 0; k < num_labels; ++k) {
          if (label_value == (k + 1)) {
            // Positive example for class k: correct if its score is > 0.
            if (bottom_data[i * dim + k * inner_num_ + j] > 0) {
              ++accuracy;
            }
          } else {
            // Negative example for class k: correct if its score is < 0.
            if (bottom_data[i * dim + k * inner_num_ + j] < 0) {
              ++accuracy;
            }
          }
        }
        ++count;
      }
    }
    // Mean number of mis-signed classes per sample (0 guard as above).
    top[0]->mutable_cpu_data()[0] =
        (count == 0) ? Dtype(0) : num_labels - accuracy / count;
  }
}
// Multi-label top-k accuracy: each sample may carry up to max_label_num
// labels in bottom[1]; every label of a sample found among the top_k_
// predictions increments both `accuracy` and `count`, and a sample with no
// hits contributes exactly one `count` (via temp_flag).
void MultiLabelAccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  // NOTE(review): this ternary looks inverted — when the label axis is
  // longer than 1 it clamps max_label_num to 1, and otherwise yields
  // shape-1 <= 0 so the label loop below never runs. TODO confirm intent.
  const int max_label_num = bottom[1]->shape(label_axis_) > 1 ? 1: bottom[1]->shape(label_axis_) - 1;
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  // maxval / max_id are unused by this implementation.
  vector<Dtype> maxval(top_k_+1);
  vector<int> max_id(top_k_+1);
  if (top.size() > 1) {
    // Reset the per-class counters before accumulating this batch.
    caffe_set(nums_buffer_.count(), Dtype(0), nums_buffer_.mutable_cpu_data());
    caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data());
  }
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    // True until some label of sample i matches a top-k prediction.
    bool temp_flag = true;
    for(int l = 0; l < max_label_num; l++){


      int label_value = static_cast<int>(bottom_label[i * max_label_num + l]);
      // NOTE(review): the `else` arm runs whenever the `break` condition
      // fails, so every label_value that reaches this point is overwritten
      // with 0 — which then trips DCHECK_GT(label_value, 0) in debug
      // builds.  The trailing comment suggests the reset was intended only
      // for "no label present"; verify against the label encoding.
      if(l > 0 && label_value < 0) break;
      else label_value = 0; // if there is no label at all, then it is a negative example

      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value];
      DCHECK_GT(label_value, 0);
      DCHECK_LE(label_value, num_labels);
      // Top-k accuracy: pair each class score with its index.
      std::vector<std::pair<Dtype, int> > bottom_data_vector;
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // check if true label is in top k predictions
      for (int k = 0; k < top_k_; k++) {

        if (bottom_data_vector[k].second == label_value) {
          ++accuracy;
          temp_flag = false;
          ++ count;
          if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value];
          break;
        }
      }
    }
    // A sample with no matched labels still counts once in the denominator.
    if(temp_flag) ++count;
  }

  // LOG(INFO) << "Accuracy: " << accuracy;
  top[0]->mutable_cpu_data()[0] = accuracy / count;
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      // Per-class accuracy; classes with no occurrences report 0.
      top[1]->mutable_cpu_data()[i] =
          nums_buffer_.cpu_data()[i] == 0 ? 0
          : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i];
    }
  }
  // MultiLabelAccuracy layer should not be used as a loss function.
}