Example #1
void EmbedLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  int index;
  for (int n = 0; n < M_; ++n) {
    index = static_cast<int>(bottom_data[n]);
    DCHECK_GE(index, 0);
    DCHECK_LT(index, K_);
    DCHECK_EQ(static_cast<Dtype>(index), bottom_data[n]) << "non-integer input";
    caffe_copy(N_, weight + index * N_, top_data + n * N_);
  }
  if (bias_term_) {
    const Dtype* bias = this->blobs_[1]->cpu_data();
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1),
        bias_multiplier_.cpu_data(), bias, Dtype(1), top_data);
  }
}
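All of the examples in this listing use glog/Chromium-style DCHECK_* macros, which evaluate their comparison only in debug builds and compile to nothing in release builds. Below is a minimal illustrative stand-in, not the actual glog/Chromium implementation (which also supports streaming a failure message, as with "non-integer input" in Example #1):

#include <cassert>

// Illustrative stand-ins only; the real DCHECK_GE / DCHECK_LT come from
// glog or Chromium's base/logging.h.
#ifndef NDEBUG
#define MY_DCHECK_GE(a, b) assert((a) >= (b))  // checked in debug builds
#define MY_DCHECK_LT(a, b) assert((a) < (b))
#else
#define MY_DCHECK_GE(a, b) ((void)0)  // compiled out in release builds
#define MY_DCHECK_LT(a, b) ((void)0)
#endif

// Same contract as the embedding lookup above: the index must be in range.
int lookup(const int* table, int table_size, int index) {
  MY_DCHECK_GE(index, 0);
  MY_DCHECK_LT(index, table_size);
  return table[index];
}

int main() {
  const int table[3] = {10, 20, 30};
  return lookup(table, 3, 2) == 30 ? 0 : 1;
}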
Example #2
bool KeyframeEffectModelBase::sample(
    int iteration,
    double fraction,
    double iterationDuration,
    Vector<RefPtr<Interpolation>>& result) const {
  DCHECK_GE(iteration, 0);
  DCHECK(!isNull(fraction));
  ensureKeyframeGroups();
  ensureInterpolationEffectPopulated();

  bool changed = iteration != m_lastIteration || fraction != m_lastFraction ||
                 iterationDuration != m_lastIterationDuration;
  m_lastIteration = iteration;
  m_lastFraction = fraction;
  m_lastIterationDuration = iterationDuration;
  m_interpolationEffect.getActiveInterpolations(fraction, iterationDuration,
                                                result);
  return changed;
}
Example #3
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  vector<Dtype> maxval(top_k_+1);
  vector<int> max_id(top_k_+1);
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      std::vector<std::pair<Dtype, int> > bottom_data_vector;
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k * inner_num_ + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // check if true label is in top k predictions
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value) {
          ++accuracy;
          break;
        }
      }
      ++count;
    }
  }

  // LOG(INFO) << "Accuracy: " << accuracy;
  top[0]->mutable_cpu_data()[0] = accuracy / count;
  // Accuracy layer should not be used as a loss function.
}
Example #4
DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
    : AudioDelayDSPKernel(processor, AudioUtilities::kRenderQuantumFrames) {
  DCHECK(processor);
  DCHECK_GT(processor->sampleRate(), 0);
  if (!(processor && processor->sampleRate() > 0))
    return;

  m_maxDelayTime = processor->maxDelayTime();
  DCHECK_GE(m_maxDelayTime, 0);
  DCHECK(!std::isnan(m_maxDelayTime));
  if (m_maxDelayTime < 0 || std::isnan(m_maxDelayTime))
    return;

  m_buffer.allocate(
      bufferLengthForDelay(m_maxDelayTime, processor->sampleRate()));
  m_buffer.zero();

  m_smoothingRate = AudioUtilities::discreteTimeConstantForSampleRate(
      SmoothingTimeConstant, processor->sampleRate());
}
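Note how the constructor above pairs each DCHECK with an explicit runtime guard, so a release build (where DCHECKs vanish) still bails out rather than allocating from bad values. A minimal sketch of that defensive pattern, with hypothetical names:

#include <cassert>
#include <cmath>

// Hypothetical setup routine using the same "assert in debug, guard in
// release" idiom as DelayDSPKernel's constructor above.
bool setupDelay(double maxDelayTime, double sampleRate) {
  assert(sampleRate > 0);                                  // mirrors DCHECK_GT
  assert(maxDelayTime >= 0 && !std::isnan(maxDelayTime));  // mirrors DCHECK_GE / DCHECK
  if (sampleRate <= 0 || maxDelayTime < 0 || std::isnan(maxDelayTime))
    return false;  // release-build safety net
  // ... size and zero the delay buffer from maxDelayTime * sampleRate ...
  return true;
}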
Example #5
void FocalLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);

  // compute all needed values
  compute_intermediate_values_of_cpu();
  const Dtype* label           = bottom[1]->cpu_data();
  const Dtype* log_prob_data   = log_prob_.cpu_data();
  const Dtype* power_prob_data = power_prob_.cpu_data();

  // compute loss
  int count    = 0;
  int channels = prob_.shape(softmax_axis_);
  int dim      = prob_.count() / outer_num_;

  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, channels);
      const int index = i * dim + label_value * inner_num_ + j;
      // FL(p_t) = -(1 - p_t) ^ gamma * log(p_t)
      // loss -= std::max(power_prob_data[index] * log_prob_data[index],
      //                      Dtype(log(Dtype(FLT_MIN))));
      loss -= power_prob_data[index] * log_prob_data[index];
      ++count;
    }
  }

  // prob
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Example #6
void MultinomialLogisticLossMaskLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const Dtype* bottom_mask = bottom[2]->cpu_data();
  int dim = bottom[0]->count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(bottom_label[i * inner_num_ + j]);
      
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, bottom[0]->shape(1));

      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }

      Dtype prob = Dtype(kLOG_THRESHOLD);
      if (label_value != 0) {
        prob = std::max(
            bottom_data[i * dim + label_value * inner_num_ + j]
                * bottom_mask[i * inner_num_ + j],
            Dtype(kLOG_THRESHOLD));
      } else {
        prob = std::max(
            bottom_data[i * dim + label_value * inner_num_ + j]
                * (1 - bottom_mask[i * inner_num_ + j]),
            Dtype(kLOG_THRESHOLD));
      }
      
      loss -= log(prob);
      ++count;
    }
  }
  top[0]->mutable_cpu_data()[0] = loss / (std::max(Dtype(1.0), Dtype(count)));
}
Example #7
void ResourceLoader::requestSynchronously(const ResourceRequest& request) {
  // downloadToFile is not supported for synchronous requests.
  DCHECK(!request.downloadToFile());
  DCHECK(m_loader);
  DCHECK_EQ(request.priority(), ResourceLoadPriorityHighest);

  WrappedResourceRequest requestIn(request);
  WebURLResponse responseOut;
  WebURLError errorOut;
  WebData dataOut;
  int64_t encodedDataLength = WebURLLoaderClient::kUnknownEncodedDataLength;
  int64_t encodedBodyLength = 0;
  m_loader->loadSynchronously(requestIn, responseOut, errorOut, dataOut,
                              encodedDataLength, encodedBodyLength);

  // A message dispatched while synchronously fetching the resource
  // can bring about the cancellation of this load.
  if (!m_loader)
    return;
  if (errorOut.reason) {
    didFail(errorOut, encodedDataLength, encodedBodyLength);
    return;
  }
  didReceiveResponse(responseOut);
  if (!m_loader)
    return;
  DCHECK_GE(responseOut.toResourceResponse().encodedBodyLength(), 0);

  // Follow the async case convention of not calling didReceiveData or
  // appending data to m_resource if the response body is empty. Copying the
  // empty buffer is a noop in most cases, but is destructive in the case of
  // a 304, where it will overwrite the cached data we should be reusing.
  if (dataOut.size()) {
    m_fetcher->didReceiveData(m_resource.get(), dataOut.data(), dataOut.size());
    m_resource->setResourceBuffer(dataOut);
  }
  didFinishLoading(monotonicallyIncreasingTime(), encodedDataLength,
                   encodedBodyLength);
}
Example #8
void SVGNumberListInterpolationType::composite(
    UnderlyingValueOwner& underlyingValueOwner,
    double underlyingFraction,
    const InterpolationValue& value,
    double interpolationFraction) const {
    const InterpolableList& list = toInterpolableList(*value.interpolableValue);

    if (toInterpolableList(*underlyingValueOwner.value().interpolableValue)
            .length() <= list.length())
        padWithZeroes(underlyingValueOwner.mutableValue().interpolableValue,
                      list.length());

    InterpolableList& underlyingList = toInterpolableList(
                                           *underlyingValueOwner.mutableValue().interpolableValue);

    DCHECK_GE(underlyingList.length(), list.length());
    size_t i = 0;
    for (; i < list.length(); i++)
        underlyingList.getMutable(i)->scaleAndAdd(underlyingFraction, *list.get(i));
    for (; i < underlyingList.length(); i++)
        underlyingList.getMutable(i)->scale(underlyingFraction);
}
Example #9
void InfogainLossLayer<Dtype, MItype, MOtype>::Forward_cpu(
    const vector<Blob<MItype>*>& bottom,
    const vector<Blob<MOtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const Dtype* infogain_mat = NULL;
  if (bottom.size() < 3) {
    infogain_mat = infogain_.cpu_data();
  } else {
    infogain_mat = bottom[2]->cpu_data();
  }
  int_tp count = 0;
  Dtype loss = 0;
  for (int_tp i = 0; i < outer_num_; ++i) {
    for (int_tp j = 0; j < inner_num_; j++) {
      const int_tp label_value =
        static_cast<int_tp>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels_);
      for (int_tp l = 0; l < num_labels_; l++) {
        loss -= infogain_mat[label_value * num_labels_ + l] *
          std::log(std::max(
                  prob_data[i * inner_num_*num_labels_ + l * inner_num_ + j],
                  Dtype(kLOG_THRESHOLD)));
      }
      ++count;
    }
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Example #10
void KeyframeEffectReadOnly::applyEffects() {
  DCHECK(isInEffect());
  DCHECK(animation());
  if (!m_target || !m_model)
    return;

  if (hasIncompatibleStyle())
    animation()->cancelAnimationOnCompositor();

  double iteration = currentIteration();
  DCHECK_GE(iteration, 0);
  bool changed = false;
  if (m_sampledEffect) {
    changed = m_model->sample(clampTo<int>(iteration, 0), progress(),
                              iterationDuration(),
                              m_sampledEffect->mutableInterpolations());
  } else {
    Vector<RefPtr<Interpolation>> interpolations;
    m_model->sample(clampTo<int>(iteration, 0), progress(), iterationDuration(),
                    interpolations);
    if (!interpolations.isEmpty()) {
      SampledEffect* sampledEffect = SampledEffect::create(this);
      sampledEffect->mutableInterpolations().swap(interpolations);
      m_sampledEffect = sampledEffect;
      ensureAnimationStack(m_target).add(sampledEffect);
      changed = true;
    } else {
      return;
    }
  }

  if (changed) {
    m_target->setNeedsAnimationStyleRecalc();
    if (RuntimeEnabledFeatures::webAnimationsSVGEnabled() &&
        m_target->isSVGElement())
      toSVGElement(*m_target).setWebAnimationsPending();
  }
}
Example #11
int ffivector_reserve(FFIVector* v, size_t n) {
  if (n <= v->capacity) {
    return 0;
  }

  try {
    if (v->data) {
      v->data = folly::smartRealloc(
          v->data,
          v->size * v->elementSize,
          v->capacity * v->elementSize,
          n * v->elementSize);
    } else {
      v->data = folly::checkedMalloc(n * v->elementSize);
    }
  } catch (const std::bad_alloc&) {
    return -ENOMEM;
  }

  v->capacity = malloc_usable_size(v->data) / v->elementSize;
  DCHECK_GE(v->capacity, n);
  return 0;
}
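A simplified usage sketch of the same reserve-and-check pattern, with plain realloc standing in for folly::smartRealloc / checkedMalloc (MiniVector is a hypothetical type, not the real FFIVector API):

#include <cassert>
#include <cstdlib>

// Hypothetical, simplified counterpart of FFIVector, for illustration only.
struct MiniVector {
  void* data = nullptr;
  std::size_t size = 0;
  std::size_t capacity = 0;
  std::size_t elementSize = sizeof(int);
};

int mini_reserve(MiniVector* v, std::size_t n) {
  if (n <= v->capacity) {
    return 0;
  }
  void* p = std::realloc(v->data, n * v->elementSize);  // acts as malloc when data is null
  if (p == nullptr) {
    return -1;  // the folly version reports -ENOMEM here
  }
  v->data = p;
  v->capacity = n;  // folly instead rounds up via malloc_usable_size()
  assert(v->capacity >= n);  // same post-condition as DCHECK_GE(v->capacity, n)
  return 0;
}

int main() {
  MiniVector v;
  int rc = mini_reserve(&v, 16);
  std::free(v.data);
  return rc;
}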
Example #12
void ShutdownSocketSet::remove(int fd) {
  DCHECK_GE(fd, 0);
  if (fd >= maxFd_) {
    return;
  }

  auto& sref = data_[size_t(fd)];
  uint8_t prevState = 0;

  prevState = sref.load(std::memory_order_relaxed);
  do {
    switch (prevState) {
      case IN_SHUTDOWN:
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        prevState = sref.load(std::memory_order_relaxed);
        continue;
      case FREE:
        LOG(FATAL) << "Invalid prev state for fd " << fd << ": "
                   << int(prevState);
    }
  } while (
      !sref.compare_exchange_weak(prevState, FREE, std::memory_order_relaxed));
}
Example #13
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  const Dtype* weight = bottom[2]->cpu_data();
  int dim = prob_.count() / outer_num_;
  Dtype count = 0;  // accumulates per-pixel weights, which may be fractional
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      
      Dtype w = weight[i * inner_num_ + j];  // use the weight pointer fetched above
      //if(label_value>0)
      //   std::cerr<<w<<" ";
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      loss -= w*log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      count+=w;
    }
  }
  if (normalize_) {
    top[0]->mutable_cpu_data()[0] = loss / count;
  } else {
    top[0]->mutable_cpu_data()[0] = loss / outer_num_;
  }
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
  //std::cerr<<"loss computed"<<std::endl;
}
Example #14
void DeferredImageDecoder::setDataInternal(PassRefPtr<SharedBuffer> passData,
                                           bool allDataReceived,
                                           bool pushDataToDecoder) {
  RefPtr<SharedBuffer> data = passData;
  if (m_actualDecoder) {
    m_allDataReceived = allDataReceived;
    if (pushDataToDecoder)
      m_actualDecoder->setData(data, allDataReceived);
    prepareLazyDecodedFrames();
  }

  if (m_frameGenerator) {
    if (!m_rwBuffer)
      m_rwBuffer = wrapUnique(new SkRWBuffer(data->size()));

    const char* segment = 0;
    for (size_t length = data->getSomeData(segment, m_rwBuffer->size()); length;
         length = data->getSomeData(segment, m_rwBuffer->size())) {
      DCHECK_GE(data->size(), m_rwBuffer->size() + length);
      const size_t remaining = data->size() - m_rwBuffer->size() - length;
      m_rwBuffer->append(segment, length, remaining);
    }
  }
}
Example #15
    bool ClipboardUtil::GetFileContents(IDataObject* data_object, std::wstring* filename,
        std::string* file_contents)
    {
        DCHECK(data_object && filename && file_contents);
        if(!SUCCEEDED(data_object->QueryGetData(GetFileContentFormatZero())) &&
            !SUCCEEDED(data_object->QueryGetData(GetFileDescriptorFormat())))
        {
            return false;
        }

        STGMEDIUM content;
        // Calling GetData can be very slow, depending on the implementation of |data_object|.
        if(SUCCEEDED(data_object->GetData(GetFileContentFormatZero(), &content)))
        {
            if(TYMED_HGLOBAL == content.tymed)
            {
                base::win::ScopedHGlobal<char> data(content.hGlobal);
                file_contents->assign(data.get(), data.Size());
            }
            ReleaseStgMedium(&content);
        }

        STGMEDIUM description;
        if(SUCCEEDED(data_object->GetData(GetFileDescriptorFormat(),
            &description)))
        {
            {
                base::win::ScopedHGlobal<FILEGROUPDESCRIPTOR> fgd(description.hGlobal);
                // There is at least one file here.
                DCHECK_GE(fgd->cItems, 1u);
                filename->assign(fgd->fgd[0].cFileName);
            }
            ReleaseStgMedium(&description);
        }
        return true;
    }
Example #16
void InjectJoinFilters::concretizeAsLIPFilters(
    const P::PhysicalPtr &input,
    const P::PhysicalPtr &anchor_node) const {
  switch (input->getPhysicalType()) {
    case P::PhysicalType::kAggregate: {
      const P::AggregatePtr &aggregate =
          std::static_pointer_cast<const P::Aggregate>(input);
      concretizeAsLIPFilters(aggregate->input(), aggregate);
      break;
    }
    case P::PhysicalType::kSelection: {
      const P::SelectionPtr &selection =
          std::static_pointer_cast<const P::Selection>(input);
      concretizeAsLIPFilters(selection->input(), selection);
      break;
    }
    // Currently we disable the attachment of filters to HashJoin nodes. See the
    // comments in InjectJoinFilters::addFilterAnchors().
    /*
    case P::PhysicalType::kHashJoin: {
      const P::HashJoinPtr &hash_join =
          std::static_pointer_cast<const P::HashJoin>(input);
      concretizeAsLIPFilters(hash_join->left(), hash_join);
      concretizeAsLIPFilters(hash_join->right(), nullptr);
      break;
    }
    */
    case P::PhysicalType::kFilterJoin: {
      const P::FilterJoinPtr &filter_join =
          std::static_pointer_cast<const P::FilterJoin>(input);
      DCHECK_EQ(1u, filter_join->build_attributes().size());
      const E::AttributeReferencePtr &build_attr =
          filter_join->build_attributes().front();

      std::int64_t min_cpp_value;
      std::int64_t max_cpp_value;
      const bool has_exact_min_max_stats =
          findExactMinMaxValuesForAttributeHelper(filter_join,
                                                  build_attr,
                                                  &min_cpp_value,
                                                  &max_cpp_value);
      DCHECK(has_exact_min_max_stats);
      DCHECK_GE(max_cpp_value, min_cpp_value);
      DCHECK_LE(max_cpp_value - min_cpp_value, kMaxFilterSize);
      CHECK(anchor_node != nullptr);

      lip_filter_configuration_->addBuildInfo(
          P::BitVectorExactFilterBuildInfo::Create(build_attr,
                                                   min_cpp_value,
                                                   max_cpp_value,
                                                   filter_join->is_anti_join()),
          filter_join);
      lip_filter_configuration_->addProbeInfo(
          P::LIPFilterProbeInfo::Create(filter_join->probe_attributes().front(),
                                        build_attr,
                                        filter_join),
          anchor_node);

      concretizeAsLIPFilters(filter_join->left(), anchor_node);
      concretizeAsLIPFilters(filter_join->right(), filter_join);
      break;
    }
    default: {
      for (const P::PhysicalPtr &child : input->children()) {
        concretizeAsLIPFilters(child, nullptr);
      }
    }
  }
}
Example #17
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  std::ofstream of;
  of.open("/media/DATA/BigVision/NLTK/caffe/tags_classifier/hierarchical_classification/result.txt", ios::app);
  // Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  vector<Dtype> maxval(top_k_+1);
  vector<int> max_id(top_k_+1);
  int count = 0;
  int true_positive = 0;
  int true_negative = 0;
  int false_positive = 0;
  int false_negative = 0;
  const int auc_pts = 20;
  vector<int> auc_tp(2 * auc_pts + 1, 0);
  vector<int> auc_tn(2 * auc_pts + 1, 0);
  vector<int> auc_fp(2 * auc_pts + 1, 0);
  vector<int> auc_fn(2 * auc_pts + 1, 0);
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      std::vector<std::pair<Dtype, int> > bottom_data_vector;
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k * inner_num_ + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // check if true label is in top k predictions
      count++;
      if (label_value == 0) {
        if (bottom_data_vector[0].second == 0) {
          true_negative++;
        } else {
          false_positive++;
        }
      } else {
        if (bottom_data_vector[0].second == 0) {
          false_negative++;
        } else {
          true_positive++;
        }
      }
      //for (int k = 0; k < 1; k++) {//top_k_ modified for binary classifier
      //}
      for (int k = 0; k < 2 * auc_pts + 1; k++) {
          int p = k - auc_pts;
          Dtype inc = (1 - exp(-p)) / (1 + exp(-p));
          bottom_data_vector.clear();
          for (int l = 0; l < num_labels; l++) {
            bottom_data_vector.push_back(std::make_pair(
                bottom_data[i * dim + l * inner_num_ + j], l));
          }
          bottom_data_vector[1].first += inc;
          // LOG(INFO) << "first: " << bottom_data_vector[0].first << ", second: " << bottom_data_vector[1].first;
          std::partial_sort(
              bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
              bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
          if (label_value == 0) {
            if (bottom_data_vector[0].second == 0) {
              auc_tn[k]++;
            } else {
              auc_fp[k]++;
            }
          } else {
            if (bottom_data_vector[0].second == 0) {
              auc_fn[k]++;
            } else {
              auc_tp[k]++;
            }
          }
      }
    }
    if (i==0) {
      //LOG(INFO) << "correct: " << correct << ", error: " << error;
      //LOG(INFO) << "positive1: " << positive1 << ", negative1: " <<negative1;
      //LOG(INFO) << "positive2: " << positive2 << ", negative2: " <<negative2;      
    }
  }
  //LOG(INFO) << "accuracy: " << accuracy << ", count: " <<count;
  //LOG(INFO) << "accuracy rate: " << accuracy / count;
  // LOG(INFO) << "Accuracy: " << accuracy;
  top[0]->mutable_cpu_data()[0] = Dtype(true_positive) / (true_positive + false_negative);
  top[0]->mutable_cpu_data()[1] = Dtype(true_negative) / (true_negative + false_positive);
  top[0]->mutable_cpu_data()[2] = Dtype(false_positive) / (true_negative + false_positive);
  top[0]->mutable_cpu_data()[3] = Dtype(false_negative) / (true_positive + false_negative);
  top[0]->mutable_cpu_data()[4] = Dtype(true_positive) / (true_positive + false_positive);
  top[0]->mutable_cpu_data()[5] = Dtype(true_negative) / (true_negative + false_negative);
  int l = auc_pts;
  top[0]->mutable_cpu_data()[6] = Dtype(sqrt(Dtype(true_positive * true_negative) / 
      ((true_positive + false_negative) * (true_negative + false_positive))));
  //int l = auc_pts / 2;
  //of << Dtype(sqrt(Dtype(auc_tp[l] * auc_tn[l]) / ((auc_tp[l] + auc_fn[l]) * (auc_tn[l] + auc_fp[l])))) << std::endl;
  /*for(int i = 0; i < 2 * auc_pts + 1; i++) {
    of << Dtype(auc_tp[i]) / (auc_tp[i] + auc_fn[i]) << " ";
    of << Dtype(auc_fp[i]) / (auc_fp[i] + auc_tn[i]) << " ";
    of << std::endl;
  }
  of << std::endl;*/
  //LOG(INFO) << "Write in result.txt";
  /*for (int i = 0; i < auc_pts; i++) {
    of << auc_tp[i] << " " << auc_fn[i] << " " << auc_tn[i] << " " << auc_fp[i] << std::endl;
  }
  of << std::endl;*/
  // Accuracy layer should not be used as a loss function.
}
Example #18
 explicit BatchedControlUpdate(MediaControls* controls)
     : m_controls(controls) {
   DCHECK(isMainThread());
   DCHECK_GE(s_batchDepth, 0);
   ++s_batchDepth;
 }
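Example #18 shows only the constructor; presumably a matching destructor decrements s_batchDepth and flushes once the depth returns to zero. A self-contained sketch of that RAII batching idiom (assumed structure, not the actual MediaControls code):

#include <cassert>

// Hypothetical stand-alone version of the batch-depth guard.
class BatchedUpdate {
 public:
  BatchedUpdate() {
    assert(s_batchDepth >= 0);  // mirrors DCHECK_GE(s_batchDepth, 0)
    ++s_batchDepth;
  }
  ~BatchedUpdate() {
    assert(s_batchDepth >= 1);
    if (--s_batchDepth == 0) {
      // flush the batched control updates here
    }
  }

 private:
  static int s_batchDepth;
};

int BatchedUpdate::s_batchDepth = 0;

int main() {
  BatchedUpdate outer;
  {
    BatchedUpdate inner;  // nesting is fine; the flush happens once, at depth 0
  }
  return 0;
}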
Example #19
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  vector<Dtype> maxval(top_k_+1);
  vector<int> max_id(top_k_+1);
  int count = 0;
  if (this->layer_param_.accuracy_param().type() == 1) {
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value =
            static_cast<int>(bottom_label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          continue;
        }
        DCHECK_GE(label_value, 0);
        DCHECK_LT(label_value, num_labels);
        // Top-k accuracy
        std::vector<std::pair<Dtype, int> > bottom_data_vector;
        for (int k = 0; k < num_labels; ++k) {
          bottom_data_vector.push_back(std::make_pair(
              bottom_data[i * dim + k * inner_num_ + j], k));
        }
        std::partial_sort(
            bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
            bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
        // check if true label is in top k predictions
        for (int k = 0; k < top_k_; k++) {
          if (bottom_data_vector[k].second == label_value) {
            ++accuracy;
            break;
          }
        }
        ++count;
      }
    }

    // LOG(INFO) << "Accuracy: " << accuracy;
    top[0]->mutable_cpu_data()[0] = accuracy / count;
    // Accuracy layer should not be used as a loss function.
  } else {
    #define POS_W 2
// dim = num_labels, inner_num_ = 1, outer_num_ = batch num, 
//              LOG(INFO)<<"outer_num_: " << outer_num_ << " inner_num_: " << inner_num_ << " dim:" << dim <<" num_labels: " << num_labels;
      for (int i = 0; i < outer_num_; ++i) {
          for (int j = 0; j < inner_num_; ++j) {
              const int label_value =
                      static_cast<int>(bottom_label[i * inner_num_ + j]);
              if (has_ignore_label_ && label_value == ignore_label_) {
                  continue;
              }
              
              DCHECK_GE(label_value, 0);
              DCHECK_LT(label_value, num_labels);
              for (int k = 0; k < num_labels; ++k) {
                  if (label_value == (k+1))
                  {// positive for class j
//          prob += max(Dtype(0), 1-bottom_data[i * dim + j]);
//                      prob += max(Dtype(0), 2-2*bottom_data[i * dim + k * inner_num_ + j]);
                      if (bottom_data[i * dim + k * inner_num_ + j] > 0) {
                          ++accuracy;
                      }
                  }
                  else
                  {// negative for class j
//                      prob += max(Dtype(0), 1+bottom_data[i * dim + k * inner_num_ + j]);
                      if (bottom_data[i * dim + k * inner_num_ + j] < 0) {
                          ++accuracy;
                      }
                  }
              }
              ++count;
          }
      }
  top[0]->mutable_cpu_data()[0] = num_labels - accuracy / count;
  }
}
Example #20
 /**
  * @brief Get the value of the specified attribute.
  * @warning This is only safe if gapsInAttributeSequence() is false for the
  *          relation this tuple belongs to.
  *
  * @param attr The id of the attribute to get.
  * @return The attribute's value in this tuple.
  **/
 const TypedValue& getAttributeValue(const attribute_id attr) const {
   DCHECK_GE(attr, 0);
   // The cast suppresses a warning about comparing signed and unsigned types.
   DCHECK_LT(static_cast<std::vector<TypedValue>::size_type>(attr), attribute_values_.size());
   return attribute_values_[attr];
 }
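A self-contained usage sketch of the accessor above, with simplified stand-ins for the TypedValue / attribute_id types (illustrative only, not the real Tuple class):

#include <cassert>
#include <utility>
#include <vector>

using TypedValue = int;     // stand-in for the real value type
using attribute_id = int;

class TupleSketch {
 public:
  explicit TupleSketch(std::vector<TypedValue> values)
      : attribute_values_(std::move(values)) {}

  const TypedValue& getAttributeValue(const attribute_id attr) const {
    assert(attr >= 0);  // mirrors DCHECK_GE(attr, 0)
    // The cast avoids a signed/unsigned comparison, as in the original.
    assert(static_cast<std::vector<TypedValue>::size_type>(attr) <
           attribute_values_.size());
    return attribute_values_[attr];
  }

 private:
  std::vector<TypedValue> attribute_values_;
};

int main() {
  TupleSketch t({7, 8, 9});
  return t.getAttributeValue(2) == 9 ? 0 : 1;
}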
Example #21
void HRTFPanner::pan(double desiredAzimuth,
                     double elevation,
                     const AudioBus* inputBus,
                     AudioBus* outputBus,
                     size_t framesToProcess,
                     AudioBus::ChannelInterpretation channelInterpretation) {
  unsigned numInputChannels = inputBus ? inputBus->numberOfChannels() : 0;

  bool isInputGood = inputBus && numInputChannels >= 1 && numInputChannels <= 2;
  ASSERT(isInputGood);

  bool isOutputGood = outputBus && outputBus->numberOfChannels() == 2 &&
                      framesToProcess <= outputBus->length();
  ASSERT(isOutputGood);

  if (!isInputGood || !isOutputGood) {
    if (outputBus)
      outputBus->zero();
    return;
  }

  HRTFDatabase* database = m_databaseLoader->database();
  if (!database) {
    outputBus->copyFrom(*inputBus, channelInterpretation);
    return;
  }

  // IRCAM HRTF azimuth values from the loaded database are reversed from the
  // panner's notion of azimuth.
  double azimuth = -desiredAzimuth;

  bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0;
  ASSERT(isAzimuthGood);
  if (!isAzimuthGood) {
    outputBus->zero();
    return;
  }

  // Normally, we'll just be dealing with mono sources.
  // If we have a stereo input, implement stereo panning with left source
  // processed by left HRTF, and right source by right HRTF.
  const AudioChannel* inputChannelL =
      inputBus->channelByType(AudioBus::ChannelLeft);
  const AudioChannel* inputChannelR =
      numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight)
                           : nullptr;

  // Get source and destination pointers.
  const float* sourceL = inputChannelL->data();
  const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
  float* destinationL =
      outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
  float* destinationR =
      outputBus->channelByType(AudioBus::ChannelRight)->mutableData();

  double azimuthBlend;
  int desiredAzimuthIndex =
      calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);

  // Initially snap azimuth and elevation values to first values encountered.
  if (m_azimuthIndex1 == UninitializedAzimuth) {
    m_azimuthIndex1 = desiredAzimuthIndex;
    m_elevation1 = elevation;
  }
  if (m_azimuthIndex2 == UninitializedAzimuth) {
    m_azimuthIndex2 = desiredAzimuthIndex;
    m_elevation2 = elevation;
  }

  // Cross-fade / transition over a period of around 45 milliseconds.
  // This is an empirical value tuned to be a reasonable trade-off between
  // smoothness and speed.
  const double fadeFrames = sampleRate() <= 48000 ? 2048 : 4096;

  // Check for azimuth and elevation changes, initiating a cross-fade if needed.
  if (!m_crossfadeX && m_crossfadeSelection == CrossfadeSelection1) {
    if (desiredAzimuthIndex != m_azimuthIndex1 || elevation != m_elevation1) {
      // Cross-fade from 1 -> 2
      m_crossfadeIncr = 1 / fadeFrames;
      m_azimuthIndex2 = desiredAzimuthIndex;
      m_elevation2 = elevation;
    }
  }
  if (m_crossfadeX == 1 && m_crossfadeSelection == CrossfadeSelection2) {
    if (desiredAzimuthIndex != m_azimuthIndex2 || elevation != m_elevation2) {
      // Cross-fade from 2 -> 1
      m_crossfadeIncr = -1 / fadeFrames;
      m_azimuthIndex1 = desiredAzimuthIndex;
      m_elevation1 = elevation;
    }
  }

  // This algorithm currently requires that we process in power-of-two size
  // chunks at least AudioUtilities::kRenderQuantumFrames.
  ASSERT(1UL << static_cast<int>(log2(framesToProcess)) == framesToProcess);
  DCHECK_GE(framesToProcess, AudioUtilities::kRenderQuantumFrames);

  const unsigned framesPerSegment = AudioUtilities::kRenderQuantumFrames;
  const unsigned numberOfSegments = framesToProcess / framesPerSegment;

  for (unsigned segment = 0; segment < numberOfSegments; ++segment) {
    // Get the HRTFKernels and interpolated delays.
    HRTFKernel* kernelL1;
    HRTFKernel* kernelR1;
    HRTFKernel* kernelL2;
    HRTFKernel* kernelR2;
    double frameDelayL1;
    double frameDelayR1;
    double frameDelayL2;
    double frameDelayR2;
    database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex1,
                                             m_elevation1, kernelL1, kernelR1,
                                             frameDelayL1, frameDelayR1);
    database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex2,
                                             m_elevation2, kernelL2, kernelR2,
                                             frameDelayL2, frameDelayR2);

    bool areKernelsGood = kernelL1 && kernelR1 && kernelL2 && kernelR2;
    ASSERT(areKernelsGood);
    if (!areKernelsGood) {
      outputBus->zero();
      return;
    }

    ASSERT(frameDelayL1 / sampleRate() < MaxDelayTimeSeconds &&
           frameDelayR1 / sampleRate() < MaxDelayTimeSeconds);
    ASSERT(frameDelayL2 / sampleRate() < MaxDelayTimeSeconds &&
           frameDelayR2 / sampleRate() < MaxDelayTimeSeconds);

    // Crossfade inter-aural delays based on transitions.
    double frameDelayL =
        (1 - m_crossfadeX) * frameDelayL1 + m_crossfadeX * frameDelayL2;
    double frameDelayR =
        (1 - m_crossfadeX) * frameDelayR1 + m_crossfadeX * frameDelayR2;

    // Calculate the source and destination pointers for the current segment.
    unsigned offset = segment * framesPerSegment;
    const float* segmentSourceL = sourceL + offset;
    const float* segmentSourceR = sourceR + offset;
    float* segmentDestinationL = destinationL + offset;
    float* segmentDestinationR = destinationR + offset;

    // First run through delay lines for inter-aural time difference.
    m_delayLineL.setDelayFrames(frameDelayL);
    m_delayLineR.setDelayFrames(frameDelayR);
    m_delayLineL.process(segmentSourceL, segmentDestinationL, framesPerSegment);
    m_delayLineR.process(segmentSourceR, segmentDestinationR, framesPerSegment);

    bool needsCrossfading = m_crossfadeIncr;

    // Have the convolvers render directly to the final destination if we're not
    // cross-fading.
    float* convolutionDestinationL1 =
        needsCrossfading ? m_tempL1.data() : segmentDestinationL;
    float* convolutionDestinationR1 =
        needsCrossfading ? m_tempR1.data() : segmentDestinationR;
    float* convolutionDestinationL2 =
        needsCrossfading ? m_tempL2.data() : segmentDestinationL;
    float* convolutionDestinationR2 =
        needsCrossfading ? m_tempR2.data() : segmentDestinationR;

    // Now do the convolutions.
    // Note that we avoid doing convolutions on both sets of convolvers if we're
    // not currently cross-fading.

    if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) {
      m_convolverL1.process(kernelL1->fftFrame(), segmentDestinationL,
                            convolutionDestinationL1, framesPerSegment);
      m_convolverR1.process(kernelR1->fftFrame(), segmentDestinationR,
                            convolutionDestinationR1, framesPerSegment);
    }

    if (m_crossfadeSelection == CrossfadeSelection2 || needsCrossfading) {
      m_convolverL2.process(kernelL2->fftFrame(), segmentDestinationL,
                            convolutionDestinationL2, framesPerSegment);
      m_convolverR2.process(kernelR2->fftFrame(), segmentDestinationR,
                            convolutionDestinationR2, framesPerSegment);
    }

    if (needsCrossfading) {
      // Apply linear cross-fade.
      float x = m_crossfadeX;
      float incr = m_crossfadeIncr;
      for (unsigned i = 0; i < framesPerSegment; ++i) {
        segmentDestinationL[i] = (1 - x) * convolutionDestinationL1[i] +
                                 x * convolutionDestinationL2[i];
        segmentDestinationR[i] = (1 - x) * convolutionDestinationR1[i] +
                                 x * convolutionDestinationR2[i];
        x += incr;
      }
      // Update cross-fade value from local.
      m_crossfadeX = x;

      if (m_crossfadeIncr > 0 && fabs(m_crossfadeX - 1) < m_crossfadeIncr) {
        // We've fully made the crossfade transition from 1 -> 2.
        m_crossfadeSelection = CrossfadeSelection2;
        m_crossfadeX = 1;
        m_crossfadeIncr = 0;
      } else if (m_crossfadeIncr < 0 && fabs(m_crossfadeX) < -m_crossfadeIncr) {
        // We've fully made the crossfade transition from 2 -> 1.
        m_crossfadeSelection = CrossfadeSelection1;
        m_crossfadeX = 0;
        m_crossfadeIncr = 0;
      }
    }
  }
}
Example #22
void InputType::applyStep(const Decimal& current, int count, AnyStepHandling anyStepHandling, TextFieldEventBehavior eventBehavior, ExceptionState& exceptionState)
{
    // https://html.spec.whatwg.org/multipage/forms.html#dom-input-stepup

    StepRange stepRange(createStepRange(anyStepHandling));
    // 2. If the element has no allowed value step, then throw an
    // InvalidStateError exception, and abort these steps.
    if (!stepRange.hasStep()) {
        exceptionState.throwDOMException(InvalidStateError, "This form element does not have an allowed value step.");
        return;
    }

    // 3. If the element has a minimum and a maximum and the minimum is greater
    // than the maximum, then abort these steps.
    if (stepRange.minimum() > stepRange.maximum())
        return;

    // 4. If the element has a minimum and a maximum and there is no value
    // greater than or equal to the element's minimum and less than or equal to
    // the element's maximum that, when subtracted from the step base, is an
    // integral multiple of the allowed value step, then abort these steps.
    Decimal alignedMaximum = stepRange.stepSnappedMaximum();
    if (!alignedMaximum.isFinite())
        return;

    Decimal base = stepRange.stepBase();
    Decimal step = stepRange.step();
    EventQueueScope scope;
    Decimal newValue = current;
    const AtomicString& stepString = element().fastGetAttribute(stepAttr);
    if (!equalIgnoringCase(stepString, "any") && stepRange.stepMismatch(current)) {
        // Snap-to-step / clamping steps
        // If the current value is not matched to step value:
        // - The value should be the larger matched value nearest to 0 if count > 0
        //   e.g. <input type=number value=3 min=-100 step=3> -> 5
        // - The value should be the smaller matched value nearest to 0 if count < 0
        //   e.g. <input type=number value=3 min=-100 step=3> -> 2
        //

        DCHECK(!step.isZero());
        if (count < 0) {
            newValue = base + ((newValue - base) / step).floor() * step;
            ++count;
        } else if (count > 0) {
            newValue = base + ((newValue - base) / step).ceil() * step;
            --count;
        }
    }
    newValue = newValue + stepRange.step() * count;

    if (!equalIgnoringCase(stepString, "any"))
        newValue = stepRange.alignValueForStep(current, newValue);

    // 7. If the element has a minimum, and value is less than that minimum,
    // then set value to the smallest value that, when subtracted from the step
    // base, is an integral multiple of the allowed value step, and that is more
    // than or equal to minimum.
    // 8. If the element has a maximum, and value is greater than that maximum,
    // then set value to the largest value that, when subtracted from the step
    // base, is an integral multiple of the allowed value step, and that is less
    // than or equal to maximum.
    if (newValue > stepRange.maximum()) {
        newValue = alignedMaximum;
    } else if (newValue < stepRange.minimum()) {
        const Decimal alignedMinimum = base + ((stepRange.minimum() - base) / step).ceil() * step;
        DCHECK_GE(alignedMinimum, stepRange.minimum());
        newValue = alignedMinimum;
    }

    // 9. Let value as string be the result of running the algorithm to convert
    // a number to a string, as defined for the input element's type attribute's
    // current state, on value.
    // 10. Set the value of the element to value as string.
    setValueAsDecimal(newValue, eventBehavior, exceptionState);

    if (AXObjectCache* cache = element().document().existingAXObjectCache())
        cache->handleValueChanged(&element());
}
Example #23
void serialize(
    ThriftTensor& out,
    LongRange sizes,
    LongRange strides,
    folly::IOBuf&& data,
    ThriftTensorDataType dtype,
    size_t elementSize,
    ThriftTensorEndianness endianness,
    SharingMode sharing) {
  DCHECK(!data.isChained());
  if (endianness == ThriftTensorEndianness::NATIVE) {
    endianness = gMachineEndianness;
  } else {
    CHECK(endianness == gMachineEndianness)
      << "Non-native endianness not yet implemented";
  }

  int ndims = sizes.size();
  uint64_t dataSize = 1;
  uint64_t contiguousSize = 1;
  int firstContiguousDim = ndims - 1;

  if (!strides.empty()) {
    DCHECK_EQ(strides.size(), ndims);
    while (firstContiguousDim >= 0) {
      if (strides[firstContiguousDim] != contiguousSize) {
        break;
      }
      contiguousSize *= sizes[firstContiguousDim];
      --firstContiguousDim;
    }
    ++firstContiguousDim;
    dataSize = contiguousSize;
    for (int i = 0; i < firstContiguousDim; ++i) {
      dataSize *= sizes[i];
    }
  } else {
    for (auto s : sizes) {
      dataSize *= s;
    }
    contiguousSize = dataSize;
    firstContiguousDim = 0;
  }

  // Dimensions from firstContiguousDim till the last form a contiguous range
  // of contiguousSize elements; we'll copy / clone that in one go rather
  // than iterating through all elements.

  // We want bytes.
  dataSize *= elementSize;
  contiguousSize *= elementSize;

  DCHECK_LE(contiguousSize, dataSize);

  out.dataType = dtype;
  out.endianness = endianness;
  out.sizes.assign(sizes.begin(), sizes.end());

  if (ndims == 0) {
    // Empty tensor, nothing to do.
    out.data = folly::IOBuf();
    data = folly::IOBuf();
    return;
  }

  if (firstContiguousDim == 0) {
    // We're done.
    DCHECK_GE(data.length(), dataSize);
    data.trimEnd(data.length() - dataSize);
    detail::applySharingMode(data, sharing);
    out.data = std::move(data);
    return;
  }

  // We have to do this the hard way...
  folly::IOBufQueue outQueue;

  // If the contiguous chunk size is >= kMinCloneSize, we clone rather
  // than copying
  static constexpr uint64_t kMinCloneSize = 4 << 10;

  // Don't allocate huge contiguous buffers.
  // jemalloc defers to mmap() for buffers of 4MiB or more.
  static constexpr uint64_t kMaxBlockSize = 2 << 20;
  folly::io::QueueAppender appender(&outQueue,
                                    std::min(dataSize, kMaxBlockSize));

  std::vector<uint64_t> counter;
  counter.resize(firstContiguousDim);
  int idx = firstContiguousDim;
  const uint8_t* src = data.data();
  bool mayShare = false;
  switch (sharing) {
  case SHARE_NONE:
    break;
  case SHARE_IOBUF_MANAGED:
    mayShare = data.isManagedOne();
    break;
  case SHARE_ALL:
    mayShare = true;
    break;
  };
  while (idx >= 0) {
    if (idx == firstContiguousDim) {
      if (mayShare && contiguousSize >= kMinCloneSize) {
        appender.insert(partialCloneOne(data, src - data.data(),
                                        contiguousSize));
      } else {
        appender.push(src, contiguousSize);
      }
      --idx;
      continue;
    }
    src += strides[idx] * elementSize;
    if (++counter[idx] == sizes[idx]) {
      src -= sizes[idx] * strides[idx] * elementSize;
      counter[idx] = 0;
      --idx;
    } else {
      idx = firstContiguousDim;
    }
  }

  outQueue.move()->cloneInto(out.data);
}
Example #24
static inline Node* parentWithDepth(unsigned depth,
                                    const NodeSetVector& parents) {
  DCHECK_GE(parents.size(), depth + 1);
  return parents[parents.size() - 1 - depth];
}
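The DCHECK_GE above is exactly the bound needed to keep the index parents.size() - 1 - depth valid: depth 0 maps to the last element, depth 1 to the one before it, and so on. A minimal equivalent with standard types (Node here is a local placeholder, not the original class):

#include <cassert>
#include <vector>

struct Node {};  // placeholder type for illustration

static Node* parentWithDepthSketch(unsigned depth,
                                   const std::vector<Node*>& parents) {
  assert(parents.size() >= depth + 1);  // mirrors DCHECK_GE(parents.size(), depth + 1)
  return parents[parents.size() - 1 - depth];
}

int main() {
  Node a, b, c;
  std::vector<Node*> parents = {&a, &b, &c};
  return parentWithDepthSketch(0, parents) == &c ? 0 : 1;  // depth 0 -> last element
}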
Example #25
void PositionIteratorAlgorithm<Strategy>::decrement() {
  DCHECK(isValid());
  if (!m_anchorNode)
    return;

  // Assume that we have the following DOM tree:
  // A
  // |-B
  // | |-E
  // | +-F
  // |
  // |-C
  // +-D
  //   |-G
  //   +-H
  // Let |anchor| as |m_anchorNode| and
  // |child| as |m_nodeAfterPositionInAnchor|.
  // decrement() is complex but logically reverse of increment(), of course:)
  if (m_nodeAfterPositionInAnchor) {
    m_anchorNode = Strategy::previousSibling(*m_nodeAfterPositionInAnchor);
    if (m_anchorNode) {
      // Case #1-a. This is a reverse of increment()::Case#3-a.
      // |child| has a previous sibling.
      // Let |anchor| is B and |child| is F,
      // next |anchor| is E and |child| is null.
      m_nodeAfterPositionInAnchor = nullptr;
      m_offsetInAnchor = Strategy::hasChildren(*m_anchorNode)
                             ? 0
                             : Strategy::lastOffsetForEditing(m_anchorNode);
      // Decrement offset of |child|, or initialize it if it has never been
      // used.
      if (m_offsetsInAnchorNode[m_depthToAnchorNode] == kInvalidOffset)
        m_offsetsInAnchorNode[m_depthToAnchorNode] =
            Strategy::index(*m_nodeAfterPositionInAnchor);
      else
        --m_offsetsInAnchorNode[m_depthToAnchorNode];
      DCHECK_GE(m_offsetsInAnchorNode[m_depthToAnchorNode], 0);
      // Increment depth, initializing with the last offset.
      ++m_depthToAnchorNode;
      if (m_depthToAnchorNode >= m_offsetsInAnchorNode.size())
        m_offsetsInAnchorNode.append(m_offsetInAnchor);
      else
        m_offsetsInAnchorNode[m_depthToAnchorNode] = m_offsetInAnchor;
      return;
    } else {
      // Case #1-b. This is a reverse of increment()::Case#1.
      // |child| doesn't have a previous sibling.
      // Let |anchor| is B and |child| is E,
      // next |anchor| is A and |child| is B.
      m_nodeAfterPositionInAnchor =
          Strategy::parent(*m_nodeAfterPositionInAnchor);
      m_anchorNode = Strategy::parent(*m_nodeAfterPositionInAnchor);
      if (!m_anchorNode)
        return;
      m_offsetInAnchor = 0;
      // Decrement depth and initialize it if needed.
      DCHECK_GT(m_depthToAnchorNode, 0u);
      --m_depthToAnchorNode;
      if (m_offsetsInAnchorNode[m_depthToAnchorNode] == kInvalidOffset)
        m_offsetsInAnchorNode[m_depthToAnchorNode] =
            Strategy::index(*m_nodeAfterPositionInAnchor);
    }
    return;
  }

  if (Strategy::hasChildren(*m_anchorNode)) {
    // Case #2. This is a reverse of increment()::Case3-b.
    // Let |anchor| is B, next |anchor| is F.
    m_anchorNode = Strategy::lastChild(*m_anchorNode);
    m_offsetInAnchor = Strategy::hasChildren(*m_anchorNode)
                           ? 0
                           : Strategy::lastOffsetForEditing(m_anchorNode);
    // Increment depth, initializing with -1 because
    // |m_nodeAfterPositionInAnchor| is null and not needed yet.
    if (m_depthToAnchorNode >= m_offsetsInAnchorNode.size())
      m_offsetsInAnchorNode.append(kInvalidOffset);
    else
      m_offsetsInAnchorNode[m_depthToAnchorNode] = kInvalidOffset;
    ++m_depthToAnchorNode;
    return;
  } else {
    if (m_offsetInAnchor && m_anchorNode->layoutObject()) {
      // Case #3-a. This is a reverse of increment()::Case#2.
      // In this case |anchor| is a leaf(E,F,C,G or H) and
      // |m_offsetInAnchor| is not on the beginning of |anchor|.
      // Then just decrement |m_offsetInAnchor|.
      m_offsetInAnchor =
          previousGraphemeBoundaryOf(m_anchorNode, m_offsetInAnchor);
      return;
    } else {
      // Case #3-b. This is a reverse of increment()::Case#1.
      // In this case |anchor| is a leaf(E,F,C,G or H) and
      // |m_offsetInAnchor| is on the beginning of |anchor|.
      // Let |anchor| is E,
      // next |anchor| is B and |child| is E.
      m_nodeAfterPositionInAnchor = m_anchorNode;
      m_anchorNode = Strategy::parent(*m_anchorNode);
      if (!m_anchorNode)
        return;
      DCHECK_GT(m_depthToAnchorNode, 0u);
      --m_depthToAnchorNode;
      if (m_offsetsInAnchorNode[m_depthToAnchorNode] == kInvalidOffset)
        m_offsetsInAnchorNode[m_depthToAnchorNode] =
            Strategy::index(*m_nodeAfterPositionInAnchor);
    }
  }
}
Example #26
std::string SqlError::formatMessage(const std::string &sql_query) const {
  CHECK(!sql_query.empty()) << "SQL query is empty";
  CHECK_LT(sql_query.size(), static_cast<std::size_t>(std::numeric_limits<int>::max()));

  int error_line_number = line_number_;
  int error_column_number = column_number_;

  if (error_line_number == -1) {
    // If the error location is not available,
    // just append the error message directly.
    return std::string("ERROR: ").append(error_stream_.str()).append("\n");
  }

  DCHECK_GT(error_column_number, -1) << "Invalid column number";

  int current_line_number = 0;
  int current_line_begin_pos = 0;
  int current_line_end_pos = -1;

  // Find the ending index of the (<error_line_number>-1)-th line
  // of <sql_query>.
  if (current_line_number < error_line_number) {
    while (current_line_number < error_line_number) {
      current_line_begin_pos = sql_query.find('\n', current_line_begin_pos);
      DCHECK_GE(current_line_begin_pos, 0) << "Invalid line number";
      ++current_line_number;
      ++current_line_begin_pos;
    }
  }

  /*
   * The BISON may point the error to a position beyond the last line of
   * the SQL query. We move it to the position to the end of the last line.
   */
  if (current_line_begin_pos == static_cast<int>(sql_query.size()) && column_number_ == 0) {
    current_line_end_pos = current_line_begin_pos - 1;
    current_line_begin_pos = sql_query.rfind('\n', current_line_end_pos - 1) + 1;

    // Move the error line to the previous line.
    --error_line_number;
    error_column_number = current_line_end_pos - current_line_begin_pos;
  } else {
    current_line_end_pos = sql_query.find('\n', current_line_begin_pos);
    if (current_line_end_pos == -1) {
      current_line_end_pos = sql_query.size() - 1;
    }
    DCHECK(current_line_end_pos - current_line_begin_pos + 1 > column_number_) << "Invalid line and column number";
  }

  std::ostringstream error_stream;
  const int start_pos = getStartErrorPos(error_column_number + current_line_begin_pos, sql_query);
  const int end_pos = getEndErrorPos(error_column_number + current_line_begin_pos, sql_query);

  DCHECK_LE(start_pos, error_column_number + current_line_begin_pos);
  DCHECK_LE(error_column_number + current_line_begin_pos, end_pos);

  error_stream << "ERROR: " << getErrorMessage();
  error_stream << " (" << error_line_number + 1 << " : " << error_column_number + 1 << ")\n";

  // Append the snippet text.
  bool has_omitted_text = false;
  if (start_pos > current_line_begin_pos) {
    error_stream << "...";
    has_omitted_text = true;
  }
  error_stream << sql_query.substr(start_pos, end_pos - start_pos);
  if (end_pos < current_line_end_pos) {
    error_stream << "...";
  }
  error_stream << "\n";

  // Append a caret.
  if (has_omitted_text) {
    error_stream << "   ";
  }
  for (int i = start_pos; i < error_column_number + current_line_begin_pos; ++i) {
    if (sql_query.at(i) == '\t') {
      error_stream << "\t";
    } else {
      error_stream << " ";
    }
  }
  error_stream << "^\n";

  return error_stream.str();
}
Example #27
void SoftmaxWithLossObjectnessPresenceLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  //int dim = prob_.count() / outer_num_;
  LOG(INFO) << "inner num: " << inner_num_ << ", bottom[1]->count(): " << bottom[1]->count();
  int mult = outer_num_ * inner_num_ * 2;
  CHECK_EQ(mult, bottom[1]->count(0))
      << "Number of labels must match the number of predictions because there are two channels, "
      << "one for gt labels per pixel and one for objectness labels per pixel; "
      << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
      << "label count (number of labels) must be 2*N*H*W, "
      << "with integer values in {0, 1, ..., C-1}.";
  int count = 0;
  Dtype loss = 0;

  vector<Dtype> present_classes(label, label + inner_num_);  // Gets the first channel of gt
  // Gets all unique elements
  sort(present_classes.begin(), present_classes.end());
  present_classes.erase(unique(present_classes.begin(), present_classes.end()),
                        present_classes.end());
  typename vector<Dtype>::iterator it;
  present_classes.erase(remove(present_classes.begin(), present_classes.end(), 255),
                        present_classes.end());  // Removes 255
  LOG(INFO) << "size of present classes: " << present_classes.size() << ", present_classes contains:";
  for (it = present_classes.begin(); it != present_classes.end(); ++it) {
    LOG(INFO) << ' ' << *it;
  }

  for (int j = 0; j < inner_num_; j++) {
    const int label_value = static_cast<int>(label[j]);
    // We don't know the target label because we don't have a user click
    if (has_ignore_label_ && label_value == ignore_label_) {
      // Modify this block
      const int objectness = static_cast<int>(label[inner_num_ + j]);  // A value between 0 and 255. Objectness
      double S0 = std::max(prob_data[0 * inner_num_ + j], Dtype(FLT_MIN));  // P(background) in our model
      double P = 1.0 - ((double)objectness / 255.0);  // P(background) according to objectness prior
      double Q = 0;
      for (typename vector<Dtype>::iterator iter = present_classes.begin();
           iter != present_classes.end(); iter++) {
        int c = *iter;  // *iter is the class
        double Sc = std::max(prob_data[c * inner_num_ + j], Dtype(FLT_MIN));  // P(class=c)
        Q += Sc;
      }
      loss -= (P * log(S0) + (1 - P) * log(Q));
    } else {
      // Supervised, we do know the target label
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      loss -= log(std::max(prob_data[label_value * inner_num_ + j], Dtype(FLT_MIN)));
    }
    ++count;
  }
  if (normalize_) {
    top[0]->mutable_cpu_data()[0] = loss / count;
  } else {
    top[0]->mutable_cpu_data()[0] = loss / outer_num_;
  }
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Example #28
void AsyncIO::decrementPending() {
  auto p = pending_.fetch_add(-1, std::memory_order_acq_rel);
  DCHECK_GE(p, 1);
}
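fetch_add returns the value held before the addition, so the check asserts that at least one operation was pending when decrementPending() ran, i.e. completions never outnumber submissions. A standalone sketch of the same idiom:

#include <atomic>
#include <cassert>

std::atomic<long> pending{0};

void incrementPending() { pending.fetch_add(1, std::memory_order_acq_rel); }

void decrementPending() {
  // fetch_add(-1) yields the previous value; it must have been >= 1.
  long p = pending.fetch_add(-1, std::memory_order_acq_rel);
  assert(p >= 1);  // mirrors DCHECK_GE(p, 1)
}

int main() {
  incrementPending();
  decrementPending();
  return pending.load() == 0 ? 0 : 1;
}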
Example #29
static Position createPosition(Node* node, int offset) {
    DCHECK_GE(offset, 0);
    if (!node)
        return Position();
    return Position(node, offset);
}
Example #30
static bool isNonLatin1Separator(UChar32 character) {
  DCHECK_GE(character, 256);
  return U_GET_GC_MASK(character) &
         (U_GC_S_MASK | U_GC_P_MASK | U_GC_Z_MASK | U_GC_CF_MASK);
}