template <typename Dtype>
void EmbedLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
  if (this->param_propagate_down_[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    const Dtype* bottom_data = bottom[0]->cpu_data();
    // Gradient with respect to weight
    Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
    int index;
    for (int n = 0; n < M_; ++n) {
      index = static_cast<int>(bottom_data[n]);
      DCHECK_GE(index, 0);
      DCHECK_LT(index, K_);
      DCHECK_EQ(static_cast<Dtype>(index), bottom_data[n])
          << "non-integer input";
      caffe_axpy(N_, Dtype(1), top_diff + n * N_, weight_diff + index * N_);
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
    caffe_cpu_gemv<Dtype>(CblasTrans, M_, N_, Dtype(1), top_diff,
        bias_multiplier_.cpu_data(), Dtype(1), bias_diff);
  }
}
ColorHistogram ColorHistogram::ScaleHistogram(const vector<float>& gain) const {
  const ColorHistogramIndexLUT& lut =
      ColorHistogramIndexLUTFactory::Instance().GetLUT(
          lum_bins_, color_bins_, color_bins_);
  ColorHistogram result = EmptyCopy();
  if (!IsSparse()) {
    for (int i = 0; i < total_bins_; ++i) {
      const float value = bins_[i];
      if (value) {
        const std::tuple<int, int, int>& idx_3d = lut.Ind2Sub(i);
        const float bin_lum =
            std::min(lum_bins_ - 1.f, std::get<0>(idx_3d) * gain[0]);
        const float bin_col1 =
            std::min(color_bins_ - 1.f, std::get<1>(idx_3d) * gain[1]);
        const float bin_col2 =
            std::min(color_bins_ - 1.f, std::get<2>(idx_3d) * gain[2]);
        result.AddValueInterpolated(bin_lum, bin_col1, bin_col2, value);
      }
    }
  } else {
    for (const auto& bin : sparse_bins_) {
      const std::tuple<int, int, int>& idx_3d = lut.Ind2Sub(bin.first);
      const float bin_lum =
          std::min(lum_bins_ - 1.f, std::get<0>(idx_3d) * gain[0]);
      const float bin_col1 =
          std::min(color_bins_ - 1.f, std::get<1>(idx_3d) * gain[1]);
      const float bin_col2 =
          std::min(color_bins_ - 1.f, std::get<2>(idx_3d) * gain[2]);
      result.AddValueInterpolated(bin_lum, bin_col1, bin_col2, bin.second);
    }
  }
  DCHECK_LT(fabs(WeightSum() - result.WeightSum()), 1e-3f);
  return result;
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  int dim = prob_.count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  if (normalize_) {
    top[0]->mutable_cpu_data()[0] = loss / count;
  } else {
    top[0]->mutable_cpu_data()[0] = loss / outer_num_;
  }
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error(
          "attempted to coalesce more data than available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}
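// A hedged usage sketch for the chain-coalescing path above, using folly's
// public IOBuf API (create / prependChain / coalesce). coalesceSlow() itself
// is an internal helper reached through coalesce() when the head buffer does
// not already hold the whole chain; the sizes below are illustrative only.
#include <folly/io/IOBuf.h>

void CoalesceExample() {
  auto head = folly::IOBuf::create(16);
  head->append(16);
  auto tail = folly::IOBuf::create(16);
  tail->append(16);
  // Build a two-buffer chain, then flatten it into one contiguous buffer.
  head->prependChain(std::move(tail));
  folly::ByteRange flat = head->coalesce();  // takes the slow path on chains
  DCHECK_EQ(flat.size(), 32);
}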
/**
 * @brief Append an untyped value to this NativeColumnVector.
 * @warning Appending a new value must not cause the number of values in this
 *          NativeColumnVector to exceed the reserved length supplied to the
 *          constructor.
 * @warning Do not use this with NULL values. Use appendNullValue() instead.
 *
 * @param value A pointer to an untyped value to append to this
 *        NativeColumnVector.
 **/
inline void appendUntypedValue(const void *value) {
  DCHECK_LT(actual_length_, reserved_length_);
  std::memcpy(static_cast<char*>(values_) + (actual_length_ * type_length_),
              value,
              type_length_);
  ++actual_length_;
}
LeastSquaresVelocityTrackerStrategy::LeastSquaresVelocityTrackerStrategy(
    uint32_t degree, Weighting weighting)
    : degree_(degree), weighting_(weighting) {
  DCHECK_LT(degree_, static_cast<uint32_t>(Estimator::kMaxDegree));
  Clear();
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  const Dtype* sample_weight = bottom[2]->cpu_data();
  int num = prob_.num();
  int dim = prob_.count() / num;
  int spatial_dim = prob_.height() * prob_.width();
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < num; ++i) {
    for (int j = 0; j < spatial_dim; j++) {
      const int label_value = static_cast<int>(label[i * spatial_dim + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.channels());
      Dtype w = sample_weight[i * spatial_dim + j];
      loss -= w * log(std::max(
          prob_data[i * dim + label_value * spatial_dim + j],
          Dtype(FLT_MIN)));
      ++count;
    }
  }
  if (normalize_) {
    top[0]->mutable_cpu_data()[0] = loss / count;
  } else {
    top[0]->mutable_cpu_data()[0] = loss / num;
  }
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
void ScriptProcessorHandler::fireProcessEventForOfflineAudioContext(
    unsigned doubleBufferIndex, WaitableEvent* waitableEvent) {
  DCHECK(isMainThread());
  DCHECK_LT(doubleBufferIndex, 2u);
  if (doubleBufferIndex > 1) {
    waitableEvent->signal();
    return;
  }

  AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
  AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
  DCHECK(outputBuffer);
  if (!outputBuffer) {
    waitableEvent->signal();
    return;
  }

  if (node() && context() && context()->getExecutionContext()) {
    // We do not need a process lock here because the offline render thread
    // is locked by the waitable event.
    double playbackTime = (context()->currentSampleFrame() + m_bufferSize) /
        static_cast<double>(context()->sampleRate());
    node()->dispatchEvent(
        AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
  }

  waitableEvent->signal();
}
void ScriptProcessorHandler::fireProcessEvent(unsigned doubleBufferIndex) {
  DCHECK(isMainThread());
  DCHECK_LT(doubleBufferIndex, 2u);
  if (doubleBufferIndex > 1)
    return;

  AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
  AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
  DCHECK(outputBuffer);
  if (!outputBuffer)
    return;

  // Avoid firing the event if the document has already gone away.
  if (node() && context() && context()->getExecutionContext()) {
    // This synchronizes with process().
    MutexLocker processLocker(m_processEventLock);

    // Calculate a playbackTime with the buffer size which needs to be
    // processed each time onaudioprocess is called. The outputBuffer being
    // passed to JS will be played after exhausting the previous outputBuffer
    // by double-buffering.
    double playbackTime = (context()->currentSampleFrame() + m_bufferSize) /
        static_cast<double>(context()->sampleRate());

    // Call the JavaScript event handler which will do the audio processing.
    node()->dispatchEvent(
        AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
  }
}
/**
 * @brief Add a block to a partition.
 *
 * @param block The id of the block to be added to the partition.
 * @param part_id The id of the partition to add the block to.
 **/
inline void addBlockToPartition(const block_id block,
                                const partition_id part_id) {
  DCHECK_LT(part_id, num_partitions_);
  SpinSharedMutexExclusiveLock<false> lock(
      blocks_in_partition_mutexes_[part_id]);
  blocks_in_partition_[part_id].insert(block);
}
/**
 * @brief Get a value in this NativeColumnVector as a TypedValue.
 *
 * @param position The position of the value to get.
 * @return The value at position.
 **/
inline TypedValue getTypedValue(const std::size_t position) const {
  DCHECK_LT(position, actual_length_);
  return (null_bitmap_ && null_bitmap_->getBit(position))
      ? type_.makeNullValue()
      : type_.makeValue(
            static_cast<const char*>(values_) + (position * type_length_),
            type_length_);
}
template <typename Ftype, typename Btype>
void AccuracyLayer<Ftype, Btype>::Forward_cpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  float accuracy = 0.F;
  const Ftype* bottom_data = bottom[0]->cpu_data<Ftype>();
  const Ftype* bottom_label = bottom[1]->cpu_data<Ftype>();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  if (top.size() > 1) {
    nums_buffer_.set_data(0.F);
    top[1]->set_data(0.F);
  }
  std::vector<std::pair<float, int>> bottom_data_vector(num_labels);
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      if (top.size() > 1) {
        ++nums_buffer_.mutable_cpu_data()[label_value];
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector[k] = std::make_pair(
            static_cast<float>(bottom_data[i * dim + k * inner_num_ + j]), k);
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<float, int>>());
      // check if true label is in top k predictions
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value) {
          accuracy += 1.F;
          if (top.size() > 1) {
            Ftype* top_label = top[1]->mutable_cpu_data<Ftype>();
            top_label[label_value] = top_label[label_value] + 1.;
          }
          break;
        }
      }
      ++count;
    }
  }
  top[0]->mutable_cpu_data<Ftype>()[0] = accuracy / count;
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      const float num = nums_buffer_.cpu_data()[i];
      Ftype* top_label = top[1]->mutable_cpu_data<Ftype>();
      top_label[i] = num == 0.F ? 0. : top_label[i] / num;
    }
  }
  // Accuracy layer should not be used as a loss function.
}
/**
 * @brief Get all the blocks from a particular partition.
 *
 * @param part_id The id of the partition to retrieve the blocks from.
 * @return The block_ids of blocks belonging to this partition at the moment
 *         when this method is called.
 **/
inline const std::vector<block_id> getBlocksInPartition(
    const partition_id part_id) const {
  DCHECK_LT(part_id, num_partitions_);
  SpinSharedMutexSharedLock<false> lock(
      blocks_in_partition_mutexes_[part_id]);
  return std::vector<block_id>(blocks_in_partition_[part_id].begin(),
                               blocks_in_partition_[part_id].end());
}
void NotificationResourcesLoader::didLoadActionIcon(size_t actionIndex,
                                                    const SkBitmap& image) {
  DCHECK_LT(actionIndex, m_actionIcons.size());

  m_actionIcons[actionIndex] = NotificationImageLoader::scaleDownIfNeeded(
      image, NotificationImageLoader::Type::ActionIcon);
  didFinishRequest();
}
/**
 * @brief Add a WorkOrder proto generated from a given operator.
 *
 * @param proto A pointer to the WorkOrder proto to be added. The container
 *        takes ownership of the proto.
 * @param operator_index The index of the operator in the query DAG.
 **/
void addWorkOrderProto(serialization::WorkOrder *proto,
                       const std::size_t operator_index) {
  DCHECK(proto != nullptr);
  DCHECK_LT(operator_index, num_operators_);
  operator_containers_[operator_index].emplace(
      std::unique_ptr<serialization::WorkOrder>(proto));
}
/**
 * @brief Overwrite the value at the specified position with the supplied
 *        untyped value.
 * @warning Do not use this with NULL values. Use positionalWriteNullValue()
 *          instead.
 * @warning You must call prepareForPositionalWrites() BEFORE calling this
 *          method.
 * @warning Do NOT use positional writes in combination with appends.
 * @warning It is intended that this and other positional write methods
 *          should be called exactly once for each position (if this is
 *          violated, NULLs may not be tracked properly).
 *
 * @param position The position of the value in this NativeColumnVector to
 *        overwrite.
 * @param value A pointer to an untyped value to write into this
 *        NativeColumnVector.
 **/
inline void positionalWriteUntypedValue(const std::size_t position,
                                        const void *value) {
  DCHECK_LT(position, actual_length_);
  DCHECK(value != nullptr);
  std::memcpy(static_cast<char*>(values_) + (position * type_length_),
              value,
              type_length_);
}
template <typename Dtype>
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  vector<Dtype> maxval(top_k_ + 1);
  vector<int> max_id(top_k_ + 1);
  if (top.size() > 1) {
    caffe_set(nums_buffer_.count(), Dtype(0),
              nums_buffer_.mutable_cpu_data());
    caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data());
  }
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value];
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      std::vector<std::pair<Dtype, int> > bottom_data_vector;
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k * inner_num_ + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // check if true label is in top k predictions
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value &&
            (threshold_ <= 0 || bottom_data_vector[k].first >= threshold_)) {
          ++accuracy;
          if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value];
          break;
        }
      }
      ++count;
    }
  }
  // LOG(INFO) << "Accuracy: " << accuracy;
  top[0]->mutable_cpu_data()[0] = accuracy / count;
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      top[1]->mutable_cpu_data()[i] = nums_buffer_.cpu_data()[i] == 0 ? 0
          : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i];
    }
  }
  // Accuracy layer should not be used as a loss function.
}
/**
 * @brief Remove a block from a partition.
 *
 * @param block The id of the block to be removed from the partition.
 * @param part_id The id of the partition to remove the block from.
 **/
inline void removeBlockFromPartition(const block_id block,
                                     const partition_id part_id) {
  DCHECK_LT(part_id, num_partitions_);
  SpinSharedMutexExclusiveLock<false> lock(
      blocks_in_partition_mutexes_[part_id]);
  std::unordered_set<block_id> &blocks_in_partition =
      blocks_in_partition_[part_id];
  blocks_in_partition.erase(block);
}
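// A minimal usage sketch for the partition bookkeeping methods above
// (addBlockToPartition / getBlocksInPartition / removeBlockFromPartition).
// The owning `catalog` object and its type name are hypothetical; only the
// three member functions shown in this section are assumed.
void MoveBlockExample(PartitionedCatalog &catalog,  // hypothetical owner type
                      const block_id block,
                      const partition_id from,
                      const partition_id to) {
  // Each call takes the per-partition mutex internally, so no external
  // locking is needed; the two calls together are not atomic, however.
  catalog.removeBlockFromPartition(block, from);
  catalog.addBlockToPartition(block, to);
  // getBlocksInPartition() returns a snapshot copy taken under a shared
  // lock, so the result stays valid even if other threads mutate the
  // partition afterwards.
  const std::vector<block_id> snapshot = catalog.getBlocksInPartition(to);
  DCHECK(!snapshot.empty());
}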
size_t WaitableEvent::waitMultiple(const WTF::Vector<WaitableEvent*>& events) {
  std::vector<base::WaitableEvent*> baseEvents;
  for (size_t i = 0; i < events.size(); ++i)
    baseEvents.push_back(events[i]->m_impl.get());
  size_t idx =
      base::WaitableEvent::WaitMany(baseEvents.data(), baseEvents.size());
  DCHECK_LT(idx, events.size());
  return idx;
}
FieldTrial::FieldTrial(const std::string& name,
                       const Probability total_probability,
                       const std::string& default_group_name,
                       const int year,
                       const int month,
                       const int day_of_month)
    : name_(name),
      divisor_(total_probability),
      default_group_name_(default_group_name),
      random_(static_cast<Probability>(divisor_ * RandDouble())),
      accumulated_group_probability_(0),
      next_group_number_(kDefaultGroupNumber + 1),
      group_(kNotFinalized),
      enable_field_trial_(true) {
  DCHECK_GT(total_probability, 0);
  DCHECK(!name_.empty());
  DCHECK(!default_group_name_.empty());
  FieldTrialList::Register(this);

  DCHECK_GT(year, 1970);
  DCHECK_GT(month, 0);
  DCHECK_LT(month, 13);
  DCHECK_GT(day_of_month, 0);
  DCHECK_LT(day_of_month, 32);

  Time::Exploded exploded;
  exploded.year = year;
  exploded.month = month;
  exploded.day_of_week = 0;  // Should be unused.
  exploded.day_of_month = day_of_month;
  exploded.hour = 0;
  exploded.minute = 0;
  exploded.second = 0;
  exploded.millisecond = 0;

  Time expiration_time = Time::FromLocalExploded(exploded);
  if (GetBuildTime() > expiration_time)
    Disable();
}
/**
 * @brief Append a TypedValue to this NativeColumnVector.
 *
 * @param value A value to append to this NativeColumnVector.
 **/
inline void appendTypedValue(const TypedValue &value) {
  DCHECK_LT(actual_length_, reserved_length_);
  DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
  if (null_bitmap_ && value.isNull()) {
    null_bitmap_->setBit(actual_length_, true);
  } else {
    DCHECK(!value.isNull());
    value.copyInto(static_cast<char*>(values_) +
                   (actual_length_ * type_length_));
  }
  ++actual_length_;
}
/**
 * @brief Overwrite the value at the specified position with the supplied
 *        TypedValue.
 * @warning You must call prepareForPositionalWrites() BEFORE calling this
 *          method.
 * @warning Do NOT use positional writes in combination with appends.
 * @warning It is intended that this and other positional write methods
 *          should be called exactly once for each position (if this is
 *          violated, NULLs may not be tracked properly).
 *
 * @param position The position of the value in this NativeColumnVector to
 *        overwrite.
 * @param value A TypedValue to write into this NativeColumnVector.
 **/
inline void positionalWriteTypedValue(const std::size_t position,
                                      const TypedValue &value) {
  DCHECK_LT(position, actual_length_);
  DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
  if (null_bitmap_ && value.isNull()) {
    null_bitmap_->setBit(position, true);
  } else {
    DCHECK(!value.isNull());
    value.copyInto(static_cast<char*>(values_) + (position * type_length_));
  }
}
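// A minimal sketch contrasting the two NativeColumnVector write protocols
// documented above. The function and variable names here are hypothetical;
// only the member functions shown in this section (appendTypedValue /
// positionalWriteTypedValue / getTypedValue) plus the documented
// prepareForPositionalWrites() are assumed.
inline void FillByAppend(NativeColumnVector &column, const TypedValue &v) {
  // Append protocol: values go in sequentially, bounded by the reserved
  // length passed to the constructor. Reads go through getTypedValue(),
  // which also handles NULL tracking.
  column.appendTypedValue(v);
  const TypedValue read_back = column.getTypedValue(0);
  (void) read_back;
}

inline void FillByPosition(NativeColumnVector &column, const TypedValue &v) {
  // Positional protocol: prepareForPositionalWrites() must run first, each
  // position is then written exactly once, and positional writes must never
  // be mixed with appends on the same vector.
  column.prepareForPositionalWrites();
  column.positionalWriteTypedValue(0, v);
}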
template <typename Dtype>
void SelectLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  Dtype* top_data = top[0]->mutable_cpu_data();
  const Dtype* select_data = bottom[num_cand_]->cpu_data();
  for (int i = 0; i < outer_dim_; ++i) {
    const int index = static_cast<int>(select_data[i]);
    DCHECK_GE(index, 0);
    DCHECK_LT(index, num_cand_);
    caffe_copy(inner_dim_, bottom[index]->cpu_data() + inner_dim_ * i,
               top_data);
    top_data += inner_dim_;
  }
}
template <typename Dtype, typename MItype, typename MOtype>
void InfogainLossLayer<Dtype, MItype, MOtype>::Backward_cpu(
    const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<MItype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down.size() > 2 && propagate_down[2]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to infogain inputs.";
  }
  if (propagate_down[0]) {
    const Dtype* prob_data = prob_.cpu_data();
    const Dtype* bottom_label = bottom[1]->cpu_data();
    const Dtype* infogain_mat = NULL;
    if (bottom.size() < 3) {
      infogain_mat = infogain_.cpu_data();
    } else {
      infogain_mat = bottom[2]->cpu_data();
      // H is provided as a "bottom" and might change: sum rows every time.
      sum_rows_of_H(bottom[2]);
    }
    const Dtype* sum_rows_H = sum_rows_H_.cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int_tp dim = bottom[0]->count() / outer_num_;
    int_tp count = 0;
    for (int_tp i = 0; i < outer_num_; ++i) {
      for (int_tp j = 0; j < inner_num_; ++j) {
        const int_tp label_value =
            static_cast<int_tp>(bottom_label[i * inner_num_ + j]);
        DCHECK_GE(label_value, 0);
        DCHECK_LT(label_value, num_labels_);
        if (has_ignore_label_ && label_value == ignore_label_) {
          for (int_tp l = 0; l < num_labels_; ++l) {
            bottom_diff[i * dim + l * inner_num_ + j] = 0;
          }
        } else {
          for (int_tp l = 0; l < num_labels_; ++l) {
            bottom_diff[i * dim + l * inner_num_ + j] =
                prob_data[i * dim + l * inner_num_ + j] *
                    sum_rows_H[label_value] -
                infogain_mat[label_value * num_labels_ + l];
          }
          ++count;
        }
      }
    }
    // Scale gradient
    Dtype loss_weight = top[0]->cpu_diff()[0] /
        get_normalizer(normalization_, count);
    caffe_scal(bottom[0]->count(), loss_weight, bottom_diff);
  }
}
/**
 * @brief Get a WorkOrder proto for a given operator.
 *
 * @param operator_index The index of the operator.
 *
 * @return A released WorkOrder proto owned by the caller, or nullptr if no
 *         WorkOrder proto is available.
 **/
serialization::WorkOrder* getWorkOrderProto(const std::size_t operator_index) {
  DCHECK_LT(operator_index, num_operators_);
  if (operator_containers_[operator_index].empty()) {
    return nullptr;
  }
  serialization::WorkOrder *proto =
      operator_containers_[operator_index].front().release();
  operator_containers_[operator_index].pop();
  return proto;
}
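// A minimal producer/consumer sketch for the WorkOrder proto container
// above. The `container` instance, its type name, and the drain loop are
// hypothetical; only addWorkOrderProto() and getWorkOrderProto() from this
// section are assumed. Note the ownership handoff: the container takes
// ownership on add, and getWorkOrderProto() releases it back to the caller.
void DrainOperatorExample(WorkOrderProtosContainer &container,
                          const std::size_t operator_index) {
  container.addWorkOrderProto(new serialization::WorkOrder, operator_index);
  while (serialization::WorkOrder *proto =
             container.getWorkOrderProto(operator_index)) {
    std::unique_ptr<serialization::WorkOrder> owned(proto);  // reclaim ownership
    // ... dispatch the WorkOrder to a worker thread here ...
  }
}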
double BitsToOpenEndedUnitInterval(uint64 bits) {
  // For maximum precision, keep as many random bits as a double's mantissa
  // can represent, then scale by the appropriate power of two to obtain a
  // double in [0, 1).  IEEE 754 doubles provide 53 bits of precision.
  COMPILE_ASSERT(std::numeric_limits<double>::radix == 2,
                 otherwise_use_scalbn);
  static const int kBits = std::numeric_limits<double>::digits;
  uint64 random_bits = bits & ((GG_UINT64_C(1) << kBits) - 1);
  double result = ldexp(static_cast<double>(random_bits), -1 * kBits);
  DCHECK_GE(result, 0.0);
  DCHECK_LT(result, 1.0);
  return result;
}
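// A worked example of the mapping above, under the usual assumption that a
// double has a 53-bit mantissa (kBits == 53):
//   bits = 0              ->  ldexp(0, -53)        = 0.0
//   bits = 1              ->  ldexp(1, -53)        = 2^-53   (smallest step)
//   bits = 2^53 - 1 (max) ->  ldexp(2^53 - 1, -53) = 1.0 - 2^-53
// Every result therefore lies in [0, 1), matching the DCHECKs, and
// consecutive outputs are uniformly spaced 2^-53 apart.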
template <typename Dtype>
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  if (top.size() > 1) {
    caffe_set(nums_buffer_.count(), Dtype(0),
              nums_buffer_.mutable_cpu_data());
    caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data());
  }
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value];
      const Dtype prob_of_true_class =
          bottom_data[i * dim + label_value * inner_num_ + j];
      int num_better_predictions = -1;  // true_class also counts as "better"
      // Top-k accuracy
      for (int k = 0; k < num_labels && num_better_predictions < top_k_; ++k) {
        num_better_predictions +=
            (bottom_data[i * dim + k * inner_num_ + j] >= prob_of_true_class);
      }
      // check if there are less than top_k_ predictions
      if (num_better_predictions < top_k_) {
        ++accuracy;
        if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value];
      }
      ++count;
    }
  }
  // LOG(INFO) << "Accuracy: " << accuracy;
  top[0]->mutable_cpu_data()[0] = (count == 0) ? 0 : (accuracy / count);
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      top[1]->mutable_cpu_data()[i] = nums_buffer_.cpu_data()[i] == 0 ? 0
          : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i];
    }
  }
  // Accuracy layer should not be used as a loss function.
}
static const AtomicString& valueName(CSSValueID valueID) {
  DCHECK_GE(valueID, 0);
  DCHECK_LT(valueID, numCSSValueKeywords);

  if (valueID < 0)
    return nullAtom;

  static AtomicString* keywordStrings =
      new AtomicString[numCSSValueKeywords];  // Leaked intentionally.
  AtomicString& keywordString = keywordStrings[valueID];
  if (keywordString.isNull())
    keywordString = getValueName(valueID);
  return keywordString;
}
void DocumentLoader::commitData(const char* bytes, size_t length) {
  DCHECK_LT(m_state, MainResourceDone);
  ensureWriter(m_response.mimeType());

  // This can happen if document.close() is called by an event handler while
  // there's still pending incoming data.
  if (m_frame && !m_frame->document()->parsing())
    return;

  if (length)
    m_dataReceived = true;

  m_writer->addData(bytes, length);
}
template <typename Dtype>
void SwitchLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int selector_ind = bottom.size() - 1;
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int num_elem =
      top[0]->channels() * top[0]->height() * top[0]->width();
  for (int n = 0; n < bottom[selector_ind]->num(); n++) {
    // Check integrality on the raw selector value before truncating it;
    // checking floor() after the static_cast would always pass.
    const Dtype selector = bottom[selector_ind]->data_at(n, 0, 0, 0);
    DCHECK(floor(selector) == selector) << "Index should be an integer";
    const int index = static_cast<int>(selector);
    DCHECK_GE(index, 0) << "Index should be non-negative";
    DCHECK_LT(index, selector_ind) << "Index should be less than "
                                   << selector_ind;
    const Dtype* bottom_data = bottom[index]->cpu_data();
    caffe_copy(num_elem, bottom_data + bottom[index]->offset(n),
               top_data + top[0]->offset(n));
  }
}