Example #1
0
/*
 * CPU forward pass for a softmax layer.
 *
 * Without a softmax tree, a single softmax_cpu call handles all of the
 * layer's groups across the batch. With a hierarchical softmax tree, each
 * tree group gets its own independent softmax over its slice of the input.
 */
void forward_softmax_layer(const softmax_layer l, network_state state)
{
    if (!l.softmax_tree) {
        /* Flat case: l.groups softmaxes of l.inputs/l.groups values each. */
        int group_size = l.inputs / l.groups;
        softmax_cpu(state.input, group_size, l.batch, l.inputs, l.groups, group_size, 1, l.temperature, l.output);
        return;
    }

    /* Hierarchical case: one softmax per tree group, laid out contiguously. */
    int offset = 0;
    int g;
    for (g = 0; g < l.softmax_tree->groups; ++g) {
        int size = l.softmax_tree->group_size[g];
        softmax_cpu(state.input + offset, size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + offset);
        offset += size;
    }
}
Example #2
0
// CPU forward pass of a YOLO "region" layer.
//
// For each of num_box anchors per image the pass:
//   1. copies the raw input verbatim into the output buffer,
//   2. applies a logistic activation in place to the first two coordinate
//      channels (2*hw values) and to the objectness channel (hw values),
//   3. applies a softmax over the class scores of every anchor, reading
//      from the untouched input and writing into the output buffer.
//
// Assumes dims is NCHW: dims[0]=batch, dims[1]=channels, dims[2]=H,
// dims[3]=W — TODO confirm against the shape producer.
// Returns true unconditionally.
bool Run(Node * node)
{
    const Tensor * input_tensor=node->GetInputTensor(0);
    Tensor * output_tensor=node->GetOutputTensor(0);

    const std::vector<int>& dims=input_tensor->GetShape().GetDim();

    float *input = (float *)get_tensor_mem(input_tensor);
    float *output = (float *)get_tensor_mem(output_tensor);

    // The op is a Region (the old name "reorg_op" was a misnomer).
    Region *region_op = dynamic_cast<Region *>(node->GetOp());
    RegionParam *param = region_op->GetParam();

    const int hw = dims[2] * dims[3];
    const int chw = dims[1] * hw;
    const int nchw = dims[0] * chw;
    const int num_box = param->num_box;
    const int num_class = param->num_classes;
    const int coords = param->coords;

    // Start from a verbatim copy; activations below overwrite it in place.
    memcpy(output, input, nchw * sizeof(float));

    for (int b = 0; b < dims[0]; b++)
    {
        for (int n = 0; n < num_box; n++)
        {
            // Logistic on the x/y coordinate channels of this anchor.
            int index = entry_index(b, n * hw, 0, hw, chw, num_class);
            logit_activate_array(output + index, 2 * hw);
            // Logistic on the objectness channel of this anchor.
            index = entry_index(b, n * hw, coords, hw, chw, num_class);
            logit_activate_array(output + index, hw);
            // NOTE: a third entry_index(b, n*hw, coords+1, ...) call was
            // computed here originally but its result was never used
            // (dead store); the class-score index is computed once below.
        }
    }

    // Softmax over the class scores of all anchors in all images.
    int index = entry_index(0, 0, coords + 1, hw, chw, num_class);
    softmax_cpu(input + index, num_class, dims[0] * num_box, chw / num_box, hw, hw, output + index);

    return true;
}