Пример #1
0
void PyNet::set_input_arrays(bp::object data_obj, bp::object labels_obj) {
  // The first layer must be a MemoryDataLayer: it is the only layer type
  // that accepts in-memory input via Reset().
  shared_ptr<MemoryDataLayer<float> > memory_layer =
    boost::dynamic_pointer_cast<MemoryDataLayer<float> >(net_->layers()[0]);
  if (!memory_layer) {
    throw std::runtime_error("set_input_arrays may only be called if the"
        " first layer is a MemoryDataLayer");
  }

  // Validate that both numpy arrays are contiguous and shaped to match the
  // layer: data must be (N, channels, height, width), labels (N, 1, 1, 1).
  PyArrayObject* const data_array =
      reinterpret_cast<PyArrayObject*>(data_obj.ptr());
  PyArrayObject* const labels_array =
      reinterpret_cast<PyArrayObject*>(labels_obj.ptr());
  check_contiguous_array(data_array, "data array", memory_layer->channels(),
      memory_layer->height(), memory_layer->width());
  check_contiguous_array(labels_array, "labels array", 1, 1, 1);

  const npy_intp num_items = PyArray_DIMS(data_array)[0];
  if (num_items != PyArray_DIMS(labels_array)[0]) {
    throw std::runtime_error("data and labels must have the same first"
        " dimension");
  }
  if (num_items % memory_layer->batch_size() != 0) {
    throw std::runtime_error("first dimensions of input arrays must be a"
        " multiple of batch size");
  }

  // Keep the Python objects alive for as long as the layer points into
  // their buffers.
  input_data_ = data_obj;
  input_labels_ = labels_obj;

  memory_layer->Reset(static_cast<float*>(PyArray_DATA(data_array)),
      static_cast<float*>(PyArray_DATA(labels_array)),
      num_items);
}
Пример #2
0
// Apply FiltImg to one channel of the input image using the given kernel
// and return the result to Python as an independent numpy array copy.
bp::object Filt(bp::object in_obj, bp::object kernel_obj, bp::object channel_obj) {
    Img source, filter, result;
    source.FromPyArrayObject(reinterpret_cast<PyArrayObject*>(in_obj.ptr()));
    filter.FromPyArrayObject(reinterpret_cast<PyArrayObject*>(kernel_obj.ptr()));
    const int channel = bp::extract<int>(channel_obj);
    FiltImg(source, filter, channel, result);
    // Hand the new array's reference to a boost handle so refcounting is
    // automatic, then return an owning copy to the caller.
    bp::handle<> result_handle(reinterpret_cast<PyObject*>(result.ToPyArrayObject()));
    bp::numeric::array result_array(result_handle);
    return result_array.copy();
}
Пример #3
0
inline
bool
is_object_of_type(
    bp::object const & obj
  , PyTypeObject & type_obj0
  , PyTypeObject & type_obj1
  , PyTypeObject & type_obj2
)
{
    // True when the wrapped Python object type-checks against any of the
    // three given type objects (PyObject_TypeCheck also accepts subtypes).
    PyObject * const raw = obj.ptr();
    return PyObject_TypeCheck(raw, &type_obj0)
        || PyObject_TypeCheck(raw, &type_obj1)
        || PyObject_TypeCheck(raw, &type_obj2);
}
Пример #4
0
// Apply FiltMaxImg over one channel of the input image with a window of
// k_size = (height, width), returning the result as a new numpy array.
bp::object FiltMax(bp::object in_obj, bp::object k_size, bp::object channel_obj) {
    Img source, result;
    source.CopyFromPyArrayObject(reinterpret_cast<PyArrayObject*>(in_obj.ptr()));
    // k_size is indexed as [0] = height, [1] = width.
    const int kernel_height = bp::extract<int>(k_size[0]);
    const int kernel_width = bp::extract<int>(k_size[1]);
    const int channel = bp::extract<int>(channel_obj);
    FiltMaxImg(source, kernel_width, kernel_height, channel, result);
    // Transfer ownership of the produced array to boost::python and return
    // an independent copy to the caller.
    bp::handle<> result_handle(reinterpret_cast<PyObject*>(result.ToPyArrayObject()));
    bp::numeric::array result_array(result_handle);
    return result_array.copy();
}
Пример #5
0
//-----------------------------------------------------------------------------
// Purpose: Called when it's time for a physically moved object (plats, doors,
//			etc) to run its game code.
//			All other entity thinking is done during worldspawn's think.
//
//			Invokes the given Python think callback (if any). A Python
//			exception raised by the callback is printed to the console and
//			never propagates into the engine. When think_limit is non-zero,
//			the callback is timed and a warning is printed if it runs longer
//			than that many milliseconds.
//-----------------------------------------------------------------------------
void C_BaseEntity::PhysicsPyDispatchThink( bp::object thinkFunc )
{
	float thinkLimit = think_limit.GetFloat();	// think-time budget in ms; 0 disables timing
	float startTime = 0.0;

	/*
	// This doesn't apply on the client, really
	if ( IsDormant() )
	{
	Warning( "Dormant entity %s is thinking!!\n", GetClassname() );
	Assert(0);
	}
	*/

	if ( thinkLimit )
	{
		startTime = Plat_FloatTime();
	}

	// Py_None means no think function is registered -- skip the call.
	if ( thinkFunc.ptr() != Py_None )
	{
		try {
			thinkFunc();
		} catch(boost::python::error_already_set &) {
			// Report the Python exception to the console instead of letting
			// it unwind through the C++ physics code.
			PyErr_Print();
		}
	}

	if ( thinkLimit )
	{
		// calculate running time of the AI in milliseconds
		float time = ( Plat_FloatTime() - startTime ) * 1000.0f;
		if ( time > thinkLimit )
		{
#if 0
			// If it's an NPC print out the schedule/task that took so long
			CAI_BaseNPC *pNPC = MyNPCPointer();
			if (pNPC && pNPC->GetCurSchedule())
			{
				pNPC->ReportOverThinkLimit( time );
			}
			else
#endif
			{
				Msg( "CLIENT:  %s(%s) thinking for %.02f ms!!!\n", GetClassname(), typeid(this).raw_name(), time );
			}
		}
	}
}
Пример #6
0
/*         false if bind is not successful.                            */
bool PyFunction::bind(const bp::object& dict, std::string funcname)
{
    // Look up `funcname` in the Python dictionary. Boost >= 1.41 exposes
    // dict.contains() directly; older versions fall back to the raw
    // CPython PyDict_Contains() call.
#if BOOST_VERSION >= 104100
    if(bp::extract<bool>(dict.contains(funcname)))
#else
    if(PyDict_Contains(dict.ptr(), bp::object(funcname).ptr()))
#endif
    {
        // Found: keep a reference to the Python callable for later calls.
        _func = dict[funcname];
        _isValid = true;
    }
    else
    {
        // Not found: leave _func untouched and report the failure.
        _isValid = false;
        std::cerr << "[PyFunction] Error binding function '" << funcname << "'." << std::endl;
    }

    return _isValid;
}
Пример #7
0
/*! Adapter between Python and uhd::tx_streamer::send().
 *
 * Converts the given Python object into a C-contiguous NumPy array, builds
 * one buffer pointer per TX channel, and forwards the samples to send()
 * with the GIL released.
 *
 * \param tx_stream the underlying UHD TX streamer
 * \param np_array  array-like Python object holding the samples; must be
 *                  2-D (one row per channel) when streaming > 1 channel
 * \param metadata  a uhd::tx_metadata_t wrapped in a Python object
 * \param timeout   timeout in seconds, passed through to send()
 * \return number of samples sent, or 0 if the metadata could not be
 *         extracted
 * \throws uhd::runtime_error if the object cannot be converted to a NumPy
 *         array, or its dimensions do not match the channel count
 */
static size_t wrap_send(uhd::tx_streamer *tx_stream,
                        bp::object &np_array,
                        bp::object &metadata,
                        const double timeout = 0.1)
{
    // Extract the metadata
    bp::extract<uhd::tx_metadata_t&> get_metadata(metadata);
    // TODO: throw an error here?
    if (not get_metadata.check())
    {
        return 0;
    }

    // Get a numpy array object from given python object
    // No sanity checking possible!
    // Note: this increases the ref count, which we'll need to manually decrease at the end
    PyObject* array_obj = PyArray_FROM_OF(np_array.ptr(),NPY_ARRAY_CARRAY);
    if (array_obj == NULL)
    {
        // BUGFIX: PyArray_FROM_OF returns NULL (with a Python exception set)
        // when the object cannot be converted to an array; the original code
        // dereferenced it unconditionally, crashing the interpreter.
        PyErr_Clear();
        throw uhd::runtime_error(
            "Data object could not be converted to a contiguous NumPy array");
    }
    PyArrayObject* array_type_obj = reinterpret_cast<PyArrayObject*>(array_obj);

    // Get dimensions of the numpy array
    const size_t dims = PyArray_NDIM(array_type_obj);
    const npy_intp* shape = PyArray_SHAPE(array_type_obj);

    // How many bytes to jump to get to the next element of the stride
    // (next row)
    const npy_intp* strides = PyArray_STRIDES(array_type_obj);
    const size_t channels = tx_stream->get_num_channels();

    // Check if numpy array sizes are ok: a multi-channel stream needs a 2-D
    // array with at least one row per channel.
    if (((channels > 1) && (dims != 2))
     or (static_cast<size_t>(shape[0]) < channels))
    {
        // Manually decrement the ref count
        Py_DECREF(array_obj);
        // If we don't have a 2D NumPy array, assume we have a 1D array
        size_t input_channels = (dims != 2) ? 1 : shape[0];
        throw uhd::runtime_error(str(boost::format(
            "Number of TX channels (%d) does not match the dimensions of the data array (%d)")
            % channels % input_channels));
    }

    // Get a pointer to the storage: row i of the array becomes the buffer
    // for channel i (strides[0] bytes apart).
    std::vector<void*> channel_storage;
    char* data = PyArray_BYTES(array_type_obj);
    for (size_t i = 0; i < channels; ++i)
    {
        channel_storage.push_back((void*)(data + i * strides[0]));
    }

    // Get data buffer and size of the array: samples per channel is the
    // second dimension for 2-D input, the total size for 1-D input.
    size_t nsamps_per_buff = (dims > 1) ? (size_t) shape[1] : PyArray_SIZE(array_type_obj);

    // Release the GIL only for the send() call
    const size_t result = [&]() {
        scoped_gil_release gil_release;
        // Call the real send()
        return tx_stream->send(
            channel_storage,
            nsamps_per_buff,
            get_metadata(),
            timeout
        );
    }();

    // Manually decrement the ref count taken by PyArray_FROM_OF
    Py_DECREF(array_obj);
    return result;
}