Example #1
    /**
     * Destructor: frees the mapped memory if this buffer owns it.
     */
    virtual ~MappedBufferIntern()
    {
        // wait until all pending device and host tasks on this buffer are finished
        __startOperation(ITask::TASK_CUDA);
        __startOperation(ITask::TASK_HOST);

        // free only memory that this buffer allocated itself
        if (pointer && ownPointer)
        {
#if( PMACC_CUDA_ENABLED == 1 )
/* cupla 0.1.0 does not support the function cudaHostAlloc that was used to
 * create this mapped memory. Therefore we must call the native CUDA function
 * cudaFreeHost to free it. Because cupla renames CUDA functions via macros,
 * we temporarily remove the renaming to get access to the native CUDA function.
 * @todo this is a workaround, please fix me. We need to investigate whether
 * it is possible to have mapped/unified memory in alpaka.
 *
 * corresponding alpaka issues:
 *   https://github.com/ComputationalRadiationPhysics/alpaka/issues/296
 *   https://github.com/ComputationalRadiationPhysics/alpaka/issues/612
 */
#   undef cudaFreeHost
            CUDA_CHECK((cuplaError_t)cudaFreeHost(pointer));
// re-introduce the cupla macro
#   define cudaFreeHost(...) cuplaFreeHost(__VA_ARGS__)
#else
            __deleteArray(pointer);
#endif
        }
    }
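
The #undef/#define pair above is a general preprocessor technique: temporarily
suspend a function-like macro to reach the symbol it shadows, then restore the
macro so the rest of the translation unit keeps seeing the wrapper. A minimal
self-contained sketch of the same pattern (native_free and wrapped_free are
hypothetical names chosen for illustration, not part of cupla):

    #include <cstdio>

    void native_free(void* p)  { std::printf("native_free(%p)\n", p); }
    void wrapped_free(void* p) { std::printf("wrapped_free(%p)\n", p); }

    // a wrapper library renames the call via a function-like macro, as cupla does
    #define native_free(...) wrapped_free(__VA_ARGS__)

    int main()
    {
        int x = 0;

        native_free(&x);   // expands to wrapped_free(&x)

    #undef native_free
        native_free(&x);   // reaches the real native_free
    // re-introduce the macro, exactly as the wrapper defined it
    #define native_free(...) wrapped_free(__VA_ARGS__)

        native_free(&x);   // wrapped_free(&x) once more
        return 0;
    }

Note that the restored #define must match the wrapper's original definition;
a mismatch would silently change the behavior of all code compiled after it.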
Example #2
    void send(void* array, size_t size)
    {
        if (connectOK)
        {
            // scratch buffer: header stays uncompressed, payload is compressed behind it
            char* tmp = new char[size];
            memcpy(tmp, array, sizeof(MessageHeader));

            // compress everything after the header at compression level 6; assumes
            // the compressed payload never grows beyond the original payload size
            ZipConnector zip;
            size_t zipedSize = zip.compress(
                tmp + MessageHeader::bytes,
                ((char*) array) + MessageHeader::bytes,
                size - MessageHeader::bytes,
                6);

            // record the compressed payload size in the header before sending
            MessageHeader* header = (MessageHeader*) tmp;
            header->data.byte = (uint32_t) zipedSize;

            write(SocketFD, tmp, zipedSize + MessageHeader::bytes);
            __deleteArray(tmp);
        }
    }
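
For context, a receiver would mirror this framing: read the fixed-size header,
then read the number of compressed bytes recorded in data.byte and inflate
them. A minimal sketch under stated assumptions: recvAll is a hypothetical
helper defined here, and ZipConnector is assumed to expose a decompress
counterpart to compress; neither is confirmed by the snippet above.

    #include <cstddef>
    #include <unistd.h>   // read(), ssize_t

    // hypothetical helper: loop until exactly n bytes arrived, since read()
    // may return fewer bytes than requested
    static bool recvAll(int fd, char* dst, size_t n)
    {
        size_t got = 0;
        while (got < n)
        {
            ssize_t r = read(fd, dst + got, n - got);
            if (r <= 0)
                return false;   // error or peer closed the connection
            got += (size_t) r;
        }
        return true;
    }

    void receive(int socketFD, char* out, size_t maxPayload)
    {
        // the fixed-size header travels uncompressed
        MessageHeader header;
        if (!recvAll(socketFD, (char*) &header, MessageHeader::bytes))
            return;

        // header.data.byte holds the compressed payload size set by send()
        char* packed = new char[header.data.byte];
        if (recvAll(socketFD, packed, header.data.byte))
        {
            ZipConnector zip;
            // assumed mirror of compress(): dst, src, compressed size, dst capacity
            zip.decompress(out, packed, header.data.byte, maxPayload);
        }
        __deleteArray(packed);
    }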