Example #1
bool msg_buffers::push_back(auto_buffer& buffer)
{
    // Refuse to append once the fixed-size array already holds fix_buffer_size entries.
    if (m_size >= fix_buffer_size) return false;
    buffers[m_size] = msg_buffer(buffer);
    m_size++;
    return true;
}
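The return value matters: once m_size reaches fix_buffer_size the buffer is not stored. A minimal caller-side sketch, assuming the auto_buffer and msg_buffers types shown above (the helper name and the logging are illustrative only):

#include <iostream>

bool enqueue_message(msg_buffers& queue, auto_buffer& buf)
{
    // push_back() returns false when the fixed-size array is already full.
    if (!queue.push_back(buf)) {
        std::cerr << "msg_buffers full: message dropped\n";
        return false;
    }
    return true;
}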
Example #2
void SocketServer::handle_conn(int sockfd)
{
    //MPI_CONNECTION_INIT
    // TODO: check this! 
    int argc = 0;
    
    #ifndef NDEBUG
        std::cout << "INFO" << ": trying MPI_Init " << std::endl;
    #endif
    MPI_Init( &argc, NULL );
    #ifndef NDEBUG
        std::cout << "INFO" << ": ... done " << std::endl;
    #endif
    
    // Create MPI Structure
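    // The reply header is an array of ArgumentList entries. ArgListType maps that
    // struct onto MPI as two MPI_INT blocks at displacements 0 and sizeof(int)
    // (assuming ArgumentList holds exactly the two int fields, sizeOfArg and
    // typeofArg, used further below).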
    int sizeOfData;
    MPI_Type_size(MPI_INT, &sizeOfData);
    int array_of_block_lengths[2] = {1, 1};
    MPI_Aint array_of_displacements[2] = {0, sizeOfData};
    MPI_Datatype array_of_types[2] = { MPI_INT, MPI_INT };

    MPI_Type_create_struct(2, array_of_block_lengths, array_of_displacements, array_of_types, &ArgListType);
    MPI_Type_commit(&ArgListType);
    // End of MPI struct
    
    client = MPI_COMM_WORLD;

    #ifndef NDEBUG
        std::cout << "DEBUG: Waiting for IR\n" << std::endl;
    #endif
    
    MPI_Status status;
    int mpi_server_tag = MPI_SERVER_TAG;
    int myrank;
    MPI_Comm_rank(client, &myrank);
    // Destination rank for replies: the peer process (rank 1 if this server is rank 0).
    // TODO: check this!
    int mpi_server_rank = 0;
    if (myrank == 0)
        mpi_server_rank = 1;
    
    int incomingMessageSize=0;    
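    // Probe first so the size of the incoming IR module is known, then allocate a
    // buffer of exactly that size (+1 for a terminating NUL) before receiving it.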
    MPI_Probe(MPI_ANY_SOURCE, mpi_server_tag, client, &status);
    MPI_Get_count(&status, MPI_CHAR, &incomingMessageSize);
    char *module_ir_buffer = (char *) calloc(incomingMessageSize + 1, sizeof(char));
    MPI_Recv(module_ir_buffer, incomingMessageSize + 1, MPI_CHAR, MPI_ANY_SOURCE, mpi_server_tag, client, &status);
    
    #ifndef NDEBUG
        std::cout << "DEBUG: Received IR" << std::endl;
    #endif
  
    auto backend = parseIRtoBackend(module_ir_buffer);
    // Notify the client that calls can now be accepted by sending the time taken to
    // optimise the module and initialise the backend.
    const std::string readyStr(std::to_string(TimeDiffOpt.count()) + ":" + std::to_string(TimeDiffInit.count()));
    MPI_Send((void *)readyStr.c_str(), readyStr.size(), MPI_CHAR, mpi_server_rank, mpi_server_tag, client);
    free(module_ir_buffer);

    // initialise msg_buffer
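    // The buffer is heap-allocated C memory; handing it to shared_ptr with &free as
    // the deleter ties its lifetime to this handler.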
    std::shared_ptr<char> msg_buffer((char*)calloc(MSG_BUFFER_SIZE, sizeof(char)), &free);
    while (1) {
        bzero(msg_buffer.get(), MSG_BUFFER_SIZE);
        // first acquire the message length; the read is bounded by the number of
        // characters needed to print UINT_MAX in decimal
        auto UINT_MAX_str_len = std::to_string(UINT_MAX).length();
        int num_chars = recv(sockfd, msg_buffer.get(), UINT_MAX_str_len + 1, 0);
    
        if (num_chars == 0) {
            std::cout << "Client assigned to process " << getpid() << " has closed its socket 3 \n";
            exit(0);
        }

        if (num_chars < 0)
            error("ERROR, could not read from socket");

        #ifndef NDEBUG
            //std::cout << getpid() << ": got message \"" << msg_buffer << "\"\n"; // TODO command line argument to print messages
            std::cout << getpid() << ": got message \n";
        #endif

        llvm::Function* calledFunction = nullptr;
        std::vector<llvm::GenericValue> args;
        std::list<std::vector<llvm::GenericValue>::size_type> indexesOfPointersInArgs;
        llvm::GenericValue result = handleCall(backend.get(), msg_buffer.get(), calledFunction, args, indexesOfPointersInArgs);

        // reset buffer and write time taken to buffer
        bzero(msg_buffer.get(), MSG_BUFFER_SIZE);
        sprintf(msg_buffer.get(), ";%ld", (long)TimeDiffLastExecution.count());

        //MPI_DATA_MOVEMENT
        //Send the pointer arguments and the return value back to the client
        #ifndef TIMING
            auto StartTime = std::chrono::high_resolution_clock::now();
        #endif

        struct ArgumentList argList[MAX_NUMBER_OF_ARGUMENTS];

        //Create the structure: one header entry per pointer argument, recording the
        //element type and the element count that will be sent back
        int structSize = 0;

        for (const auto& indexOfPtr : indexesOfPointersInArgs) {
            auto paramType = calledFunction->getFunctionType()->getParamType(indexOfPtr);
            // Strip array/pointer wrappers until the underlying element type is reached.
            while (paramType->getTypeID() == llvm::Type::ArrayTyID || paramType->getTypeID() == llvm::Type::PointerTyID)
                paramType = llvm::cast<llvm::SequentialType>(paramType)->getElementType();

            if (paramType->getTypeID() == llvm::Type::IntegerTyID) {
                argList[structSize].typeofArg = ENUM_MPI_INT;
            } else {
                argList[structSize].typeofArg = ENUM_MPI_DOUBLE;
            }
            argList[structSize].sizeOfArg = argumentList[indexOfPtr].sizeOfArg;
            structSize++;
        }

        #ifndef NDEBUG
            std::cout << "\nMPI SERVER: Sending message back from server to client";
            std::cout.flush();
        #endif


        #ifndef NDEBUG
            std::cout << "\nMPI SERVER: Sending MPI Header";
            std::cout.flush();

            for (int i=0; i<structSize; i++) {
                std::cout <<  "\n MPI Sent DS : Size : " << argList[i].sizeOfArg << "  Type" << argList[i].typeofArg ;
                std::cout.flush();
            }
        #endif
        MPI_Send(argList, structSize, ArgListType, mpi_server_rank, mpi_server_tag, client);

        #ifndef NDEBUG
            std::cout << "\nMPI SERVER: Sent MPI Header";
            std::cout.flush();

            std::cout << "\nMPI SERVER: Sending data";
            std::cout.flush();
        #endif

        //Start sending the individual arrays
        for (const auto& indexOfPtr : indexesOfPointersInArgs) {
            auto paramType = calledFunction->getFunctionType()->getParamType(indexOfPtr);
            while (paramType->getTypeID() == llvm::Type::ArrayTyID || paramType->getTypeID() == llvm::Type::PointerTyID)
                paramType = llvm::cast<llvm::SequentialType>(paramType)->getElementType();

            if (paramType->getTypeID() == llvm::Type::IntegerTyID) {
                MPI_Send(args[indexOfPtr].PointerVal, argList[indexOfPtr].sizeOfArg, MPI_INT, mpi_server_rank, mpi_server_tag, client);
            } else {
                MPI_Send(args[indexOfPtr].PointerVal, argList[indexOfPtr].sizeOfArg, MPI_DOUBLE, mpi_server_rank, mpi_server_tag, client);
            }
            free(args[indexOfPtr].PointerVal);
        }

        #ifndef TIMING
            auto EndTime = std::chrono::high_resolution_clock::now();
            std::cout << "\n SERVER: MPI_DATA_TRANSFER S->C = " << std::chrono::duration_cast<std::chrono::microseconds>(EndTime - StartTime).count() << "\n";
        #endif
    
        #ifndef NDEBUG
            std::cout << "\nMPI SERVER: Data sent";
            std::cout.flush();

            std::cout << "\nMPI SERVER: Return Messages sent";
            std::cout.flush();
        #endif
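        // Encode the return value after the timing prefix. Floating-point results use
        // C99 hexadecimal float formatting and integers are written in base 16, so the
        // text round-trips without precision loss.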
        
        char returnValStr[MAX_VAL_SIZE];
        switch (calledFunction->getReturnType()->getTypeID()) {
            case llvm::Type::VoidTyID:
                sprintf(returnValStr, ":");
                break;
            case llvm::Type::FloatTyID:
                sprintf(returnValStr, ":%a", result.FloatVal);
                break;
            case llvm::Type::DoubleTyID:
                sprintf(returnValStr, ":%la", result.DoubleVal);
                break;
            case llvm::Type::X86_FP80TyID:
                returnValStr[0]=':';
                llvm::APFloat(llvm::APFloat::x87DoubleExtended, result.IntVal).convertToHexString(returnValStr+1, 0U, false, llvm::APFloat::roundingMode::rmNearestTiesToEven);
                break;
            case llvm::Type::FP128TyID:
                returnValStr[0]=':';
                llvm::APFloat(llvm::APFloat::IEEEquad, result.IntVal).convertToHexString(returnValStr+1, 0U, false, llvm::APFloat::roundingMode::rmNearestTiesToEven);
                break;
            case llvm::Type::IntegerTyID: // Note: LLVM does not differentiate between signed/unsigned int types
                sprintf(returnValStr, ":%s", result.IntVal.toString(16,false).c_str());
                break;
            default:
                error(std::string("ERROR, LLVM TypeID " + std::to_string(calledFunction->getReturnType()->getTypeID()) + " of result of function \"" + calledFunction->getName().str() + "\" is not supported").c_str());
        }
        strcat(msg_buffer.get(), returnValStr);

        //Send the message
        MPI_Send(msg_buffer.get(), strlen(msg_buffer.get()), MPI_CHAR, mpi_server_rank, mpi_server_tag, client);
    
        MPI_Type_free(&ArgListType);

        // TODO: check this! MPI_Type_free and MPI_Finalize run at the end of every
        // iteration of the while(1) loop, so a second request would use MPI after
        // it has been finalized.
        MPI_Finalize();
    }
}
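For context, the receive side of this exchange could look roughly like the sketch below. It follows the send order visible above (per-argument header, one message per pointer argument, then the ";time:retval" string), but every name, the assumed ArgumentList layout, the ENUM_* values, and the treatment of sizeOfArg as an element count are illustrative assumptions, not the project's actual client code.

#include <mpi.h>
#include <cstdio>
#include <vector>

struct ArgumentList { int sizeOfArg; int typeofArg; };   // assumed field order
enum { ENUM_MPI_INT = 0, ENUM_MPI_DOUBLE = 1 };          // assumed encoding

// argListType must be built and committed exactly like ArgListType on the server.
void receive_call_results(MPI_Comm comm, MPI_Datatype argListType, int serverRank, int tag)
{
    MPI_Status status;

    // 1) Header: probe first so the number of ArgumentList entries is known.
    MPI_Probe(serverRank, tag, comm, &status);
    int numArgs = 0;
    MPI_Get_count(&status, argListType, &numArgs);
    std::vector<ArgumentList> header(numArgs);
    MPI_Recv(header.data(), numArgs, argListType, serverRank, tag, comm, &status);

    // 2) One message per pointer argument, in the order the server sent them.
    for (const ArgumentList& h : header) {
        if (h.typeofArg == ENUM_MPI_INT) {
            std::vector<int> data(h.sizeOfArg);
            MPI_Recv(data.data(), h.sizeOfArg, MPI_INT, serverRank, tag, comm, &status);
            // ...copy data back into the caller's buffer...
        } else {
            std::vector<double> data(h.sizeOfArg);
            MPI_Recv(data.data(), h.sizeOfArg, MPI_DOUBLE, serverRank, tag, comm, &status);
            // ...copy data back into the caller's buffer...
        }
    }

    // 3) The ";<execution time>:<return value>" reply string.
    MPI_Probe(serverRank, tag, comm, &status);
    int len = 0;
    MPI_Get_count(&status, MPI_CHAR, &len);
    std::vector<char> reply(len + 1, '\0');
    MPI_Recv(reply.data(), len, MPI_CHAR, serverRank, tag, comm, &status);
    std::printf("reply: %s\n", reply.data());
}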