void maliciousot::OtExtensionMaliciousReceiverInterface::init_ot_receiver() {
    int nSndVals = 2;
    // Round the number of base OTs up to the next power of two.
    int wdsize = 1 << (CEIL_LOG2(m_num_base_ots));
    int nblocks = CEIL_DIVIDE(m_num_ots, NUMOTBLOCKS * wdsize);
    // Number of seed OTs produced by the intermediate extension step below.
    int s2ots = nblocks * m_num_base_ots;
    
    m_sender_key_seeds = (BYTE*) malloc(AES_KEY_BYTES * m_num_base_ots);
    m_receiver_key_seeds_matrix = (BYTE*) malloc(AES_KEY_BYTES * 2 * s2ots);

    // client connect
    m_connection_manager->setup_connection();

    // 1st step: pre-compute the PVW base OTs
    precompute_base_ots_receiver();

    assert(nblocks <= NUMOTBLOCKS);

    // 2nd step: OT extension step to obtain the base-OTs for the next step
    m_sender = new Mal_OTExtensionSender(nSndVals, m_security_level.symbits,
					 m_connection_manager->get_sockets_data(),
					 U, m_sender_key_seeds, m_num_base_ots,
					 m_num_checks, s2ots, m_sender_seed);
    
    // Run a random-OT extension (R_OT) to derive the 2 * s2ots random key
    // seeds that serve as base OTs for the receiver's main extension.
    CBitVector seedA(s2ots * AES_KEY_BITS);
    CBitVector seedB(s2ots * AES_KEY_BITS);

    XORMasking* masking_function = new XORMasking(AES_KEY_BITS);
    m_sender->send(s2ots, AES_KEY_BITS, seedA, seedB, R_OT, 1, masking_function);
    delete masking_function;

    // Interleave the seeds so the pair for OT i is contiguous: A_0, B_0, A_1, B_1, ...
    for(int i = 0; i < s2ots; i++) {
        memcpy(m_receiver_key_seeds_matrix + 2 * i * AES_KEY_BYTES,
               seedA.GetArr() + i * AES_KEY_BYTES,
               AES_KEY_BYTES);

        memcpy(m_receiver_key_seeds_matrix + (2*i+1) * AES_KEY_BYTES,
               seedB.GetArr() + i * AES_KEY_BYTES,
               AES_KEY_BYTES);
    }
    
    m_receiver = new Mal_OTExtensionReceiver(nSndVals, m_security_level.symbits,
					     m_connection_manager->get_sockets_data(),
					     m_receiver_key_seeds_matrix, m_receiver_seed,
					     m_num_base_ots, s2ots);
}
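The CEIL_LOG2 and CEIL_DIVIDE helpers are not part of this snippet; a minimal sketch of typical definitions, assuming unsigned operands (hypothetical stand-ins, not the library's own code):

/* Hypothetical helper definitions, assumed equivalent to the library's. */
#define CEIL_DIVIDE(x, y) (((x) + (y) - 1) / (y))

static inline unsigned ceil_log2_sketch(unsigned x)
{
    unsigned r = 0;
    while ((1u << r) < x)   /* smallest r such that 2^r >= x */
        r++;
    return r;
}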
Example #2
/******************************************************************************
* DMA to the DSP memory system can only occur from contiguous memory, i.e.
* CMEM.  CMEM buffers are currently limited to 4M, so the algorithm copies
* the general buffer into CMEM buffers in 4M chunks and initiates one
* non-blocking DMA per chunk, reusing a small pool of CMEM buffers
* circularly with up to four DMAs in flight.  As a result, the routine
* performs ceil(size / 4M) memcpy calls and DMA initiates.  To make it
* concrete, a 48M buffer DMA will result in:
*       12 memcpy calls of 4M each,
*       4 CMEM buffers allocated of 4M each (reused circularly),
*       12 DMA initiates, with at most 4 outstanding at any time.
*
* The chunk size is given by the HOST_CMEM_BUFFER_SIZE macro, currently 4M.
******************************************************************************/
void Cmem::dma_write(int32_t dsp_id, uint32_t addr, uint8_t *buf, uint32_t size)
{
    static uint32_t      trans_id = 0;
    uint32_t             start_trans_id = trans_id;
    int32_t              ret_val;
    std::deque<uint32_t> dma_ids;

    uint32_t simul_dmas       = 4;
    uint32_t cmem_buffer_size = HOST_CMEM_BUFFER_SIZE;
    uint32_t tot_buffers      = CEIL_DIVIDE(size, cmem_buffer_size);
    uint32_t circ_buffers     = std::min(simul_dmas, tot_buffers);
    uint32_t last_buffer_size = size - ((tot_buffers-1) * cmem_buffer_size);

    cmem_host_buf_desc_t   *host_buf_desc   = 
                                  new cmem_host_buf_desc_t[circ_buffers];

    cmem_host_frame_desc_t *host_frame_desc = 
                                  new cmem_host_frame_desc_t[circ_buffers];

    /*---------------------------------------------------------------------
    * Allocate Host CMEM buffers
    *--------------------------------------------------------------------*/
    for (uint32_t i = 0; i < circ_buffers; i++)
    {
        ret_val = bufmgrAlloc(DmaBufPool, 1, &host_buf_desc[i]);
        ERR(ret_val, "dma buffer allocation failed");
        host_frame_desc[i].bufDescP         = &host_buf_desc[i];
        host_frame_desc[i].numBuffers       = 1;
        host_frame_desc[i].frameStartOffset = 0;
        host_frame_desc[i].frameSize        = cmem_buffer_size;
    }

    /*-------------------------------------------------------------------------
     * Initiate one transfer at a time based on what fits within the allowed
     * contiguous buffers per DMA transaction
     *------------------------------------------------------------------------*/
    for (uint32_t i = 0; i < tot_buffers; ++i)
    {
        uint32_t circ_i = i % simul_dmas;
        uint32_t offset = i * cmem_buffer_size;

        cmem_host_buf_desc_t &buf_desc = host_buf_desc[circ_i];
        uint32_t cpy_size = buf_desc.length;

        if (i == tot_buffers-1) 
            host_frame_desc[circ_i].frameSize = cpy_size = last_buffer_size;

        memcpy(buf_desc.userAddr, buf + offset, cpy_size);

        /*---------------------------------------------------------------------
         * Initiate DMA
         *--------------------------------------------------------------------*/
        ret_val = pciedrv_dma_write_initiate(dsp_id, addr + offset, 
                                    &host_frame_desc[circ_i], 
                                    PCIEDRV_DMA_XFER_NON_BLOCKING, 
                                    &trans_id);
        ERR(ret_val, "DMA initiate failed");

        dma_ids.push_back(trans_id);

        /*---------------------------------------------------------------------
         * Before the next iteration reuses a circular buffer, spin until the
         * oldest outstanding DMA has completed
         *--------------------------------------------------------------------*/
        if (dma_ids.size() >= simul_dmas)
        {
            while (pciedrv_dma_check(dsp_id, dma_ids.front()));
            dma_ids.pop_front();
        }
    }

    /*---------------------------------------------------------------------
     * Wait for all dmas to complete
     *--------------------------------------------------------------------*/
    for (size_t i = 0; i < dma_ids.size(); i++)
        while (pciedrv_dma_check(dsp_id, dma_ids[i]));

    /*---------------------------------------------------------------------
     * Free host CMEM buffers
     *--------------------------------------------------------------------*/
    for (uint32_t i = 0; i < circ_buffers; i++)
    {
        ret_val = bufmgrFreeDesc(DmaBufPool, &host_buf_desc[i]);
        ERR(ret_val, "dma buffer free failed");
    }

    delete [] host_buf_desc;
    delete [] host_frame_desc;
}
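A minimal usage sketch of the chunking arithmetic; the cmem instance, dsp_id value, and DSP_DDR_BASE address are hypothetical, not part of the snippet:

// Hypothetical usage: write a 48M image with HOST_CMEM_BUFFER_SIZE == 4M.
uint32_t size  = 48 * 1024 * 1024;
uint8_t *image = new uint8_t[size];
// ... fill image ...
cmem.dma_write(0 /* dsp_id */, DSP_DDR_BASE, image, size);
// tot_buffers  = CEIL_DIVIDE(48M, 4M) = 12 chunks and DMA initiates
// circ_buffers = min(4, 12)           = 4 CMEM buffers, reused circularly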
Example #3
/******************************************************************************
* Cmem::dma_read
*
* Unlike dma_write, reads use a single host CMEM buffer and blocking DMA
* transfers: each chunk is DMA'd into the buffer, then copied out.
******************************************************************************/
void Cmem::dma_read(int32_t dsp_id, uint32_t addr, uint8_t *buf, uint32_t size)
{
    cmem_host_buf_desc_t   host_buf_desc;
    cmem_host_frame_desc_t host_frame_desc;

    /*-------------------------------------------------------------------------
    * Calculate total number of host buffers required to fit the data
    *------------------------------------------------------------------------*/
    uint32_t num_buffers     = CEIL_DIVIDE(size, HOST_CMEM_BUFFER_SIZE);
    uint32_t remaining_size  = size;
    uint32_t offset          = 0;
    uint32_t transfer_size   = HOST_CMEM_BUFFER_SIZE;
    uint32_t trans_id;
    int32_t  ret_val;

    /*---------------------------------------------------------------------
    * Allocate Host buffer
    *--------------------------------------------------------------------*/
    ret_val = bufmgrAlloc(DmaBufPool, 1, &host_buf_desc);
    ERR(ret_val, "dma buffer allocation failed");

    /*---------------------------------------------------------------------
    * Populate details of data in frame descriptor
    *--------------------------------------------------------------------*/
    host_frame_desc.bufDescP         = &host_buf_desc;
    host_frame_desc.numBuffers       = 1;
    host_frame_desc.frameStartOffset = 0;
    host_frame_desc.frameSize        = transfer_size;

    /*-------------------------------------------------------------------------
    * Initiate one transfer at a time based on what fits within a single
    * host CMEM buffer
    *------------------------------------------------------------------------*/
    while (num_buffers) 
    {
        if (num_buffers == 1) 
        {
            transfer_size = remaining_size;
            host_frame_desc.frameSize = transfer_size;
        }

        /*---------------------------------------------------------------------
        * Initiate DMA
        *--------------------------------------------------------------------*/
        ret_val = pciedrv_dma_read_initiate(dsp_id, addr + offset, 
                       &host_frame_desc, PCIEDRV_DMA_XFER_BLOCKING, &trans_id);
        ERR(ret_val, "DMA initiate failed");

        /*---------------------------------------------------------------------
        * Copy from dma buffers into buffer
        *--------------------------------------------------------------------*/
        memcpy (buf + offset, host_buf_desc.userAddr, transfer_size);

        num_buffers--;
        offset         += transfer_size;
        remaining_size -= transfer_size;
    }

    /*---------------------------------------------------------------------
    * Free Buffer Descriptors
    *--------------------------------------------------------------------*/
    ret_val = bufmgrFreeDesc(DmaBufPool, &host_buf_desc);
    ERR(ret_val, "dma buffer free failed");
}
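Continuing the hypothetical sketch from dma_write above, a round-trip check might look like:

// Hypothetical round-trip verification; names as in the previous sketch.
uint8_t *readback = new uint8_t[size];
cmem.dma_read(0 /* dsp_id */, DSP_DDR_BASE, readback, size);
assert(memcmp(image, readback, size) == 0);
delete [] readback;
delete [] image;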
Example #4
static error_t vfl_vsvfl_open(vfl_device_t *_vfl, nand_device_t *_nand)
{
    vfl_vsvfl_device_t *vfl = CONTAINER_OF(vfl_vsvfl_device_t, vfl, _vfl);

    if(vfl->device || !_nand)
        return EINVAL;

    vfl->device = _nand;
    error_t ret = vfl_vsvfl_setup_geometry(vfl);
    if(FAILED(ret))
        return ret;

    bufferPrintf("vsvfl: Opening %p.\r\n", _nand);

    vfl->contexts = malloc(vfl->geometry.num_ce * sizeof(vfl_vsvfl_context_t));
    memset(vfl->contexts, 0, vfl->geometry.num_ce * sizeof(vfl_vsvfl_context_t));

    vfl->pageBuffer = (uint32_t*) malloc(vfl->geometry.pages_per_block * sizeof(uint32_t));
    vfl->chipBuffer = (uint16_t*) malloc(vfl->geometry.pages_per_block * sizeof(uint16_t));
    vfl->blockBuffer = (uint16_t*) malloc(vfl->geometry.banks_total * sizeof(uint16_t));

    uint32_t ce = 0;
    for(ce = 0; ce < vfl->geometry.num_ce; ce++) {
        vfl->bbt[ce] = (uint8_t*) malloc(CEIL_DIVIDE(vfl->geometry.blocks_per_ce, 8));

        bufferPrintf("vsvfl: Checking CE %d.\r\n", ce);

        if(FAILED(nand_device_read_special_page(_nand, ce, "DEVICEINFOBBT\0\0\0",
                                                vfl->bbt[ce], CEIL_DIVIDE(vfl->geometry.blocks_per_ce, 8))))
        {
            bufferPrintf("vsvfl: Failed to find DEVICEINFOBBT!\r\n");
            return EIO;
        }

        if(ce >= vfl->geometry.num_ce)
            return EIO;

        vfl_vsvfl_context_t *curVFLCxt = &vfl->contexts[ce];
        uint8_t* pageBuffer = malloc(vfl->geometry.bytes_per_page);
        uint8_t* spareBuffer = malloc(vfl->geometry.bytes_per_spare);
        if(pageBuffer == NULL || spareBuffer == NULL) {
            bufferPrintf("ftl: cannot allocate page and spare buffer\r\n");
            return ENOMEM;
        }

        // Any VFLCxt page will contain an up-to-date list of all blocks used to store VFLCxt pages. Find any such
        // page in the system area.

        int i;
        for(i = vfl->geometry.reserved_blocks; i < vfl->geometry.fs_start_block; i++) {
            // The BBT (pstBBTArea) is a bit array; a set bit marks a usable block.
            if(!(vfl->bbt[ce][i / 8] & (1 << (i  & 0x7))))
                continue;

            if(SUCCEEDED(nand_device_read_single_page(vfl->device, ce, i, 0, pageBuffer, spareBuffer, 0)))
            {
                memcpy(curVFLCxt->vfl_context_block, ((vfl_vsvfl_context_t*)pageBuffer)->vfl_context_block,
                       sizeof(curVFLCxt->vfl_context_block));
                break;
            }
        }

        if(i == vfl->geometry.fs_start_block) {
            bufferPrintf("vsvfl: cannot find readable VFLCxtBlock\r\n");
            free(pageBuffer);
            free(spareBuffer);
            return EIO;
        }

        // Since VFLCxtBlock is a ringbuffer, if blockA.page0.spare.usnDec < blockB.page0.spare.usnDec, then for
        // any page a in blockA and any page b in blockB, a.spare.usnDec < b.spare.usnDec. Therefore, to find the
        // page/VFLCxt with the lowest usnDec, we only need to look at the first page of each block in the ring.
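        // (Worked example, hypothetical values: with first-page usnDec values {9, 7, 8, 0xFFFFFFFF} across the
        // four ring blocks, block 1 has the lowest usnDec, so the scan below picks VFLCxtIdx == 1.)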
        uint32_t minUsn = 0xFFFFFFFF;
        int VFLCxtIdx = 4;
        for(i = 0; i < 4; i++) {
            uint16_t block = curVFLCxt->vfl_context_block[i];
            if(block == 0xFFFF)
                continue;

            if(FAILED(nand_device_read_single_page(vfl->device, ce, block, 0, pageBuffer, spareBuffer, 0)))
                continue;

            vfl_vsvfl_spare_data_t *spareData = (vfl_vsvfl_spare_data_t*)spareBuffer;

            if(spareData->meta.usnDec > 0 && spareData->meta.usnDec <= minUsn) {
                minUsn = spareData->meta.usnDec;
                VFLCxtIdx = i;
            }
        }

        if(VFLCxtIdx == 4) {
            bufferPrintf("vsvfl: cannot find readable VFLCxtBlock index in spares\r\n");
            free(pageBuffer);
            free(spareBuffer);
            return EIO;
        }

        // VFLCxts are stored in the block such that they are duplicated 8 times. Therefore, we only need to
        // read every 8th page, and nand_readvfl_cxt_page will try the 7 subsequent pages if the first was
        // no good. The last non-blank page will have the lowest spare.usnDec and highest usnInc for VFLCxt
        // in all the land (and is the newest).
        int page = 8;
        int last = 0;
        for(page = 8; page < vfl->geometry.pages_per_block; page += 8) {
            if(nand_device_read_single_page(vfl->device, ce, curVFLCxt->vfl_context_block[VFLCxtIdx], page, pageBuffer, spareBuffer, 0) != 0) {
                break;
            }

            last = page;
        }

        if(nand_device_read_single_page(vfl->device, ce, curVFLCxt->vfl_context_block[VFLCxtIdx], last, pageBuffer, spareBuffer, 0) != 0) {
            bufferPrintf("vsvfl: cannot find readable VFLCxt\r\n");
            free(pageBuffer);
            free(spareBuffer);
            return EIO;
        }

        // Aha, so the upshot is that this finds the VFLCxt and copies it into vfl->contexts
        memcpy(&vfl->contexts[ce], pageBuffer, sizeof(vfl_vsvfl_context_t));

        // This is the newest VFLCxt across all CEs
        if(curVFLCxt->usn_inc >= vfl->current_version) {
            vfl->current_version = curVFLCxt->usn_inc;
        }

        free(pageBuffer);
        free(spareBuffer);

        // Verify the checksum
        if(vfl_check_checksum(vfl, ce) == FALSE)
        {
            bufferPrintf("vsvfl: VFLCxt has bad checksum.\r\n");
            return EIO;
        }
    }

    // retrieve some global parameters from the latest VFL across all CEs.
    vfl_vsvfl_context_t *latestCxt = get_most_updated_context(vfl);

    // Then we update the VFLCxts on every ce with that information.
    for(ce = 0; ce < vfl->geometry.num_ce; ce++) {
        // Don't copy over own data.
        if(&vfl->contexts[ce] != latestCxt) {
            // Copy the data, and generate the new checksum.
            memcpy(vfl->contexts[ce].control_block, latestCxt->control_block, sizeof(latestCxt->control_block));
            vfl->contexts[ce].usable_blocks_per_bank = latestCxt->usable_blocks_per_bank;
            vfl->contexts[ce].reserved_block_pool_start = latestCxt->reserved_block_pool_start;
            vfl->contexts[ce].ftl_type = latestCxt->ftl_type;
            memcpy(vfl->contexts[ce].field_6CA, latestCxt->field_6CA, sizeof(latestCxt->field_6CA));

            vfl_gen_checksum(vfl, ce);
        }
    }

    // Vendor-specific virtual-from/to-physical functions.
    // Note: support for some vendors is still missing.
    nand_device_t *nand = vfl->device;
    uint32_t vendorType = vfl->contexts[0].vendor_type;

    if(!vendorType)
        if(FAILED(nand_device_get_info(nand, diVendorType, &vendorType, sizeof(vendorType))))
            return EIO;

    switch(vendorType) {
    case 0x10001:
        vfl->geometry.banks_per_ce = 1;
        vfl->virtual_to_physical = virtual_to_physical_10001;
        break;

    case 0x100010:
    case 0x100014:
    case 0x120014:
        vfl->geometry.banks_per_ce = 2;
        vfl->virtual_to_physical = virtual_to_physical_100014;
        break;

    case 0x150011:
        vfl->geometry.banks_per_ce = 2;
        vfl->virtual_to_physical = virtual_to_physical_150011;
        break;

    default:
        bufferPrintf("vsvfl: unsupported vendor 0x%06x\r\n", vendorType);
        return EIO;
    }

    if(FAILED(nand_device_set_info(nand, diVendorType, &vendorType, sizeof(vendorType))))
        return EIO;

    vfl->geometry.pages_per_sublk = vfl->geometry.pages_per_block * vfl->geometry.banks_per_ce * vfl->geometry.num_ce;
    vfl->geometry.banks_total = vfl->geometry.num_ce * vfl->geometry.banks_per_ce;
    vfl->geometry.blocks_per_bank_vfl = vfl->geometry.blocks_per_ce / vfl->geometry.banks_per_ce;

    uint32_t banksPerCE = vfl->geometry.banks_per_ce;
    if(FAILED(nand_device_set_info(nand, diBanksPerCE_VFL, &banksPerCE, sizeof(banksPerCE))))
        return EIO;

    bufferPrintf("vsvfl: detected chip vendor 0x%06x\r\n", vendorType);

    // Now, discard the old scfg bad-block table, and set it using the VFL context's reserved block pool map.
    uint32_t bank, i;
    uint32_t num_reserved = vfl->contexts[0].reserved_block_pool_start;
    uint32_t num_non_reserved = vfl->geometry.blocks_per_bank_vfl - num_reserved;

    for(ce = 0; ce < vfl->geometry.num_ce; ce++) {
        memset(vfl->bbt[ce], 0xFF, CEIL_DIVIDE(vfl->geometry.blocks_per_ce, 8));

        for(bank = 0; bank < banksPerCE; bank++) {
            for(i = 0; i < num_non_reserved; i++) {
                uint16_t mapEntry = vfl->contexts[ce].reserved_block_pool_map[bank * num_non_reserved + i];
                uint32_t pBlock;

                if(mapEntry == 0xFFF0)
                    continue;

                if(mapEntry < vfl->geometry.blocks_per_ce) {
                    pBlock = mapEntry;
                } else if(mapEntry > 0xFFF0) {
                    virtual_block_to_physical_block(vfl, ce + bank * vfl->geometry.num_ce, num_reserved + i, &pBlock);
                } else {
                    system_panic("vsvfl: bad map table: CE %d, entry %d, value 0x%08x\r\n",
                                 ce, bank * num_non_reserved + i, mapEntry);
                }

                vfl->bbt[ce][pBlock / 8] &= ~(1 << (pBlock % 8));
            }
        }
    }

    bufferPrintf("vsvfl: VFL successfully opened!\r\n");

    return SUCCESS;
}
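The bad-block table is kept as a packed bit array and is tested and cleared inline above; a minimal sketch of equivalent helpers (hypothetical names, not part of the original source):

/* Hypothetical helpers matching the inline bit manipulation above.
 * A set bit appears to mark a usable block; clearing it marks the
 * block bad or remapped. */
static inline int bbt_is_usable(const uint8_t *bbt, uint32_t block)
{
    return (bbt[block / 8] >> (block % 8)) & 1;
}

static inline void bbt_mark_unusable(uint8_t *bbt, uint32_t block)
{
    bbt[block / 8] &= ~(1 << (block % 8));
}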