/**
 * MEM_InitSmart - Create a memory-segment pool whose blocks are allocated
 * from a specific memory partition ("smart" allocation).
 *
 * @name           Pool name (copied, truncated to MEM_MAX_NAME_LENGTH-1 chars).
 * @p_Handle       Out: receives the segment handle on success, NULL on failure.
 * @num            Number of blocks in the pool.
 * @dataSize       Data size of each block (rounded up to a multiple of 4).
 * @prefixSize     Bytes reserved before the data area of each block.
 * @postfixSize    Bytes reserved after the data area of each block.
 * @alignment      Required data alignment; forced to >= 4, must be a power of 2.
 * @memPartitionId Partition passed through to XX_MallocSmart().
 * @consecutiveMem TRUE: all blocks carved from one contiguous allocation;
 *                 FALSE: one XX_MallocSmart() call per block.
 *
 * Returns E_OK on success, or an error via RETURN_ERROR() on invalid
 * alignment, allocation failure, or spinlock-creation failure.
 */
t_Error MEM_InitSmart(char     name[],
                      t_Handle *p_Handle,
                      uint32_t num,
                      uint16_t dataSize,
                      uint16_t prefixSize,
                      uint16_t postfixSize,
                      uint16_t alignment,
                      uint8_t  memPartitionId,
                      bool     consecutiveMem)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;

    /* prepare in case of error */
    *p_Handle = NULL;

    /* make sure that size is always a multiple of 4 */
    if (dataSize & 3)
    {
        dataSize &= ~3;
        dataSize += 4;
    }

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }

    /* FIX: zero the descriptor immediately. The error paths below hand the
       partially-built descriptor to MEM_Free(); without this memset, fields
       such as p_Bases and h_Spinlock hold indeterminate values and MEM_Free
       would operate on garbage pointers. (Assumes MEM_Free tolerates NULL
       members — NOTE(review): confirm against MEM_Free's implementation.) */
    memset(p_Mem, 0, sizeof(t_MemorySegment));

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array: a single base when the memory is one
       consecutive chunk, otherwise one base per block */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc((consecutiveMem ? 1 : num) * sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, (consecutiveMem ? 1 : num) * sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_LOCAL_SMART;
    p_Mem->consecutiveMem = consecutiveMem;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    /* pad so the data area starts 4-aligned after the prefix */
    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment, alignPad + prefixSize + dataSize + postfixSize);
    /* Calculate blockSize */
    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* Now allocate the blocks */
    if (p_Mem->consecutiveMem)
    {
        /* |alignment - 1| bytes at most will be discarded in the beginning of the
           received segment for alignment reasons, therefore the allocation is of:
           (alignment + (num * block size)). */
        uint8_t *p_Blocks = (uint8_t *)
            XX_MallocSmart((uint32_t)((num * blockSize) + alignment), memPartitionId, 1);
        if (!p_Blocks)
        {
            MEM_Free(p_Mem);
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment blocks"));
        }

        /* Store the memory segment address */
        p_Mem->p_Bases[0] = p_Blocks;

        /* The following manipulation places the data of block[0] in an aligned address,
           since block size is aligned the following block datas will all be aligned. */
        ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

        /* initialize the blocks */
        for (i = 0; i < num; i++)
        {
            p_Mem->p_BlocksStack[i] = p_Blocks;
            p_Blocks += blockSize;
        }

#ifdef DEBUG_MEM_LEAKS
        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
#endif /* DEBUG_MEM_LEAKS */
    }
    else
    {
        /* |alignment - 1| bytes at most will be discarded in the beginning of the
           received segment for alignment reasons, therefore the allocation is of:
           (alignment + block size). */
        for (i = 0; i < num; i++)
        {
            uint8_t *p_Block = (uint8_t *)
                XX_MallocSmart((uint32_t)(blockSize + alignment), memPartitionId, 1);
            if (!p_Block)
            {
                MEM_Free(p_Mem);
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment blocks"));
            }

            /* Store the memory segment address */
            p_Mem->p_Bases[i] = p_Block;

            /* The following places the data of each block in an aligned address */
            ALIGN_BLOCK(p_Block, prefixSize, alignment);

#ifdef DEBUG_MEM_LEAKS
            /* Need 4 bytes before the meaningful bytes to store the block index.
               We know we have them because alignment is at least 4 bytes. */
            if (p_Block == p_Mem->p_Bases[i])
                p_Block += alignment;

            *(uint32_t *)(p_Block - 4) = i;
#endif /* DEBUG_MEM_LEAKS */

            p_Mem->p_BlocksStack[i] = p_Block;
        }
    }

    /* store name; FIX: strncpy does not NUL-terminate when the source fills the
       buffer, so terminate explicitly (the memset above also guarantees this) */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);
    p_Mem->name[MEM_MAX_NAME_LENGTH-1] = '\0';

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}
/**
 * doSubmitIO - Build and send a PSAN PUT (write) or GET (read) request
 * for the given outstanding I/O.
 *
 * @io  The in-flight I/O descriptor; its `outstanding` sub-record is filled
 *      in so the reply/timeout can be matched and dispatched.
 *
 * FIX: the original logged mbuf failures but fell through and kept using an
 * uninitialized/partial mbuf (use of indeterminate `m` after a failed
 * mbuf_allocpacket, and sending a half-built packet after a failed
 * mbuf_copyback/mbuf_buffer). We now bail out early and free the mbuf where
 * one was allocated. NOTE(review): on these early returns `io` is left
 * uncompleted — presumably it should be finished with an error status via
 * the driver's completion path; confirm and handle (original TODO).
 */
void net_habitue_device_SC101::doSubmitIO(outstanding_io *io)
{
  bool isWrite = (io->buffer->getDirection() == kIODirectionOut);
  UInt32 ioLen = (io->nblks * SECTOR_SIZE);
  mbuf_t m;

  retryResolve();

  if (isWrite)
  {
    KDEBUG("%p write %d %d (%d)", io, io->block, io->nblks, _outstandingCount);

    psan_put_t req;
    bzero(&req, sizeof(req));
    req.ctrl.cmd = PSAN_PUT;
    req.ctrl.seq = ((net_habitue_driver_SC101 *)getProvider())->getSequenceNumber();
    /* NOTE(review): POWER_OF_2(ioLen) is presumably a log2-style encoding of
       the transfer length — confirm the macro's semantics. */
    req.ctrl.len_power = POWER_OF_2(ioLen);
    req.sector = htonl(io->block);

    if (mbuf_allocpacket(MBUF_WAITOK, sizeof(req) + ioLen, NULL, &m) != 0)
    {
      KINFO("mbuf_allocpacket failed!"); // TODO(iwade) handle
      return; /* m is uninitialized on failure — must not touch it */
    }

    if (mbuf_copyback(m, 0, sizeof(req), &req, MBUF_WAITOK) != 0)
    {
      KINFO("mbuf_copyback failed!"); // TODO(iwade) handle
      mbuf_freem(m);
      return;
    }

    if (!mbuf_buffer(io->buffer, 0, m, sizeof(req), ioLen))
    {
      KINFO("mbuf_buffer failed"); // TODO(iwade) handle
      mbuf_freem(m);
      return;
    }

    io->outstanding.seq = ntohs(req.ctrl.seq);
    io->outstanding.len = sizeof(psan_put_response_t);
    io->outstanding.cmd = PSAN_PUT_RESPONSE;
  }
  else
  {
    KDEBUG("%p read %d %d (%d)", io, io->block, io->nblks, _outstandingCount);

    psan_get_t req;
    bzero(&req, sizeof(req));
    req.ctrl.cmd = PSAN_GET;
    req.ctrl.seq = ((net_habitue_driver_SC101 *)getProvider())->getSequenceNumber();
    req.ctrl.len_power = POWER_OF_2(ioLen);
    req.sector = htonl(io->block);

    if (mbuf_allocpacket(MBUF_WAITOK, sizeof(req), NULL, &m) != 0)
    {
      KINFO("mbuf_allocpacket failed!"); // TODO(iwade) handle
      return; /* m is uninitialized on failure — must not touch it */
    }

    if (mbuf_copyback(m, 0, sizeof(req), &req, MBUF_WAITOK) != 0)
    {
      KINFO("mbuf_copyback failed!"); // TODO(iwade) handle
      mbuf_freem(m);
      return;
    }

    io->outstanding.seq = ntohs(req.ctrl.seq);
    io->outstanding.len = sizeof(psan_get_response_t) + ioLen;
    io->outstanding.cmd = PSAN_GET_RESPONSE;
  }

  /* Wire up reply/timeout dispatch for this request. */
  io->outstanding.packetHandler =
      OSMemberFunctionCast(PacketHandler, this, &net_habitue_device_SC101::handleAsyncIOPacket);
  io->outstanding.timeoutHandler =
      OSMemberFunctionCast(TimeoutHandler, this, &net_habitue_device_SC101::handleAsyncIOTimeout);
  io->outstanding.target = this;
  io->outstanding.ctx = io;
  io->outstanding.timeout_ms = io->timeout_ms;

  ((net_habitue_driver_SC101 *)getProvider())->sendPacket(
      (sockaddr_in *)io->addr->getBytesNoCopy(), m, &io->outstanding);
}
/**
 * MEM_InitByAddress - Create a memory-segment pool over caller-provided
 * (externally owned) consecutive memory.
 *
 * @name        Pool name (copied, truncated to MEM_MAX_NAME_LENGTH-1 chars).
 * @p_Handle    Out: receives the segment handle on success, NULL on failure.
 * @num         Number of blocks to carve out of p_Memory.
 * @dataSize    Data size of each block.
 * @prefixSize  Bytes reserved before the data area of each block.
 * @postfixSize Bytes reserved after the data area of each block.
 * @alignment   Required data alignment; forced to >= 4, must be a power of 2.
 * @p_Memory    Caller-supplied buffer holding all the blocks; must be large
 *              enough for num aligned blocks (not verified here).
 *
 * Returns E_OK on success, or an error via RETURN_ERROR() on NULL memory,
 * invalid alignment, allocation failure, or spinlock-creation failure.
 */
t_Error MEM_InitByAddress(char     name[],
                          t_Handle *p_Handle,
                          uint32_t num,
                          uint16_t dataSize,
                          uint16_t prefixSize,
                          uint16_t postfixSize,
                          uint16_t alignment,
                          uint8_t  *p_Memory)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;
    uint8_t         *p_Blocks;

    /* prepare in case of error */
    *p_Handle = NULL;

    if (!p_Memory)
    {
        RETURN_ERROR(MAJOR, E_NULL_POINTER, ("Memory blocks"));
    }

    p_Blocks = p_Memory;

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }

    /* FIX: zero the descriptor immediately. Later error paths call
       MEM_Free(p_Mem) while fields such as h_Spinlock are not yet set;
       without this memset those fields hold indeterminate values.
       (Assumes MEM_Free tolerates NULL members — NOTE(review): confirm.) */
    memset(p_Mem, 0, sizeof(t_MemorySegment));

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        XX_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array: a single base — the memory is one
       external consecutive chunk */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc(sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->p_Bases[0] = p_Blocks;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_EXTERNAL;
    p_Mem->consecutiveMem = TRUE;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;

    /* store name; FIX: strncpy does not NUL-terminate when the source fills the
       buffer, so terminate explicitly (the memset above also guarantees this) */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);
    p_Mem->name[MEM_MAX_NAME_LENGTH-1] = '\0';

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    /* pad so the data area starts 4-aligned after the prefix */
    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment, (alignPad + prefixSize + dataSize + postfixSize));

    /* The following manipulation places the data of block[0] in an aligned address,
       since block size is aligned the following block datas will all be aligned */
    ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* initialize the blocks */
    for (i=0; i < num; i++)
    {
        p_Mem->p_BlocksStack[i] = p_Blocks;
        p_Blocks += blockSize;
    }

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);

        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}