Example #1
/*
s32 ISFS_ReadFileToArray (char *filepath, u8 *filearray, u32 max_size, u32 *file_size) {
	s32 ret, fd;
	static fstats filestats ATTRIBUTE_ALIGN(32);
	ret = ISFS_Open(filepath, ISFS_OPEN_READ);
	if (ret <= 0)
	{
		printf("Error! ISFS_Open (ret = %d)\n", ret);
		return -1;
	}
	
	fd = ret;
	
	ret = ISFS_GetFileStats(fd, &filestats);
	if (ret < 0)
	{
		printf("Error! ISFS_GetFileStats (ret = %d)\n", ret);
		ISFS_Close(fd);
		return -1;
	}
	
	*file_size = filestats.file_length;
	
	if (*file_size > max_size)
	{
		printf("File is too large! Size: %u\n", *file_size);
		ISFS_Close(fd);
		return -1;
	}
	
	ret = ISFS_Read(fd, filearray, *file_size);
	if (ret < 0)
	{
		printf("Error! ISFS_Read (ret = %d)\n", ret);
		ISFS_Close(fd);
		return -1;
	}
	
	ret = ISFS_Close(fd);
	if (ret < 0)
	{
		printf("Error! ISFS_Close (ret = %d)\n", ret);
		return -1;
	}
	return 0;
}
*/
s32 Identify(const u8 *certs, u32 certs_size, const u8 *idtmd, u32 idtmd_size, const u8 *idticket, u32 idticket_size) {
	s32 ret;
	u32 keyid = 0;
	ret = ES_Identify((signed_blob*)certs, certs_size, (signed_blob*)idtmd, idtmd_size, (signed_blob*)idticket, idticket_size, &keyid);
	if (ret < 0)
	{
		printf("\n");
		switch(ret){
			case ES_EINVAL:
				printf("Error! ES_Identify (ret = %d;) Data invalid!\n", ret);
				break;
			case ES_EALIGN:
				printf("Error! ES_Identify (ret = %d;) Data not aligned!\n", ret);
				break;
			case ES_ENOTINIT:
				printf("Error! ES_Identify (ret = %d;) ES not initialized!\n", ret);
				break;
			case ES_ENOMEM:
				printf("Error! ES_Identify (ret = %d;) No memory!\n", ret);
				break;
			default:
				printf("Error! ES_Identify (ret = %d)\n", ret);
				break;
		}
		if (!ISALIGNED(certs)) printf("\tCertificate data is not aligned!\n");
		if (!ISALIGNED(idtmd)) printf("\tTMD data is not aligned!\n");
		if (!ISALIGNED(idticket)) printf("\tTicket data is not aligned!\n");
	}
	return ret;
}
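ISALIGNED is not defined in this snippet. In Wii homebrew code that hands buffers to ES/IOS it is typically a 32-byte alignment test, roughly as sketched below (an assumption, not this project's actual definition); the buffers themselves are normally declared with ATTRIBUTE_ALIGN(32), as in the commented-out helper above.
/* Sketch only: assumes ISALIGNED checks 32-byte alignment, as IOS IPC requires. */
#define ISALIGNED(x) ((((u32)(x)) & 0x1F) == 0)

/* Typical caller-side declaration (CERTS_SIZE is a hypothetical placeholder): */
static u8 certs[CERTS_SIZE] ATTRIBUTE_ALIGN(32);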
Example #2
static int
sb_write_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len)
{
	uint coreidx, n;
	void *regs;
	sbconfig_t *sb;
	pci_config_regs *cfg;

	if (dev >= SB_MAXCORES || (off + len) > sizeof(pci_config_regs))
		return -1;
	cfg = &sb_config_regs[dev];

	ASSERT(ISALIGNED(off, len));
	ASSERT(ISALIGNED((uintptr)buf, len));

	/* Emulate BAR sizing */
	if (off >= OFFSETOF(pci_config_regs, base[0]) && off <= OFFSETOF(pci_config_regs, base[3]) &&
	    len == 4 && *((uint32 *) buf) == ~0) {
		coreidx = sb_coreidx(sbh);
		if ((regs = sb_setcoreidx(sbh, dev))) {
			sb = (sbconfig_t *)((ulong) regs + SBCONFIGOFF);
			/* Highest numbered address match register */
			n = (R_REG(&sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT;
			if (off == OFFSETOF(pci_config_regs, base[0]))
				cfg->base[0] = ~(sb_size(R_REG(&sb->sbadmatch0)) - 1);
			else if (off == OFFSETOF(pci_config_regs, base[1]) && n >= 1)
				cfg->base[1] = ~(sb_size(R_REG(&sb->sbadmatch1)) - 1);
			else if (off == OFFSETOF(pci_config_regs, base[2]) && n >= 2)
				cfg->base[2] = ~(sb_size(R_REG(&sb->sbadmatch2)) - 1);
			else if (off == OFFSETOF(pci_config_regs, base[3]) && n >= 3)
				cfg->base[3] = ~(sb_size(R_REG(&sb->sbadmatch3)) - 1);
		}
Example #3
void SequentialFile::frameData(void* payload, size_t size) {
  ASSERT_TRUE(ISALIGNED(payload, RECORD_FRAME_ALIGNMENT));
  Record* record = Record::GetRecord((char*)payload);

  SequentialFile::Record* new_record = new (record)Record(size);
  next_write_offset = record2offset( (SequentialFile::Record*)new_record->getEndOfRecord() );
  ASSERT_TRUE(ISALIGNED((void*)next_write_offset, RECORD_FRAME_ALIGNMENT));
}
Example #4
s32 ssl_setrootca(s32 ssl_context, const void *root, u32 length){

	s32 aSsl_context[8] ATTRIBUTE_ALIGN(32);
	s32 aResponse[8] ATTRIBUTE_ALIGN(32);
	s32 ret;

	ret = ssl_open();
	if(ret){
		return ret;
	}

	aSsl_context[0] = ssl_context;

	if(ISALIGNED(root)){ //Avoid alignment if the input is aligned
		ret = IOS_IoctlvFormat(__ssl_hid, __ssl_fd, IOCTLV_SSL_SETROOTCA, "d:dd", aResponse, 0x20, aSsl_context, 0x20, root, length);
	}else{
		u8 *aRoot = NULL;

		aRoot = iosAlloc(__ssl_hid, length);
		if (!aRoot) {
			return IPC_ENOMEM;
		}

		memcpy(aRoot, root, length);
		ret = IOS_IoctlvFormat(__ssl_hid, __ssl_fd, IOCTLV_SSL_SETROOTCA, "d:dd", aResponse, 0x20, aSsl_context, 0x20, aRoot, length);

		if(aRoot){
			iosFree(__ssl_hid, aRoot);
		}
	}

	ssl_close();

	return (ret ? ret : aResponse[0]);
}
Example #5
s32 ssl_write(s32 ssl_context, const void *buffer, s32 length){

	s32 aSsl_context[8] ATTRIBUTE_ALIGN(32);
	s32 aResponse[8] ATTRIBUTE_ALIGN(32);
	s32 ret;

	ret = ssl_open();
	if(ret){
		return ret;
	}

	if(!buffer){
		return IPC_EINVAL;
	}

	aSsl_context[0] = ssl_context;

	if(ISALIGNED(buffer)){ //Avoid alignment if the input is aligned
		ret = IOS_IoctlvFormat(__ssl_hid, __ssl_fd, IOCTLV_SSL_WRITE, "d:dd", aResponse, 0x20, aSsl_context, 0x20, buffer, length);
	}else{
		u8 *aBuffer = NULL;
		aBuffer = iosAlloc(__ssl_hid, length);
		if (!aBuffer) {
			return IPC_ENOMEM;
		}

		memcpy(aBuffer, buffer, length);
		ret = IOS_IoctlvFormat(__ssl_hid, __ssl_fd, IOCTLV_SSL_WRITE, "d:dd", aResponse, 0x20, aSsl_context, 0x20, aBuffer, length);

		if(aBuffer){
			iosFree(__ssl_hid, aBuffer);
		}
	}

	ssl_close();

	return (ret ? ret : aResponse[0]);
}
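A minimal, hypothetical usage sketch of the wrappers shown in these examples, assuming ssl_new() returns a non-negative context handle on success. The connect/handshake calls are omitted because they are not part of this listing; ca_der, request and their lengths are caller-provided placeholders.
s32 ctx = ssl_new((u8 *)"example.com", 0);   /* CN to verify, no extra verify options */
if (ctx >= 0) {
	ssl_setrootca(ctx, ca_der, ca_der_len);  /* install the root CA blob */
	ssl_write(ctx, request, request_len);    /* send data (connect step omitted here) */
}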
Example #6
s32 ssl_new(u8 * CN, u32 ssl_verify_options){

	s32 ret;
	s32 aContext[8] ATTRIBUTE_ALIGN(32);
	u32 aVerify_options[8] ATTRIBUTE_ALIGN(32);

	ret = ssl_open();
	if(ret){
		return ret;
	}

	aVerify_options[0] = ssl_verify_options;

	if(ISALIGNED(CN)){ //Avoid alignment if the input is aligned
		ret = IOS_IoctlvFormat(__ssl_hid, __ssl_fd, IOCTLV_SSL_NEW, "d:dd", aContext, 0x20, aVerify_options, 0x20, CN, 0x100);
	}else{
		u8 *aCN = NULL;

		aCN = iosAlloc(__ssl_hid, 0x100);
		if (!aCN) {
			return IPC_ENOMEM;
		}

		memcpy(aCN, CN, 0x100);
		ret = IOS_IoctlvFormat(__ssl_hid, __ssl_fd, IOCTLV_SSL_NEW, "d:dd", aContext, 0x20, aVerify_options, 0x20, aCN, 0x100);

		if(aCN){
			iosFree(__ssl_hid, aCN);
		}
	}

	ssl_close();

	return (ret ? ret : aContext[0]);
}
Example #7
RecordFrame* RecordIterator::GetNext() {
  current = FindNextRecord(current);
  ASSERT_TRUE(ISALIGNED(current, RECORD_FRAME_ALIGNMENT));
  if (current != (RecordFrame*)RegionEnd()) {
    return current;
  } else {
    return NULL;
  }
}
Example #8
void SequentialFile::AppendRaw(void* data, size_t size) {
  record_frame_t* location = (record_frame_t*)getFreeSpace(size);
  location--;  // TODO: clean up
  memcpy(location, data, size);
  Record* new_record = static_cast<Record*>((void*)location);
  ASSERT_TRUE(new_record->isValid());
  next_write_offset = record2offset( (SequentialFile::Record*)new_record->getEndOfRecord() );
  ASSERT_TRUE(ISALIGNED((void*)next_write_offset, RECORD_FRAME_ALIGNMENT));
}
Example #9
/*
 * buffer_prepare: callback invoked from videobuf_qbuf(); it prepares the
 * buffer and converts the user-space virtual address into a physical
 * address.
 */
static int vpif_buffer_prepare(struct videobuf_queue *q,
			       struct videobuf_buffer *vb,
			       enum v4l2_field field)
{
	struct vpif_fh *fh = q->priv_data;
	struct common_obj *common;
	unsigned long addr;

	common = &fh->channel->common[VPIF_VIDEO_INDEX];
	if (VIDEOBUF_NEEDS_INIT == vb->state) {
		vb->width	= common->width;
		vb->height	= common->height;
		vb->size	= vb->width * vb->height;
		vb->field	= field;
	}
	vb->state = VIDEOBUF_PREPARED;

	/* if user pointer memory mechanism is used, get the physical
	 * address of the buffer */
	if (V4L2_MEMORY_USERPTR == common->memory) {
		if (!vb->baddr) {
			vpif_err("buffer_address is 0\n");
			return -EINVAL;
		}

		vb->boff = vpif_uservirt_to_phys(vb->baddr);
		if (!ISALIGNED(vb->boff))
			goto buf_align_exit;
	}

	addr = vb->boff;
	if (q->streaming && (V4L2_BUF_TYPE_SLICED_VBI_OUTPUT != q->type)) {
		if (!ISALIGNED(addr + common->ytop_off) ||
		    !ISALIGNED(addr + common->ybtm_off) ||
		    !ISALIGNED(addr + common->ctop_off) ||
		    !ISALIGNED(addr + common->cbtm_off))
			goto buf_align_exit;
	}
	return 0;

buf_align_exit:
	vpif_err("buffer offset not aligned to 8 bytes\n");
	return -EINVAL;
}
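ISALIGNED is a driver-local macro here; the error path above ("not aligned to 8 bytes") implies a single-argument 8-byte check, roughly as sketched below (an assumption, not the driver's exact definition).
/* Sketch: true when the offset/address is 8-byte aligned. */
#define ISALIGNED(a)	(((a) & 0x7) == 0)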
Example #10
static really_inline
u32 getSubOffset(const struct Tamarama *t, u32 num) {
    DEBUG_PRINTF("subengine:%u\n", num);
    assert(num < t->numSubEngines);
    const u32 *sub =
        (const u32 *)((const char *)t + sizeof(struct Tamarama) +
                      t->numSubEngines * sizeof(u32));
    assert(ISALIGNED(sub));
    return sub[num];
}
Example #11
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
}
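The Broadcom-derived examples use a two-argument ISALIGNED(addr, boundary); a sketch of that form follows (boundary must be a power of two; the real bcmutils definition may differ in detail). In this function the check lets the allocator over-allocate by align bytes when the backing allocator's natural alignment is too coarse, so the caller can realign the block itself.
/* Sketch: true when 'a' is a multiple of the power-of-two 'x'. */
#define ISALIGNED(a, x)	((((uintptr)(a)) & ((x) - 1)) == 0)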
Example #12
static really_inline
const struct NFA *getSomRevNFA(const struct RoseEngine *t, u32 i) {
    assert(t->somRevOffsetOffset);
    const u32 *rev_offsets
        = (const u32 *)((const u8 *)t + t->somRevOffsetOffset);
    u32 nfa_offset = rev_offsets[i];
    assert(nfa_offset && nfa_offset < t->size);
    const struct NFA *n = (const struct NFA *)(((const u8 *)t + nfa_offset));
    assert(ISALIGNED(n));

    return n;
}
Example #13
/* allocate one page of cached, shared (dma-able) or regular memory */
NDIS_STATUS
shared_allocpage(
	IN shared_info_t *shared,
	IN BOOLEAN shared_mem,
	IN BOOLEAN cached,
	OUT page_t *page
)
{
	NDIS_STATUS status;

	/* uncached not supported */
	ASSERT(cached);

	bzero(page, sizeof(page_t));
	if (shared_mem) {
		NdisMAllocateSharedMemory(shared->adapterhandle, PAGE_SIZE, cached,
		                          (void **) &page->va, &page->pa);

		/* Make sure that we got valid address */
		if (!osl_dmaddr_valid(shared->osh, page->pa.LowPart, page->pa.HighPart)) {
			ND_ERROR(("%s%d: shared_allocpage: pa not valid \n", shared->id,
			          shared->unit));
			NdisMFreeSharedMemory(shared->adapterhandle, PAGE_SIZE,
			                      cached, (PVOID)page->va, page->pa);
			return (NDIS_STATUS_RESOURCES);
		}
	} else
		page->va = MALLOC(shared->osh, PAGE_SIZE);
	if (page->va == NULL) {
		ND_ERROR(("%s%d: shared_allocpage: out of memory, malloced %d bytes\n", shared->id,
		          shared->unit, MALLOCED(shared->osh)));
		return (NDIS_STATUS_RESOURCES);
	}
	ASSERT(!shared_mem || ISALIGNED((uintptr)page->va, PAGE_SIZE));
	ASSERT(page->pa.HighPart == 0);
	ASSERT(ISALIGNED(page->pa.LowPart, PAGE_SIZE));
	status = NDIS_STATUS_SUCCESS;

	return (status);
}
Example #14
static int vpif_check_format(struct channel_obj *ch,
			     struct v4l2_pix_format *pixfmt)
{
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	enum v4l2_field field = pixfmt->field;
	u32 sizeimage, hpitch, vpitch;

	if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
		goto invalid_fmt_exit;

	if (!(VPIF_VALID_FIELD(field)))
		goto invalid_fmt_exit;

	if (pixfmt->bytesperline <= 0)
		goto invalid_pitch_exit;

	if (V4L2_MEMORY_USERPTR == common->memory)
		sizeimage = pixfmt->sizeimage;
	else
		sizeimage = config_params.channel_bufsize[ch->channel_id];

	if (vpif_get_std_info(ch)) {
		vpif_err("Error getting the standard info\n");
		return -EINVAL;
	}

	hpitch = pixfmt->bytesperline;
	vpitch = sizeimage / (hpitch * 2);

	/* Check for valid value of pitch */
	if ((hpitch < ch->vpifparams.std_info.width) ||
	    (vpitch < ch->vpifparams.std_info.height))
		goto invalid_pitch_exit;

	/* Check for 8 byte alignment */
	if (!ISALIGNED(hpitch)) {
		vpif_err("invalid pitch alignment\n");
		return -EINVAL;
	}
	pixfmt->width = common->fmt.fmt.pix.width;
	pixfmt->height = common->fmt.fmt.pix.height;

	return 0;

invalid_fmt_exit:
	vpif_err("invalid field format\n");
	return -EINVAL;

invalid_pitch_exit:
	vpif_err("invalid pitch\n");
	return -EINVAL;
}
Example #15
static int
sb_read_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len)
{
	pci_config_regs *cfg;

	if (dev >= SB_MAXCORES || (off + len) > sizeof(pci_config_regs))
		return -1;
	cfg = &sb_config_regs[dev];

	ASSERT(ISALIGNED(off, len));
	ASSERT(ISALIGNED((uintptr)buf, len));

	if (len == 4)
		*((uint32 *) buf) = ltoh32(*((uint32 *)((ulong) cfg + off)));
	else if (len == 2)
		*((uint16 *) buf) = ltoh16(*((uint16 *)((ulong) cfg + off)));
	else if (len == 1)
		*((uint8 *) buf) = *((uint8 *)((ulong) cfg + off));
	else
		return -1;

	return 0;
}
Example #16
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifdef __ARM_ARCH_7A__
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	va = pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap);
#endif
	return va;
}
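A hedged sketch of how a caller consumes the over-allocated block: round the returned virtual address up to the required alignment and keep the padding so the original pointer can still be freed later; dma_attach in Example #21 below uses the same pattern. ROUNDUP is assumed to be the usual round-up-to-multiple macro, and osh, ringsz and align_bits are caller-provided placeholders.
uint alloced;
ulong pa;
void *va = osl_dma_alloc_consistent(osh, ringsz, align_bits, &alloced, &pa);
if (va != NULL) {
	void *ring = (void *)ROUNDUP((uintptr)va, 1 << align_bits);  /* aligned start of the ring */
	uint pad = (uint)((uintptr)ring - (uintptr)va);              /* keep, so 'va' can be freed later */
}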
Example #17
static really_inline
char fireReports(const struct sheng *sh, NfaCallback cb, void *ctxt,
                 const u8 state, u64a loc, u8 *const cached_accept_state,
                 ReportID *const cached_accept_id, char eod) {
    DEBUG_PRINTF("reporting matches @ %llu\n", loc);

    if (!eod && state == *cached_accept_state) {
        DEBUG_PRINTF("reporting %u\n", *cached_accept_id);
        if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
            return MO_HALT_MATCHING; /* termination requested */
        }

        return MO_CONTINUE_MATCHING; /* continue execution */
    }
    const struct sstate_aux *aux = get_aux(sh, state);
    const struct report_list *rl = eod ? get_eod_rl(sh, aux) : get_rl(sh, aux);
    assert(ISALIGNED(rl));

    DEBUG_PRINTF("report list has %u entries\n", rl->count);
    u32 count = rl->count;

    if (!eod && count == 1) {
        *cached_accept_state = state;
        *cached_accept_id = rl->report[0];

        DEBUG_PRINTF("reporting %u\n", rl->report[0]);
        if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
            return MO_HALT_MATCHING; /* termination requested */
        }

        return MO_CONTINUE_MATCHING; /* continue execution */
    }

    for (u32 i = 0; i < count; i++) {
        DEBUG_PRINTF("reporting %u\n", rl->report[i]);
        if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
            return MO_HALT_MATCHING; /* termination requested */
        }
    }
    return MO_CONTINUE_MATCHING; /* continue execution */
}
Example #18
/*
 * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
 *                                  pool uses a contiguous block of pre-alloced
 *                                  memory. The memory block may either be provided
 *                                  by the client or dynamically allocated by the
 *                                  pool manager.
 *
 * Parameters:
 *    mgr:      INPUT  The handle to the pool manager
 *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool.
 *                     Must be >= sizeof(void *).
 *    nobj:     INPUT  Maximum number of concurrently existing objects to support
 *    memstart  INPUT  Pointer to the memory to use, or NULL to malloc()
 *    memsize   INPUT  Number of bytes referenced from memstart (for error checking)
 *                     Must be 0 if 'memstart' is NULL.
 *    poolname  INPUT  For instrumentation, the name of the pool
 *    newp:     OUTPUT The handle for the new pool, if creation is successful
 *
 * Returns:
 *    BCME_OK   Pool created ok.
 *    other     Pool not created due to indicated error. newp set to NULL.
 *
 *
 */
int BCMATTACHFN(bcm_mpm_create_prealloc_pool)(bcm_mpm_mgr_h mgr,
                                              unsigned int obj_sz,
                                              int nobj,
                                              void *memstart,
                                              unsigned int memsize,
                                              char poolname[BCM_MP_NAMELEN],
                                              bcm_mp_pool_h *newp)
{
	int ret;
	unsigned int padded_obj_sz;

	/* Check parameters */
	if ((nobj == 0) ||
	    (memstart && !memsize) ||
	    (memsize && !memstart)) {
		return (BCME_BADARG);
	}


	/* Get padded object size. */
	ret = bcm_mpm_get_obj_size(mgr, obj_sz, &padded_obj_sz);
	if (ret != BCME_OK) {
		return (ret);
	}


	/* For client allocated memory, validate memory block size and alignment. */
	if (memstart != NULL) {
		if ((memsize < (padded_obj_sz * nobj)) || (!ISALIGNED(memstart, sizeof(void *)))) {
			return (BCME_BADARG);
		}
	}

	return (create_pool(mgr, obj_sz, padded_obj_sz, nobj, memstart, poolname,
	                    BCM_MP_TYPE_PREALLOC, newp));
}
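A hypothetical call matching the documented parameters, letting the pool manager allocate the backing memory itself (memstart = NULL, memsize = 0); mgr and my_obj_t are placeholders for the caller's pool manager handle and object type.
bcm_mp_pool_h pool = NULL;
int err = bcm_mpm_create_prealloc_pool(mgr, sizeof(my_obj_t), 32,
                                       NULL, 0, "my_objs", &pool);
if (err != BCME_OK) {
	/* pool was not created; 'pool' is left NULL */
}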
Example #19
RecordFrame* RecordIterator::GetPrevious() {
  current = FindPreviousRecord(current);
  ASSERT_TRUE(ISALIGNED(current, RECORD_FRAME_ALIGNMENT));
  return current;
}
Example #20
/* post receive buffers */
void
dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint ctrl;
	uint n;
	uint i;
	uint32 pa;
	uint rxbufsize;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;
	rxbufsize = di->rxbufsize;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	for (i = 0; i < n; i++) {
		if ((p = PKTGET(di->drv, rxbufsize, FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;
			break;
		}

		/* PR3263 & PR3387 & PR4642 war: rxh.len=0 means dma writes not complete */
		*(uint32*)(OSL_UNCACHED(PKTDATA(di->drv, p))) = 0;

		pa = (uint32) DMA_MAP(di->dev, PKTDATA(di->drv, p), rxbufsize, DMA_RX, p);
		ASSERT(ISALIGNED(pa, 4));

		/* save the free packet pointer */
#if 0
		ASSERT(di->rxp[rxout] == NULL);
#endif
		di->rxp[rxout] = p;

		/* paranoia */
		ASSERT(R_SM(&di->rxd[rxout].addr) == 0);

		/* prep the descriptor control value */
		ctrl = rxbufsize;
		if (rxout == (di->nrxd - 1))
			ctrl |= CTRL_EOT;

		/* init the rx descriptor */
		W_SM(&di->rxd[rxout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->rxd[rxout].addr, BUS_SWAP32(pa + di->dataoffset));

		DMA_TRACE(("%s: dma_rxfill:  ctrl %08x dataoffset: %08x\n", di->name, BUS_SWAP32(ctrl), BUS_SWAP32(pa + di->dataoffset)));

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->regs->rcvptr, I2B(rxout));
}
Example #21
void*
dma_attach(void *drv, void *dev, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
	uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset, uint *msg_level)
{
	dma_info_t *di;
	void *va;

	ASSERT(ntxd <= MAXDD);
	ASSERT(nrxd <= MAXDD);

	/* allocate private info structure */
	if ((di = MALLOC(sizeof (dma_info_t))) == NULL)
		return (NULL);
	bzero((char*)di, sizeof (dma_info_t));

	/* set message level */
	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	DMA_TRACE(("%s: dma_attach: drv 0x%x dev 0x%x regs 0x%x ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n", name, (uint)drv, (uint)dev, (uint)regs, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, ddoffset, dataoffset));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->drv = drv;
	di->dev = dev;
	di->regs = regs;

	/* allocate transmit descriptor ring */
	if (ntxd) {
		if ((va = DMA_ALLOC_CONSISTENT(dev, (DMAMAXRINGSZ + DMARINGALIGN), &di->txdpa)) == NULL)
			goto fail;
		di->txd = (dmadd_t*) ROUNDUP(va, DMARINGALIGN);
		di->txdalign = ((uint)di->txd - (uint)va);
		di->txdpa = (void*) ((uint)di->txdpa + di->txdalign);
		ASSERT(ISALIGNED(di->txd, DMARINGALIGN));
	}

	/* allocate receive descriptor ring */
	if (nrxd) {
		if ((va = DMA_ALLOC_CONSISTENT(dev, (DMAMAXRINGSZ + DMARINGALIGN), &di->rxdpa)) == NULL)
			goto fail;
		di->rxd = (dmadd_t*) ROUNDUP(va, DMARINGALIGN);
		di->rxdalign = ((uint)di->rxd - (uint)va);
		di->rxdpa = (void*) ((uint)di->rxdpa + di->rxdalign);
		ASSERT(ISALIGNED(di->rxd, DMARINGALIGN));
	}

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;
	di->rxbufsize = rxbufsize;
	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;
	di->ddoffset = ddoffset;
	di->dataoffset = dataoffset;

	return ((void*)di);

fail:
	dma_detach((void*)di);
	return (NULL);
}
Example #22
/** Used by hs_alloc_scratch and hs_clone_scratch to allocate a complete
 * scratch region from a prototype structure. */
static
hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
    u32 queueCount = proto->queueCount;
    u32 deduperCount = proto->deduper.log_size;
    u32 bStateSize = proto->bStateSize;
    u32 tStateSize = proto->tStateSize;
    u32 fullStateSize = proto->fullStateSize;
    u32 anchored_literal_region_len = proto->anchored_literal_region_len;
    u32 anchored_literal_region_width = proto->anchored_literal_count;

    u32 som_store_size = proto->som_store_count * sizeof(u64a);
    u32 som_attempted_store_size = proto->som_store_count * sizeof(u64a);
    u32 som_now_size = fatbit_size(proto->som_store_count);
    u32 som_attempted_size = fatbit_size(proto->som_store_count);

    struct hs_scratch *s;
    struct hs_scratch *s_tmp;
    size_t queue_size = queueCount * sizeof(struct mq);
    size_t qmpq_size = queueCount * sizeof(struct queue_match);

    assert(anchored_literal_region_len < 8 * sizeof(s->al_log_sum));

    size_t anchored_literal_region_size = fatbit_array_size(
        anchored_literal_region_len, anchored_literal_region_width);
    size_t delay_region_size =
        fatbit_array_size(DELAY_SLOT_COUNT, proto->delay_count);

    // the size is all the allocated stuff, not including the struct itself
    size_t size = queue_size + 63
                  + bStateSize + tStateSize
                  + fullStateSize + 63 /* cacheline padding */
                  + fatbit_size(proto->handledKeyCount) /* handled roles */
                  + fatbit_size(queueCount) /* active queue array */
                  + 2 * fatbit_size(deduperCount) /* need odd and even logs */
                  + 2 * fatbit_size(deduperCount) /* ditto som logs */
                  + 2 * sizeof(u64a) * deduperCount /* start offsets for som */
                  + anchored_literal_region_size + qmpq_size
                  + delay_region_size
                  + som_store_size
                  + som_now_size
                  + som_attempted_size
                  + som_attempted_store_size + 15;

    /* the struct plus the allocated stuff plus padding for cacheline
     * alignment */
    const size_t alloc_size = sizeof(struct hs_scratch) + size + 256;
    s_tmp = hs_scratch_alloc(alloc_size);
    hs_error_t err = hs_check_alloc(s_tmp);
    if (err != HS_SUCCESS) {
        hs_scratch_free(s_tmp);
        *scratch = NULL;
        return err;
    }

    memset(s_tmp, 0, alloc_size);
    s = ROUNDUP_PTR(s_tmp, 64);
    DEBUG_PRINTF("allocated %zu bytes at %p but realigning to %p\n", alloc_size, s_tmp, s);
    DEBUG_PRINTF("sizeof %zu\n", sizeof(struct hs_scratch));
    *s = *proto;

    s->magic = SCRATCH_MAGIC;
    s->in_use = 0;
    s->scratchSize = alloc_size;
    s->scratch_alloc = (char *)s_tmp;

    // each of these is at an offset from the previous
    char *current = (char *)s + sizeof(*s);

    // align current so that the following arrays are naturally aligned: this
    // is accounted for in the padding allocated
    current = ROUNDUP_PTR(current, 8);

    s->queues = (struct mq *)current;
    current += queue_size;

    assert(ISALIGNED_N(current, 8));
    s->som_store = (u64a *)current;
    current += som_store_size;

    s->som_attempted_store = (u64a *)current;
    current += som_attempted_store_size;

    current = ROUNDUP_PTR(current, alignof(struct fatbit *));
    s->delay_slots = (struct fatbit **)current;
    current += sizeof(struct fatbit *) * DELAY_SLOT_COUNT;
    current = ROUNDUP_PTR(current, alignof(struct fatbit));
    for (u32 i = 0; i < DELAY_SLOT_COUNT; i++) {
        s->delay_slots[i] = (struct fatbit *)current;
        assert(ISALIGNED(s->delay_slots[i]));
        current += fatbit_size(proto->delay_count);
    }

    current = ROUNDUP_PTR(current, alignof(struct fatbit *));
    s->al_log = (struct fatbit **)current;
    current += sizeof(struct fatbit *) * anchored_literal_region_len;
    current = ROUNDUP_PTR(current, alignof(struct fatbit));
    for (u32 i = 0; i < anchored_literal_region_len; i++) {
        s->al_log[i] = (struct fatbit *)current;
        assert(ISALIGNED(s->al_log[i]));
        current += fatbit_size(anchored_literal_region_width);
    }

    current = ROUNDUP_PTR(current, 8);
    s->catchup_pq.qm = (struct queue_match *)current;
    current += qmpq_size;

    s->bstate = (char *)current;
    s->bStateSize = bStateSize;
    current += bStateSize;

    s->tstate = (char *)current;
    s->tStateSize = tStateSize;
    current += tStateSize;

    current = ROUNDUP_PTR(current, 64);

    assert(ISALIGNED_N(current, 8));
    s->deduper.som_start_log[0] = (u64a *)current;
    current += sizeof(u64a) * deduperCount;

    s->deduper.som_start_log[1] = (u64a *)current;
    current += sizeof(u64a) * deduperCount;

    assert(ISALIGNED_N(current, 8));
    s->aqa = (struct fatbit *)current;
    current += fatbit_size(queueCount);

    s->handled_roles = (struct fatbit *)current;
    current += fatbit_size(proto->handledKeyCount);

    s->deduper.log[0] = (struct fatbit *)current;
    current += fatbit_size(deduperCount);

    s->deduper.log[1] = (struct fatbit *)current;
    current += fatbit_size(deduperCount);

    s->deduper.som_log[0] = (struct fatbit *)current;
    current += fatbit_size(deduperCount);

    s->deduper.som_log[1] = (struct fatbit *)current;
    current += fatbit_size(deduperCount);

    s->som_set_now = (struct fatbit *)current;
    current += som_now_size;

    s->som_attempted_set = (struct fatbit *)current;
    current += som_attempted_size;

    current = ROUNDUP_PTR(current, 64);
    assert(ISALIGNED_CL(current));
    s->fullState = (char *)current;
    s->fullStateSize = fullStateSize;
    current += fullStateSize;

    *scratch = s;

    // Don't get too big for your boots
    assert((size_t)(current - (char *)s) <= alloc_size);

    // Init q->scratch ptr for every queue.
    for (struct mq *qi = s->queues; qi != s->queues + queueCount; ++qi) {
        qi->scratch = s;
    }

    return HS_SUCCESS;
}
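The alignment bookkeeping above relies on ROUNDUP_PTR and ISALIGNED_N helpers that are not shown in this listing; below is a sketch of the usual power-of-two definitions (the project's actual macros may differ).
/* Sketch: round a pointer up to the next multiple of the power-of-two 'n',
 * and test whether a pointer is already a multiple of 'n'. */
#define ROUNDUP_PTR(p, n)	((void *)(((uintptr_t)(p) + ((n) - 1)) & ~(uintptr_t)((n) - 1)))
#define ISALIGNED_N(p, n)	(((uintptr_t)(p) & ((n) - 1)) == 0)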
Example #23
static rose_inline
int roseEodRunIterator(const struct RoseEngine *t, u8 *state, u64a offset,
                       struct hs_scratch *scratch) {
    if (!t->eodIterOffset) {
        return MO_CONTINUE_MATCHING;
    }

    const struct RoseRole *roleTable = getRoleTable(t);
    const struct RosePred *predTable = getPredTable(t);
    const struct RoseIterMapping *iterMapBase
        = getByOffset(t, t->eodIterMapOffset);
    const struct mmbit_sparse_iter *it = getByOffset(t, t->eodIterOffset);
    assert(ISALIGNED(iterMapBase));
    assert(ISALIGNED(it));

    // Sparse iterator state was allocated earlier
    struct mmbit_sparse_state *s = scratch->sparse_iter_state;
    struct fatbit *handled_roles = scratch->handled_roles;

    const u32 numStates = t->rolesWithStateCount;

    void *role_state = getRoleState(state);
    u32 idx = 0;
    u32 i = mmbit_sparse_iter_begin(role_state, numStates, &idx, it, s);

    fatbit_clear(handled_roles);

    for (; i != MMB_INVALID;
           i = mmbit_sparse_iter_next(role_state, numStates, i, &idx, it, s)) {
        DEBUG_PRINTF("pred state %u (iter idx=%u) is on\n", i, idx);
        const struct RoseIterMapping *iterMap = iterMapBase + idx;
        const struct RoseIterRole *roles = getByOffset(t, iterMap->offset);
        assert(ISALIGNED(roles));

        DEBUG_PRINTF("%u roles to consider\n", iterMap->count);
        for (u32 j = 0; j != iterMap->count; j++) {
            u32 role = roles[j].role;
            assert(role < t->roleCount);
            DEBUG_PRINTF("checking role %u, pred %u:\n", role, roles[j].pred);
            const struct RoseRole *tr = roleTable + role;

            if (fatbit_isset(handled_roles, t->roleCount, role)) {
                DEBUG_PRINTF("role %u already handled by the walk, skip\n",
                             role);
                continue;
            }

            // Special case: if this role is a trivial case (pred type simple)
            // we don't need to check any history and we already know the pred
            // role is on.
            if (tr->flags & ROSE_ROLE_PRED_SIMPLE) {
                DEBUG_PRINTF("pred type is simple, no need for checks\n");
            } else {
                assert(roles[j].pred < t->predCount);
                const struct RosePred *tp = predTable + roles[j].pred;
                if (!roseCheckPredHistory(tp, offset)) {
                    continue;
                }
            }

            /* mark role as handled so we don't touch it again in this walk */
            fatbit_set(handled_roles, t->roleCount, role);

            DEBUG_PRINTF("fire report for role %u, report=%u\n", role,
                         tr->reportId);
            int rv = scratch->tctxt.cb(offset, tr->reportId,
                                       scratch->tctxt.userCtx);
            if (rv == MO_HALT_MATCHING) {
                return MO_HALT_MATCHING;
            }
        }
    }

    return MO_CONTINUE_MATCHING;
}
Example #24
RecordFrame::RecordFrame(size_t size_in_bytes ) {
  ASSERT_TRUE(ISALIGNED((&header_data), RECORD_FRAME_ALIGNMENT));
  ASSERT_TRUE(header_data.plain_header == 0);
  setLength(size_in_bytes);            // ASSERT: memory was prev. filled with 0s
}
Example #25
NDIS_STATUS
shared_flush(
	IN shared_info_t *shared,
	IN uchar *va,
	IN ULONG pa,
	IN ULONG len,
	IN BOOLEAN writetodevice
)
{
#ifndef NDIS60
	PNDIS_BUFFER b;
	NDIS_STATUS status;
	NDIS_PHYSICAL_ADDRESS npa;

	/* if receive, buffer must begin and end on a cacheline boundary */
	if (!writetodevice) {
		ASSERT(ISALIGNED((uintptr)va, shared->cacheline));
		len = ROUNDUP(len, shared->cacheline);
	}

	/* alloc a temp buffer descriptor */
	NdisAllocateBuffer(&status, &b, shared->rxbufferpool, va, len);
	if (status != NDIS_STATUS_SUCCESS) {
		ND_ERROR(("%s%d: shared_flush: NdisAllocateBuffer error 0x%x\n",
			shared->id, shared->unit, status));
		return status;
	}

	/* flush processor cache */
	NdisAdjustBufferLength(b, len);
	NdisFlushBuffer(b, writetodevice);

	npa.HighPart = 0;
	npa.LowPart = pa;

#ifndef USEWDK


	if (!writetodevice)
		NdisMUpdateSharedMemory(shared->adapterhandle, len, va, npa);

#endif /* USEWDK */

	/* free the temp buffer descriptor */
	NdisFreeBuffer(b);
#else /* NDIS60 */
	PMDL b;

	/* if receive, buffer must begin and end on a cacheline boundary */
	if (!writetodevice) {
		ASSERT(ISALIGNED((uintptr)va, shared->cacheline));
		len = ROUNDUP(len, shared->cacheline);
	}

	/* alloc a temp MDL */
	b = NdisAllocateMdl(shared->adapterhandle, va, len);
	if (b == NULL) {
		ND_ERROR(("%s%d: shared_flush: NdisAllocateMdl error\n", shared->id, shared->unit));
		return NDIS_STATUS_FAILURE;
	}

	/* flush processor cache */
	NdisAdjustMdlLength(b, len);
	NdisFlushBuffer(b, writetodevice);

	/* free the temp MDL */
	NdisFreeMdl(b);
#endif /* NDIS60 */
	return NDIS_STATUS_SUCCESS;
}