/*
**
** Copy the contents of a note into the given buffer.
**
*/
int	cpy_frm_elfnote(elfobj* obj, char* note_name, int note_type, void* buf, int len)
{
  int		i;
  Elf_Note*	note;
  char*		start, * end, * cur_name, * data;

  for (i = 0; i < obj->header->e_phnum; i++)
    {
      if (obj->program_headers[i]->p_type == PT_NOTE)
	{
	  start = obj->data + obj->program_headers[i]->p_offset;
	  end = start + obj->program_headers[i]->p_filesz;
	  for (; start < end; )
	    {
	      note = (Elf_Note*) start;
	      start += sizeof(Elf_Note);
	      cur_name = start;
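	      /* name and descriptor are each padded to a 4-byte boundary */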
	      start += roundup2(note->n_namesz, 4);
	      data = start;
	      start += roundup2(note->n_descsz, 4);
	      if (!strcmp(note_name, cur_name) && note->n_type == note_type)
		{
		  memcpy(buf, data, len);
		  return (len);
		  /* *buf = data; */
		  /* *len = note->n_descsz; */
		}
	    }
	}
    }
  return (-1);
}
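All of the examples in this listing round a size or an address up to a power-of-two boundary with roundup2(). A minimal sketch of the usual BSD-style definition, exercised with the 4-byte padding the ELF note parser above relies on (the macro is illustrative; individual projects may carry their own variant):

#include <stdio.h>

/*
 * Round x up to the next multiple of y, where y must be a power of two.
 * Mirrors the common BSD sys/param.h definition; illustrative only.
 */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	printf("%d\n", roundup2(5, 4));	/* 8: a 5-byte name padded to 4-byte alignment */
	printf("%d\n", roundup2(8, 4));	/* 8: already-aligned values are unchanged */
	printf("%d\n", roundup2(0, 4));	/* 0 */
	return (0);
}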
Example #2
bool
tmpfs_strname_neqlen(struct componentname *fcnp, struct componentname *tcnp)
{
	const size_t fln = roundup2(fcnp->cn_namelen, TMPFS_NAME_QUANTUM);
	const size_t tln = roundup2(tcnp->cn_namelen, TMPFS_NAME_QUANTUM);

	return (fln != tln) || memcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fln);
}
Example #3
static void
nv_add(struct nv *nv, const unsigned char *value, size_t vsize, int type,
    const char *name)
{
	static unsigned char align[7];
	struct nvhdr *nvh;
	size_t namesize;

	if (nv == NULL) {
		errno = ENOMEM;
		return;
	}

	NV_CHECK(nv);

	namesize = strlen(name) + 1;

	nvh = malloc(sizeof(*nvh) + roundup2(namesize, 8));
	if (nvh == NULL) {
		if (nv->nv_error == 0)
			nv->nv_error = ENOMEM;
		return;
	}
	nvh->nvh_type = NV_ORDER_HOST | type;
	nvh->nvh_namesize = (uint8_t)namesize;
	nvh->nvh_dsize = (uint32_t)vsize;
	bcopy(name, nvh->nvh_name, namesize);

	/* Add header first. */
	if (ebuf_add_tail(nv->nv_ebuf, nvh, NVH_HSIZE(nvh)) == -1) {
		PJDLOG_ASSERT(errno != 0);
		if (nv->nv_error == 0)
			nv->nv_error = errno;
		free(nvh);
		return;
	}
	free(nvh);
	/* Add the actual data. */
	if (ebuf_add_tail(nv->nv_ebuf, value, vsize) == -1) {
		PJDLOG_ASSERT(errno != 0);
		if (nv->nv_error == 0)
			nv->nv_error = errno;
		return;
	}
	/* Align the data (if needed). */
	vsize = roundup2(vsize, 8) - vsize;
	if (vsize == 0)
		return;
	PJDLOG_ASSERT(vsize > 0 && vsize <= sizeof(align));
	if (ebuf_add_tail(nv->nv_ebuf, align, vsize) == -1) {
		PJDLOG_ASSERT(errno != 0);
		if (nv->nv_error == 0)
			nv->nv_error = errno;
		return;
	}
}
Example #4
/*
 * Return a pointer to the first file handle in the packet.
 * If the packet was truncated, return 0.
 */
static const uint32_t *
parsereq(netdissect_options *ndo,
         const struct sunrpc_msg *rp, u_int length)
{
	const uint32_t *dp;
	u_int len, rounded_len;

	/*
	 * Find the start of the req data (if we captured it).
	 * First, get the length of the credentials, and make sure
	 * we have all of the opaque part of the credentials.
	 */
	dp = (const uint32_t *)&rp->rm_call.cb_cred;
	if (length < 2 * sizeof(*dp))
		goto trunc;
	ND_TCHECK_4(dp + 1);
	len = EXTRACT_BE_U_4(dp + 1);
	rounded_len = roundup2(len, 4);
	ND_TCHECK_LEN(dp + 2, rounded_len);
	if (2 * sizeof(*dp) + rounded_len <= length) {
		/*
		 * We have all of the credentials.  Skip past them; they
		 * consist of 4 bytes of flavor, 4 bytes of length,
		 * and len-rounded-up-to-a-multiple-of-4 bytes of
		 * data.
		 */
		dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp);
		length -= 2 * sizeof(*dp) + rounded_len;

		/*
		 * Now get the length of the verifier, and make sure
		 * we have all of the opaque part of the verifier.
		 */
		if (length < 2 * sizeof(*dp))
			goto trunc;
		ND_TCHECK_4(dp + 1);
		len = EXTRACT_BE_U_4(dp + 1);
		rounded_len = roundup2(len, 4);
		ND_TCHECK_LEN(dp + 2, rounded_len);
		if (2 * sizeof(*dp) + rounded_len < length) {
			/*
			 * We have all of the verifier.  Skip past it;
			 * it consists of 4 bytes of flavor, 4 bytes of
			 * length, and len-rounded-up-to-a-multiple-of-4
			 * bytes of data.
			 */
			dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp);
			return (dp);
		}
	}
trunc:
	return (NULL);
}
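The credential and verifier that parsereq() steps over are RPC opaque-auth fields: 4 bytes of flavor, 4 bytes of length, then the data padded to a multiple of 4. The same size arithmetic expressed once, assuming the roundup2() macro sketched earlier (illustrative; not part of tcpdump):

static u_int
opaque_auth_size(u_int len)
{
	/* 4-byte flavor + 4-byte length + data rounded to a 4-byte multiple */
	return (2 * sizeof(uint32_t) + roundup2(len, 4));
}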
Example #5
File: tls.c Project: Hooman3/minix
int
_rtld_tls_offset_allocate(Obj_Entry *obj)
{
	size_t offset, next_offset;

	if (obj->tls_done)
		return 0;
	if (obj->tlssize == 0) {
		obj->tlsoffset = 0;
		obj->tls_done = 1;
		return 0;
	}

#ifdef __HAVE_TLS_VARIANT_I
	offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
	next_offset = offset + obj->tlssize;
#else
	offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
	    obj->tlsalign);
	next_offset = offset;
#endif

	/*
	 * Check if the static allocation was already done.
	 * This happens if dynamically loaded modules want to use
	 * static TLS space.
	 *
	 * XXX Keep an actual free list and callbacks for initialisation.
	 */
	if (_rtld_tls_static_space) {
		if (obj->tlsinitsize) {
			_rtld_error("%s: Use of initialized "
			    "Thread Local Storage with model initial-exec "
			    "and dlopen is not supported",
			    obj->path);
			return -1;
		}
		if (next_offset > _rtld_tls_static_space) {
			_rtld_error("%s: No space available "
			    "for static Thread Local Storage",
			    obj->path);
			return -1;
		}
	}
	obj->tlsoffset = offset;
	_rtld_tls_static_offset = next_offset;
	obj->tls_done = 1;

	return 0;
}
Example #6
static void
pci_vtblk_ring_init(struct pci_vtblk_softc *sc, uint64_t pfn)
{
	struct vring_hqueue *hq;

	sc->vbsc_pfn = pfn << VRING_PFN;
	
	/*
	 * Set up host pointers to the various parts of the
	 * queue
	 */
	hq = &sc->vbsc_q;
	hq->hq_size = VTBLK_RINGSZ;

	hq->hq_dtable = paddr_guest2host(pfn << VRING_PFN);
	hq->hq_avail_flags =  (uint16_t *)(hq->hq_dtable + hq->hq_size);
	hq->hq_avail_idx = hq->hq_avail_flags + 1;
	hq->hq_avail_ring = hq->hq_avail_flags + 2;
	hq->hq_used_flags = (uint16_t *)roundup2((uintptr_t)hq->hq_avail_ring,
						 VRING_ALIGN);
	hq->hq_used_idx = hq->hq_used_flags + 1;
	hq->hq_used_ring = (struct virtio_used *)(hq->hq_used_flags + 2);

	/*
	 * Initialize queue indexes
	 */
	hq->hq_cur_aidx = 0;
}
Example #7
struct tls_tcb *
_rtld_tls_allocate(void)
{
	struct tls_tcb *tcb;
	uint8_t *p;

	if (initial_thread_tcb == NULL) {
#ifdef __HAVE_TLS_VARIANT_II
		tls_size = roundup2(tls_size, sizeof(void *));
#endif
		tls_allocation = tls_size + sizeof(*tcb);

		initial_thread_tcb = p = mmap(NULL, tls_allocation,
		    PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	} else {
		p = calloc(1, tls_allocation);
	}
	if (p == NULL) {
		static const char msg[] =  "TLS allocation failed, terminating\n";
		write(STDERR_FILENO, msg, sizeof(msg));
		_exit(127);
	}
#ifdef __HAVE_TLS_VARIANT_I
	/* LINTED */
	tcb = (struct tls_tcb *)p;
	p += sizeof(struct tls_tcb);
#else
	/* LINTED tls_size is rounded above */
	tcb = (struct tls_tcb *)(p + tls_size);
	tcb->tcb_self = tcb;
#endif
	memcpy(p, tls_initaddr, tls_initsize);

	return tcb;
}
Example #8
/*
 * Allocate and setup a management frame of the specified
 * size.  We return the mbuf and a pointer to the start
 * of the contiguous data area that's been reserved based
 * on the packet length.  The data area is forced to 32-bit
 * alignment and the buffer length to a multiple of 4 bytes.
 * This is done mainly so beacon frames (that require this)
 * can use this interface too.
 */
struct mbuf *
ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
{
	struct mbuf *m;
	u_int len;

	/*
	 * NB: we know the mbuf routines will align the data area
	 *     so we don't need to do anything special.
	 */
	len = roundup2(headroom + pktlen, 4);
	KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
	if (len < MINCLSIZE) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		/*
		 * Align the data in case additional headers are added.
		 * This should only happen when a WEP header is added
		 * which only happens for shared key authentication mgt
		 * frames which all fit in MHLEN.
		 */
		if (m != NULL)
			MH_ALIGN(m, len);
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL)
			MC_ALIGN(m, len);
	}
	if (m != NULL) {
		m->m_data += headroom;
		*frm = m->m_data;
	}
	return m;
}
Example #9
static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
    u32 cb_color_info;
    int pitch, slice;
    RING_LOCALS;
    DRM_DEBUG("\n");

    h = roundup2(h, 8);
    if (h < 8)
        h = 8;

    cb_color_info = ((format << 2) | (1 << 27));
    pitch = (w / 8) - 1;
    slice = ((w * h) / 64) - 1;

    if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
            ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
        BEGIN_RING(21 + 2);
        OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
        OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
        OUT_RING(gpu_addr >> 8);
        OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        OUT_RING(2 << 0);
    } else {
Example #10
int
efi_copy_init(void)
{
	EFI_STATUS	status;

	status = BS->AllocatePages(AllocateAnyPages, EfiLoaderData,
	    STAGE_PAGES, &staging);
	if (EFI_ERROR(status)) {
		printf("failed to allocate staging area: %lu\n",
		    EFI_ERROR_CODE(status));
		return (status);
	}
	staging_end = staging + STAGE_PAGES * EFI_PAGE_SIZE;

#if defined(__aarch64__) || defined(__arm__)
	/*
	 * Round the kernel load address to a 2MiB value. This is needed
	 * because the kernel builds a page table based on where it has
	 * been loaded in physical address space. As the kernel will use
	 * either a 1MiB or 2MiB page for this we need to make sure it
	 * is correctly aligned for both cases.
	 */
	staging = roundup2(staging, 2 * 1024 * 1024);
#endif

	return (0);
}
Example #11
void i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ctx_size;

	if (!HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	/* If called from reset, or thaw... we've been here already */
	if (dev_priv->hw_contexts_disabled ||
	    dev_priv->ring[RCS].default_context)
		return;

	ctx_size = get_context_size(dev);
	dev_priv->hw_context_size = get_context_size(dev);
	dev_priv->hw_context_size = roundup2(dev_priv->hw_context_size, 4096);

	if (ctx_size <= 0 || ctx_size > (1<<20)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	if (create_default_context(dev_priv)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
}
Example #12
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	struct workqueue_queue *q;
	void *ptr;
	int error = 0;

	CTASSERT(sizeof(work_impl_t) <= sizeof(struct work));

	ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP);
	wq = (void *)roundup2((uintptr_t)ptr, coherency_unit);
	wq->wq_ptr = ptr;
	wq->wq_flags = flags;

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create the work-queue for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			error = workqueue_initqueue(wq, q, ipl, ci);
			if (error) {
				break;
			}
		}
	} else {
Example #13
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	handle = 0;
	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
Example #14
/*
 * This *must* be called first before any of the functions above!!!
 */
void
snd_unit_init(void)
{
	int i;

	if (snd_unit_initialized != 0)
		return;

	snd_unit_initialized = 1;

	if (getenv_int("hw.snd.maxunit", &i) != 0) {
		if (i < SND_UNIT_UMIN)
			i = SND_UNIT_UMIN;
		else if (i > SND_UNIT_UMAX)
			i = SND_UNIT_UMAX;
		else
			i = roundup2(i, 2);

		for (snd_u_shift = 0; (i >> (snd_u_shift + 1)) != 0;
		    snd_u_shift++)
			;

		/*
		 * Make room for channels/clones allocation unit
		 * to fit within 24bit MAXMINOR limit.
		 */
		snd_c_shift = 24 - snd_u_shift - snd_d_shift;
	}

	if (bootverbose != 0)
		printf("%s() u=0x%08x [%d] d=0x%08x [%d] c=0x%08x [%d]\n",
		    __func__, SND_U_MASK, snd_max_u() + 1,
		    SND_D_MASK, snd_max_d() + 1, SND_C_MASK, snd_max_c() + 1);
}
Example #15
/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct virtio_desc *)base;
	base += vq->vq_qsize * sizeof(struct virtio_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
}
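vi_vq_init() walks the legacy virtio split-ring layout: the descriptor table, then the avail ring, then a roundup2() to VRING_ALIGN before the used ring begins. A sketch of the total footprint that layout implies; bhyve has its own vring_size() helper, and the element sizes below (16-byte descriptors, 8-byte used entries) are the legacy virtio ones, so treat this as illustrative rather than the project's code:

static size_t
vring_size_sketch(u_int qsz)
{
	size_t size;

	size = qsz * 16;				/* descriptor table */
	size += (2 + qsz + 1) * sizeof(uint16_t);	/* avail ring: flags, idx, ring, used_event */
	size = roundup2(size, VRING_ALIGN);		/* used ring starts on the next boundary */
	size += 2 * sizeof(uint16_t) + qsz * 8;		/* used ring: flags, idx, ring */
	return (size);
}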
Example #16
/*
 * Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in
 * the TCB.  We allocate page pods in multiples of PPOD_CLUSTER_SIZE.  First we
 * try to allocate enough page pods to accommodate the whole buffer, subject to
 * the MAX_PPODS limit.  If that fails we try to allocate PPOD_CLUSTER_SIZE page
 * pods before failing entirely.
 */
static int
alloc_buf1_ppods(struct toepcb *toep, struct ddp_state *p,
			    unsigned long addr, unsigned int len)
{
	int err, tag, npages, nppods;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif	
	npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = min(pages2ppods(npages), MAX_PPODS);
	nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
	err = t3_alloc_ppods(d, nppods, &tag);
	if (err && nppods > PPOD_CLUSTER_SIZE) {
		nppods = PPOD_CLUSTER_SIZE;
		err = t3_alloc_ppods(d, nppods, &tag);
	}
	if (err)
		return (ENOMEM);

	p->ubuf_nppods = nppods;
	p->ubuf_tag = tag;
#if NUM_DDP_KBUF == 1
	t3_set_ddp_tag(toep, 1, tag << 6);
#endif
	return (0);
}
Example #17
static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
	struct icl_pdu *ip = &icp->ip;
	uint8_t ulp_submode, padding;
	struct mbuf *m, *last;
	struct iscsi_bhs *bhs;

	/*
	 * Fix up the data segment mbuf first.
	 */
	m = ip->ip_data_mbuf;
	ulp_submode = icc->ulp_submode;
	if (m) {
		last = m_last(m);

		/*
		 * Round up the data segment to a 4B boundary.  Pad with 0 if
		 * necessary.  There will definitely be room in the mbuf.
		 */
		padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
		if (padding) {
			bzero(mtod(last, uint8_t *) + last->m_len, padding);
			last->m_len += padding;
		}
	} else {
Example #18
File: tls.c Project: Hooman3/minix
void
_rtld_tls_initial_allocation(void)
{
	struct tls_tcb *tcb;

	_rtld_tls_static_space = _rtld_tls_static_offset +
	    RTLD_STATIC_TLS_RESERVATION;

#ifndef __HAVE_TLS_VARIANT_I
	_rtld_tls_static_space = roundup2(_rtld_tls_static_space,
	    sizeof(void *));
#endif
	dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));

	tcb = _rtld_tls_allocate_locked();
#ifdef __HAVE___LWP_SETTCB
	__lwp_settcb(tcb);
#ifdef __powerpc__
	/*
	 * Save the tcb pointer so that libc can retrieve it.  Older
	 * crt0 will obliterate r2 so there is code in libc to restore it.
	 */
	_lwp_setprivate(tcb);
#endif
#else
	_lwp_setprivate(tcb);
#endif
}
Example #19
void
tmpfs_strname_free(struct tmpfs_mount *mp, char *str, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	tmpfs_mem_decr(mp, sz);
	kmem_free(str, sz);
}
Example #20
/*
 * Emit one note section to "dst", or just size it if "dst" is NULL.
 */
static void
elf_putnote(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}
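elf_putnote() and elf_getnote() (Example #29) both step the offset past the note header, the name, and the descriptor, rounding the latter two up to sizeof(Elf_Size). The same arithmetic as a single helper, for illustration only (not from the original source):

static size_t
elf_note_space(size_t namesz, size_t descsz)
{
	/* header + padded name + padded descriptor */
	return (sizeof(Elf_Note) + roundup2(namesz, sizeof(Elf_Size)) +
	    roundup2(descsz, sizeof(Elf_Size)));
}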
Example #21
struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}
Example #22
struct savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((struct savefpu *)p);
}
Example #23
size_t
high_bios_size(void)
{
	size_t size = 0;

	if (ovmf_file_name)
		size = ovmf_image_size();

	return roundup2(size, 2 * MB);
}
Example #24
char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	if (sz == 0 || sz > 1024 || !tmpfs_mem_incr(mp, sz))
		return NULL;

	return malloc(sz, M_TEMP, M_WAITOK); /* XXX */
}
Example #25
static int
getint(void **ptr)
{
	int *p = *ptr;
	int rv;

	p = (int *)roundup2((intptr_t)p, sizeof(int));
	rv = *p++;
	*ptr = p;
	return rv;
}
Example #26
char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	if (!tmpfs_mem_incr(mp, sz)) {
		return NULL;
	}
	return kmem_alloc(sz, KM_SLEEP);
}
Example #27
char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	if (!tmpfs_mem_incr(mp, sz)) {
		return NULL;
	}
	return malloc(sz, M_TEMP, M_WAITOK); /* XXX */
}
Example #28
void
npr_mempool_clear(struct npr_mempool *p)
{
    if (p->entry_index == 0 && p->large_index == 0) {
        p->entry_byte_pos = 0;
        return;
    }

    npr_mempool_fini(p);
    npr_mempool_init(p, roundup2(p->alloc_small));
}
Example #29
static int 
elf_getnote(void *src, size_t *off, const char *name, unsigned int type,
	    void **desc, size_t descsz) 
{
	Elf_Note note;
	int error;

	TRACE_ENTER;
	if (src == NULL) {
		error = EFAULT;
		goto done;
	}
	bcopy((char *)src + *off, &note, sizeof note);
	
	PRINTF(("at offset: %zd expected note of type: %d - got: %d\n",
	       *off, type, note.n_type));
	*off += sizeof note;
	if (type != note.n_type) {
		TRACE_ERR;
		error = EINVAL;
		goto done;
	}
	if (strncmp(name, (char *) src + *off, note.n_namesz) != 0) {
		error = EINVAL;
		goto done;
	}
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (note.n_descsz != descsz) {
		TRACE_ERR;
		error = EINVAL;
		goto done;
	}
	if (desc)
	        bcopy((char *)src + *off, *desc, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
	error = 0;
 done:
	TRACE_EXIT;
	return error;
}
Example #30
int
at91_usart_bus_probe(struct uart_softc *sc)
{
	int value;

	value = USART_DEFAULT_FIFO_BYTES;
	resource_int_value(device_get_name(sc->sc_dev), 
	    device_get_unit(sc->sc_dev), "fifo_bytes", &value);
	value = roundup2(value, arm_dcache_align);
	sc->sc_txfifosz = value;
	sc->sc_rxfifosz = value;
	sc->sc_hwiflow = 0;
	return (0);
}