static vaddr_t boot_spd_end(struct cobj_header *h)
{
	struct cobj_sect *sect;
	int max_sect;

	max_sect = h->nsect-1;
	sect = cobj_sect_get(h, max_sect);
	
	return sect->vaddr + round_up_to_page(sect->bytes);
}
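Every example on this page leans on round_up_to_page and its siblings round_to_page / round_down_to_page. Each project defines them in its own headers; as a point of reference, a minimal sketch (the names, the PAGE_SIZE value, and the power-of-two assumption are assumptions here) looks roughly like:

/* Hypothetical reference definitions, for illustration only; the real
 * projects define these in their own headers and may differ in detail.
 * Assumes PAGE_SIZE is a power of two. */
#define PAGE_SIZE              4096UL
#define round_to_page(x)       ((unsigned long)(x) & ~(PAGE_SIZE - 1))
#define round_down_to_page(x)  round_to_page(x)
#define round_up_to_page(x)    round_to_page((unsigned long)(x) + PAGE_SIZE - 1)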
Example #2
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
{
	unsigned int i, n_pages;

	/* round up to page size */
	n_bytes = round_up_to_page(n_bytes);

	n_pages = n_bytes / PAGE_SIZE;

	dma->kvirt = vmalloc_32(n_pages * PAGE_SIZE);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	dma->n_pages = n_pages;

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_pages * PAGE_SIZE);

	/* allocate scatter/gather list */
	dma->sglist = kmalloc(dma->n_pages * sizeof(struct scatterlist), GFP_KERNEL);
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: kmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(struct scatterlist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va = (unsigned long) dma->kvirt + i * PAGE_SIZE;
			
		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages = pci_map_sg(dev, &dma->sglist[0], dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

err:
	dma_region_free(dma);
	return -ENOMEM;
}
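Because n_bytes is rounded up before the division, n_pages covers the request exactly and no trailing partial page is lost. A hypothetical example, assuming 4 KiB pages:

/* Hypothetical numbers, assuming PAGE_SIZE == 4096:
 *   dma_region_alloc(dma, 5000, dev, PCI_DMA_FROMDEVICE)
 *     n_bytes = round_up_to_page(5000) = 8192
 *     n_pages = 8192 / PAGE_SIZE       = 2
 * so a 5000-byte request is backed by two full, zeroed pages. */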
Example #3
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = round_up_to_page(n_bytes);

	prog->n_pages = n_bytes / PAGE_SIZE;

	prog->kvirt = pci_alloc_consistent(dev, prog->n_pages * PAGE_SIZE, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}
Example #4
/* target_size is an absolute size */
void
cbuf_mempool_resize(spdid_t spdid, unsigned long target_size)
{
	struct cbuf_comp_info *cci;
	int diff;

	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	target_size = round_up_to_page(target_size);
	diff = (int)(target_size - cci->target_size);
	cci->target_size = target_size;
	if (diff < 0 && cci->allocated_size > cci->target_size) {
		cbuf_shrink(cci, cci->allocated_size - cci->target_size);
	}
	if (diff > 0) cbuf_expand(cci, diff);
done:
	CBUF_RELEASE();
}
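The routine converts an absolute target into a delta before deciding whether to shrink or expand. A hypothetical walk-through, assuming 4 KiB pages and a component whose target_size and allocated_size are both currently 12288:

/* Hypothetical walk-through, assuming 4 KiB pages and
 * cci->target_size == cci->allocated_size == 12288:
 *   cbuf_mempool_resize(spd, 4000)  -> target_size = 4096,  diff = -8192,
 *                                      cbuf_shrink(cci, 12288 - 4096)
 *   cbuf_mempool_resize(spd, 20000) -> target_size = 20480, diff = +8192,
 *                                      cbuf_expand(cci, 8192)
 */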
Example #5
void 
make_spd_llboot(struct service_symbs *boot, struct service_symbs *all)
{
	volatile int **heap_ptr;
	int *heap_ptr_val, n = 0;
	struct cobj_header *h;
	char *mem;
	u32_t obj_size;
	struct cos_component_information *ci;
	struct service_symbs *first = all;

	if (service_get_spdid(boot) != LLBOOT_COMPN) {
		printf("Low-Level Booter component must be component number %d, but is %d instead.\n"
		       "\tSuggested fix: Your first four components should be e.g. "
		       "c0.o, ;llboot.o, ;*fprr.o, ;mm.o, ;boot.o, ;\n", 
		       LLBOOT_COMPN, service_get_spdid(boot));
		exit(-1);
	}

	/* Setup the capabilities for each of the booter-loaded
	 * components */
	for (all = first ; NULL != all ; all = all->next) {
		if (!is_booter_loaded(all)) continue;

		if (make_cobj_caps(all, all->cobj)) {
			printl(PRINT_HIGH, "Could not create capabilities in cobj for %s\n", all->obj);
			exit(-1);
		}
	}

	heap_ptr = (volatile int **)get_heap_ptr(boot);
	ci = (void *)get_symb_address(&boot->exported, COMP_INFO);
	ci->cos_poly[0] = (vaddr_t)*heap_ptr;

	for (all = first ; NULL != all ; all = all->next) {
		vaddr_t map_addr;
		int map_sz;

		if (!is_booter_loaded(all) || is_hl_booter_loaded(all)) continue;
		n++;

		heap_ptr_val = (int*)*heap_ptr;
		assert(is_booter_loaded(all));
		h = all->cobj;
		assert(h);

		obj_size = round_up_to_cacheline(h->size);
		map_addr = round_up_to_page(heap_ptr_val);
		map_sz = (int)obj_size - (int)(map_addr-(vaddr_t)heap_ptr_val);
		if (map_sz > 0) {
			mem = mmap((void*)map_addr, map_sz, PROT_WRITE | PROT_READ,
				   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
			if (MAP_FAILED == mem) {
				perror("Couldn't map test component into the boot component");
				exit(-1);
			}
		}
		printl(PRINT_HIGH, "boot component: placing %s:%d @ %p, copied from %p:%d\n", 
		       all->obj, service_get_spdid(all), heap_ptr_val, h, obj_size);
		memcpy(heap_ptr_val, h, h->size);
		*heap_ptr = (void*)(((int)heap_ptr_val) + obj_size);
	}
	all = first;
	*heap_ptr = (int*)(round_up_to_page((int)*heap_ptr));
	ci->cos_poly[1] = (vaddr_t)n;

	ci->cos_poly[2] = ((unsigned int)*heap_ptr);
	make_spd_mpd_mgr(boot, all);

	ci->cos_poly[3] = ((unsigned int)*heap_ptr);
	make_spd_config_comp(boot, all);
	llboot_mem = (unsigned int)*heap_ptr - boot->lower_addr;
	boot->heap_top = (unsigned int)*heap_ptr; /* ensure that we copy all of the meta-data as well */
}
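In the copy loop above, only the part of an object that spills past the next page boundary needs a fresh mmap; the code expects the bytes below map_addr to be mapped already, so map_sz can legitimately come out non-positive. A hypothetical walk-through, assuming 4 KiB pages:

/* Hypothetical numbers, assuming 4 KiB pages:
 *   heap_ptr_val = 0x80001200, obj_size = round_up_to_cacheline(h->size) = 0x340
 *   map_addr = round_up_to_page(0x80001200)       = 0x80002000
 *   map_sz   = 0x340 - (0x80002000 - 0x80001200)  = 0x340 - 0xe00  (negative)
 * so no new pages are mapped and the memcpy lands entirely in existing pages.
 */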
Example #6
void 
make_spd_boot(struct service_symbs *boot, struct service_symbs *all)
{
	int n = 0, cnt = 0, tot_sz = 0;
	unsigned int off = 0, i;
	struct cobj_header *h, *new_h;
	char *new_end, *new_sect_start;
	u32_t new_vaddr_start;
	u32_t all_obj_sz;
	struct cos_component_information *ci;
	struct service_symbs *first = all;
	/* array to hold the order of initialization/schedule */
	struct service_symbs **schedule; 

	if (service_get_spdid(boot) != LLBOOT_BOOT) {
		printf("Booter component must be component number %d, is %d.\n"
		       "\tSuggested fix: Your first four components should be e.g. "
		       "c0.o, ;llboot.o, ;*fprr.o, ;mm.o, ;print.o, ;boot.o, ;\n", LLBOOT_BOOT, service_get_spdid(boot));
		exit(-1);
	}

	/* should be loaded by llboot */
	assert(is_booter_loaded(boot) && !is_hl_booter_loaded(boot)); 
	assert(boot->cobj->nsect == MAXSEC_S); /* extra section for other components */
	/* Assign ids to the booter-loaded components. */
	for (all = first ; NULL != all ; all = all->next) {
		if (!is_hl_booter_loaded(all)) continue;

		h = all->cobj;
		assert(h);
		cnt++;
		tot_sz += h->size;
	}

	schedule = malloc(sizeof(struct service_symbs *) * cnt);
	assert(schedule);
	printl(PRINT_HIGH, "Loaded component's initialization scheduled in the following order:\n");
	for (all = first ; NULL != all ; all = all->next) {
		make_spd_boot_schedule(all, schedule, &off);
	}

	/* Setup the capabilities for each of the booter-loaded
	 * components */
	for (all = first ; NULL != all ; all = all->next) {
		if (!is_hl_booter_loaded(all)) continue;

		if (make_cobj_caps(all, all->cobj)) {
			printl(PRINT_HIGH, "Could not create capabilities in cobj for %s\n", all->obj);
			exit(-1);
		}
	}

	all_obj_sz = 0;
	/* Find the cobj's size */
	for (all = first ; NULL != all ; all = all->next) {
		struct cobj_header *h;

		if (!is_hl_booter_loaded(all)) continue;
		printl(PRINT_HIGH, "booter found %s:%d with len %d\n", 
		       all->obj, service_get_spdid(all), all->cobj->size);
		n++;

		assert(is_hl_booter_loaded(all));
		h = all->cobj;
		assert(h);
		all_obj_sz += round_up_to_cacheline(h->size);
	}
	all_obj_sz += 3 * PAGE_SIZE; // schedule, config info, and edge info
	h           = boot->cobj;
	assert(h->nsect == MAXSEC_S);
	new_h       = malloc(h->size + all_obj_sz);
	assert(new_h);
	memcpy(new_h, h, h->size);

	/* Initialize the new section */
	{
		struct cobj_sect *s_prev;

		new_h->size += all_obj_sz;

		s_prev = cobj_sect_get(new_h, INITFILE_S);

		cobj_sect_init(new_h, INITFILE_S, cos_sect_get(INITFILE_S)->cobj_flags, 
			       round_up_to_page(s_prev->vaddr + s_prev->bytes), 
			       all_obj_sz);
	}
	new_sect_start  = new_end = cobj_sect_contents(new_h, INITFILE_S);
	new_vaddr_start = cobj_sect_get(new_h, INITFILE_S)->vaddr;

	ci = (void *)cobj_vaddr_get(new_h, (u32_t)get_symb_address(&boot->exported, COMP_INFO));
	assert(ci);
	ci->cos_poly[0] = ADDR2VADDR(new_sect_start);

	/* copy the cobjs */
	for (all = first ; NULL != all ; all = all->next) {
		struct cobj_header *h;

		if (!is_hl_booter_loaded(all)) continue;
		h = all->cobj;
		assert(h);
		memcpy(new_end, h, h->size);
		new_end += round_up_to_cacheline(h->size);
 	}
	assert((u32_t)(new_end - new_sect_start) + 3*PAGE_SIZE == 
	       cobj_sect_get(new_h, INITFILE_S)->bytes);
	
	all = first;
	ci->cos_poly[1] = (vaddr_t)n;

	ci->cos_poly[2] = ADDR2VADDR(new_end);
	serialize_spd_graph((struct comp_graph*)new_end, PAGE_SIZE/sizeof(struct comp_graph), all);

	new_end += PAGE_SIZE;
	ci->cos_poly[3] = ADDR2VADDR(new_end);
	format_config_info(all, (struct component_init_str*)new_end);

	assert(off < PAGE_SIZE/sizeof(unsigned int)); /* schedule must fit into page. */
	new_end += PAGE_SIZE;
	ci->cos_poly[4] = ADDR2VADDR(new_end);
	for (i = 0 ; i < off ; i++) {
		((int *)new_end)[i] = service_get_spdid(schedule[i]);
	}
	((int *)new_end)[off] = 0;

	new_end += PAGE_SIZE;
	ci->cos_heap_ptr = round_up_to_page(ADDR2VADDR(new_end));

	boot->cobj = new_h;

	printl(PRINT_HIGH, "boot component %s:%d has new section @ %x:%x at address %x, \n\t"
	       "with n %d, graph @ %x, config info @ %x, schedule %x, and heap %x\n",
	       boot->obj, service_get_spdid(boot), (unsigned int)cobj_sect_get(new_h, 3)->vaddr, 
	       (int)cobj_sect_get(new_h, 3)->bytes, (unsigned int)ci->cos_poly[0], (unsigned int)ci->cos_poly[1], 
	       (unsigned int)ci->cos_poly[2], (unsigned int)ci->cos_poly[3], (unsigned int)ci->cos_poly[4], (unsigned int)ci->cos_heap_ptr);
}
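For concreteness, a hypothetical accounting of the INITFILE_S sizing above (invented numbers; 64-byte cache lines and 4 KiB pages assumed):

/* Hypothetical accounting, two hl-booter-loaded cobjs of 10000 and 70000 bytes:
 *   all_obj_sz    = round_up_to_cacheline(10000) + round_up_to_cacheline(70000)
 *                 = 10048 + 70016 = 80064
 *   section bytes = all_obj_sz + 3 * PAGE_SIZE = 80064 + 12288 = 92352
 * The copy loop advances new_end by exactly 80064, so the assert
 * (new_end - new_sect_start) + 3*PAGE_SIZE == cobj_sect_get(...)->bytes holds.
 */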
Example #7
File: iso.c Project: sarnobat/knoppix
static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
					     unsigned int data_buf_size,
					     unsigned int buf_packets,
					     int channel,
					     int dma_mode,
					     int irq_interval,
					     void (*callback)(struct hpsb_iso*))
{
	struct hpsb_iso *iso;
	int dma_direction;

	/* make sure driver supports the ISO API */
	if (!host->driver->isoctl) {
		printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n",
		       host->driver->name);
		return NULL;
	}

	/* sanitize parameters */

	if (buf_packets < 2)
		buf_packets = 2;

	if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
		dma_mode = HPSB_ISO_DMA_DEFAULT;

	if (irq_interval == 0)     /* 0 really means: interrupt for each packet */
		irq_interval = 1;
	else if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
		irq_interval = buf_packets / 4;

	if (channel < -1 || channel >= 64)
		return NULL;

	/* channel = -1 is OK for multi-channel recv but not for xmit */
	if (type == HPSB_ISO_XMIT && channel < 0)
		return NULL;

	/* allocate and write the struct hpsb_iso */

	iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL);
	if (!iso)
		return NULL;

	iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);

	iso->type = type;
	iso->host = host;
	iso->hostdata = NULL;
	iso->callback = callback;
	init_waitqueue_head(&iso->waitq);
	iso->channel = channel;
	iso->irq_interval = irq_interval;
	iso->dma_mode = dma_mode;
	dma_region_init(&iso->data_buf);
	iso->buf_size = round_up_to_page(data_buf_size);
	iso->buf_packets = buf_packets;
	iso->pkt_dma = 0;
	iso->first_packet = 0;
	spin_lock_init(&iso->lock);

	if (iso->type == HPSB_ISO_XMIT) {
		iso->n_ready_packets = iso->buf_packets;
		dma_direction = PCI_DMA_TODEVICE;
	} else {
		iso->n_ready_packets = 0;
		dma_direction = PCI_DMA_FROMDEVICE;
	}

	atomic_set(&iso->overflows, 0);
	iso->flags = 0;
	iso->prebuffer = 0;

	/* allocate the packet buffer */
	if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
		goto err;

	return iso;

err:
	hpsb_iso_shutdown(iso);
	return NULL;
}
Example #8
/*
 * For a certain principal, collect any cbufs that are unreferenced and
 * not in the free list so that they can be reused.  This is the
 * garbage-collection mechanism.
 *
 * Collect cbufs and add them onto the shared component's ring buffer.
 *
 * This function is semantically complicated. It can return no cbufs,
 * even if some are available, in order to force the pool of cbufs to be
 * expanded (the client will call cbuf_create in this case).
 * Or, in the common case, it returns the number of available cbufs.
 */
int
cbuf_collect(spdid_t spdid, unsigned long size)
{
	struct cbuf_info *cbi;
	struct cbuf_comp_info *cci;
	struct cbuf_shared_page *csp;
	struct cbuf_bin *bin;
	int ret = 0;

	printl("cbuf_collect\n");

	CBUF_TAKE();
	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, done);
	tracking_start(&cci->track, CBUF_COLLECT);
	if (size + cci->allocated_size <= cci->target_size) goto done;

	csp  = cci->csp;
	if (unlikely(!csp)) ERR_THROW(-EINVAL, done);

	assert(csp->ring.size == CSP_BUFFER_SIZE);
	ret = CK_RING_SIZE(cbuf_ring, &csp->ring);
	if (ret != 0) goto done;
	/* 
	 * Go through all cbufs we own, and report all of them that
	 * have no current references to them.  Unfortunately, this is
	 * O(N*M), N = min(num cbufs, PAGE_SIZE/sizeof(int)), and M =
	 * num components.
	 */
	size = round_up_to_page(size);
	bin  = cbuf_comp_info_bin_get(cci, size);
	if (!bin) ERR_THROW(0, done);
	cbi  = bin->c;
	do {
		if (!cbi) break;
		/*
		 * Skip cbufs which are on the freelist.  This coordinates with
		 * cbuf_free to detect such cbufs correctly.
		 * We must check refcnt first and then the next pointer.
		 *
		 * If we did not check refcnt: the manager may read "next" before
		 * cbuf_free runs (when it is still NULL), then be preempted by the
		 * client, which calls cbuf_free to set "next", decrease refcnt, and
		 * add the cbuf to the freelist.  When the manager resumes, it would
		 * collect this in-freelist cbuf.
		 *
		 * Furthermore, we must check refcnt before the "next" pointer:
		 * otherwise, as above, the manager may be preempted by the client
		 * between reading "next" and refcnt, see "next" == NULL and
		 * refcnt == 0, and wrongly collect the cbuf.  The short-circuit
		 * evaluation below prevents this reordering.
		 */
		assert(cbi->owner.m);
		if (!CBUF_REFCNT(cbi->owner.m) && !CBUF_IS_IN_FREELIST(cbi->owner.m)
                 		    && !cbuf_referenced(cbi)) {
			struct cbuf_ring_element el = { .cbid = cbi->cbid };
			cbuf_references_clear(cbi);
			if (!CK_RING_ENQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) break;
			/*
			 * Prevent other collections from collecting these cbufs.
			 * The manager checks whether the shared ring buffer is empty on
			 * entry and, if not, simply returns.  That alone is not enough to
			 * prevent double collection; the corner case is: after the last
			 * element in the ring buffer is dequeued, but before it is added
			 * to the free-list, the manager runs again and may collect that
			 * last cbuf a second time.
			 */
			cbi->owner.m->next = (struct cbuf_meta *)1;
			if (++ret == CSP_BUFFER_SIZE) break;
		}
		cbi = FIRST_LIST(cbi, next, prev);
	} while (cbi != bin->c);
	if (ret) cbuf_thd_wake_up(cci, ret*size);

done:
	if (likely(cci)) tracking_end(&cci->track, CBUF_COLLECT);
	CBUF_RELEASE();
	return ret;
}

/* 
 * Called by cbuf_deref.
 */
int
cbuf_delete(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	int ret = -EINVAL, sz;

	printl("cbuf_delete\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_DEL);

	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cbi  = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) goto done;
	meta = cbuf_meta_lookup(cci, cbid);

	/*
	 * Other threads can access the meta data simultaneously. For
	 * example, others may call cbuf2buf, which increases the refcnt.
	 */
	CBUF_REFCNT_ATOMIC_DEC(meta);
	/* Find the owner of this cbuf */
	if (cbi->owner.spdid != spdid) {
		cci = cbuf_comp_info_get(cbi->owner.spdid);
		if (unlikely(!cci)) goto done;
	}
	if (cbuf_free_unmap(cci, cbi)) 	goto done;
	if (cci->allocated_size < cci->target_size) {
		cbuf_thd_wake_up(cci, cci->target_size - cci->allocated_size);
	}
	ret = 0;
done:
	tracking_end(NULL, CBUF_DEL);
	CBUF_RELEASE();
	return ret;
}

/* 
 * Called by cbuf2buf to retrieve a given cbid.
 */
int
cbuf_retrieve(spdid_t spdid, unsigned int cbid, unsigned long size)
{
	struct cbuf_comp_info *cci, *own;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta, *own_meta;
	struct cbuf_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -EINVAL, off;

	printl("cbuf_retrieve\n");

	CBUF_TAKE();
	tracking_start(NULL, CBUF_RETRV);

	cci        = cbuf_comp_info_get(spdid);
	if (!cci) {printd("no cci\n"); goto done; }
	cbi        = cmap_lookup(&cbufs, cbid);
	if (!cbi) {printd("no cbi\n"); goto done; }
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) {
		printd("owner\n"); 
		goto done;
	}
	meta       = cbuf_meta_lookup(cci, cbid);
	if (!meta) {printd("no meta\n"); goto done; }
	assert(!(meta->nfo & ~CBUF_INCONSISENT));

	map        = malloc(sizeof(struct cbuf_maps));
	if (!map) {printd("no map\n"); ERR_THROW(-ENOMEM, done); }
	if (size > cbi->size) {printd("too big\n"); goto free; }
	assert(round_to_page(cbi->size) == cbi->size);
	size       = cbi->size;
	/* TODO: change to MAPPING_READ */
	if (cbuf_alloc_map(spdid, &map->addr, NULL, cbi->mem, size, MAPPING_RW)) {
		printc("cbuf mgr map fail spd %d mem %p sz %lu cbid %u\n", spdid, cbi->mem, size, cbid);
		goto free;
	}

	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);
	CBUF_PTR_SET(meta, map->addr);
	map->spdid          = spdid;
	map->m              = meta;
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = cbid;
	own                 = cbuf_comp_info_get(cbi->owner.spdid);
	if (unlikely(!own)) goto done;
	/*
	 * We need to inherit the relinquish bit from the sender. 
	 * Otherwise, this cbuf cannot be returned to the manager. 
	 */
	own_meta            = cbuf_meta_lookup(own, cbid);
	if (CBUF_RELINQ(own_meta)) CBUF_FLAG_ADD(meta, CBUF_RELINQ);
	ret                 = 0;
done:
	tracking_end(NULL, CBUF_RETRV);

	CBUF_RELEASE();
	return ret;
free:
	free(map);
	goto done;
}

vaddr_t
cbuf_register(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info  *cci;
	struct cbuf_meta_range *cmr;
	void *p;
	vaddr_t dest, ret = 0;

	printl("cbuf_register\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_REG);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cmr = cbuf_meta_lookup_cmr(cci, cbid);
	if (cmr) ERR_THROW(cmr->dest, done);

	/* Create the mapping into the client */
	if (cbuf_alloc_map(spdid, &dest, &p, NULL, PAGE_SIZE, MAPPING_RW)) goto done;
	assert((unsigned int)p == round_to_page(p));
	cmr = cbuf_meta_add(cci, cbid, p, dest);
	assert(cmr);
	ret = cmr->dest;
done:
	tracking_end(NULL, CBUF_REG);

	CBUF_RELEASE();
	return ret;
}

static void
cbuf_shrink(struct cbuf_comp_info *cci, int diff)
{
	int i, sz;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi, *next, *head;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz = (int)bin->size;
		if (!bin->c) continue;
		cbi = FIRST_LIST(bin->c, next, prev);
		while (cbi != bin->c) {
			next = FIRST_LIST(cbi, next, prev);
			if (!cbuf_free_unmap(cci, cbi)) {
				diff -= sz;
				if (diff <= 0) return;
			}
			cbi = next;
		}
		if (!cbuf_free_unmap(cci, cbi)) {
			diff -= sz;
			if (diff <= 0) return;
		}
	}
	if (diff > 0) cbuf_mark_relinquish_all(cci);
}
Example #9
int
cbuf_create(spdid_t spdid, unsigned long size, int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	struct cbuf_bin *bin;
	int ret = 0;
	unsigned int id = (unsigned int)cbid;

	printl("cbuf_create\n");
	if (unlikely(cbid < 0)) return 0;
	CBUF_TAKE();
	tracking_start(NULL, CBUF_CRT);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;

	/* 
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		/* TODO: check whether we have enough free memory: ask the mem manager */
		/* memory usage exceeds the target: block this thread */
		if (size + cci->allocated_size > cci->target_size) {
			cbuf_shrink(cci, size);
			if (size + cci->allocated_size > cci->target_size) {
				cbuf_thread_block(cci, size);
				return 0;
			}
		}

 		cbi = malloc(sizeof(struct cbuf_info));

		if (unlikely(!cbi)) goto done;
		/* Allocate and map in the cbuf. Discard inconsistent cbufs */
		/* TODO: Find a better way to manage those inconsistent cbufs */
		do {
			id   = cmap_add(&cbufs, cbi);
			meta = cbuf_meta_lookup(cci, id);
		} while(meta && CBUF_INCONSISENT(meta));

		cbi->cbid        = id;
		size             = round_up_to_page(size);
		cbi->size        = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbuf_alloc_map(spdid, &(cbi->owner.addr), 
				   (void**)&(cbi->mem), NULL, size, MAPPING_RW)) {
			goto free;
		}
	} 
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, id);
		if (unlikely(!cbi)) goto done;
		if (unlikely(cbi->owner.spdid != spdid)) goto done;
	}
	meta = cbuf_meta_lookup(cci, id);

	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		ret = (int)id * -1;
		goto done;
	}
	
	/* 
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = id;
	CBUF_FLAG_ADD(meta, CBUF_OWNER);
	CBUF_PTR_SET(meta, cbi->owner.addr);
	CBUF_REFCNT_INC(meta);

	/*
	 * When creating a new cbuf, the manager should be the only
	 * one able to access the meta.
	 */
	/* TODO: malicious client may trigger this assertion, just for debug */
	assert(CBUF_REFCNT(meta) == 1);
	assert(CBUF_PTR(meta));
	cbi->owner.m = meta;

	/*
	 * Install cbi last.  Otherwise, after a negative cbid is returned,
	 * a collection may run and find a dangling cbi.
	 */
	bin = cbuf_comp_info_bin_get(cci, size);
	if (!bin) bin = cbuf_comp_info_bin_add(cci, size);
	if (unlikely(!bin)) goto free;
	if (bin->c) ADD_LIST(bin->c, cbi, next, prev);
	else        bin->c   = cbi;
	cci->allocated_size += size;
	ret = (int)id;
done:
	tracking_end(NULL, CBUF_CRT);
	CBUF_RELEASE();

	return ret;
free:
	cmap_del(&cbufs, id);
	free(cbi);
	goto done;
}
Example #10
File: elf.c Project: cheako/atratus
int elf_object_map(struct process *proc, struct elf_module *m)
{
	int i;
	int r;

	dprintf("to load (%d)\n", m->num_to_load);
	dprintf("%-8s %-8s %-8s %-8s\n", "vaddr", "memsz", "offset", "filesz");
	for (i = 0; i < m->num_to_load; i++)
	{
		m->min_vaddr = MIN(round_down_to_page(m->to_load[i].p_vaddr), m->min_vaddr);
		m->max_vaddr = MAX(round_up_to_page(m->to_load[i].p_vaddr + m->to_load[i].p_memsz), m->max_vaddr);

		dprintf("%08x %08x %08x %08x\n",
			m->to_load[i].p_vaddr, m->to_load[i].p_memsz,
			m->to_load[i].p_offset, m->to_load[i].p_filesz);
	}

	dprintf("vaddr -> %08x-%08x\n", m->min_vaddr, m->max_vaddr);

	/* reserve memory for image */
	m->base = vm_process_map(proc, m->min_vaddr, m->max_vaddr - m->min_vaddr,
			_l_PROT_NONE, _l_MAP_ANONYMOUS|_l_MAP_PRIVATE, NULL, 0);
	if (m->base == _l_MAP_FAILED)
	{
		dprintf("mmap failed\n");
		goto error;
	}
	dprintf("base = %08x\n", m->base);

	for (i = 0; i < m->num_to_load; i++)
	{
		int mapflags = elf_mmap_flags_get(m->to_load[i].p_flags);
		user_ptr_t p;
		unsigned int vaddr = round_down_to_page(m->to_load[i].p_vaddr);
		unsigned int vaddr_offset = (m->to_load[i].p_vaddr & pagemask);
		unsigned int memsz = round_up_to_page(vaddr_offset + m->to_load[i].p_memsz);
		unsigned int max_addr;
		void *ptr;
		size_t max_sz = 0;

		elf_map_flags_print(mapflags);

		p = m->base - m->min_vaddr + vaddr;

		dprintf("map at %08x, offset %08x sz %08x\n", p, vaddr, memsz);
		/*
		 * Map anonymous memory then read the data in
		 * rather than mapping the file directly.
		 *
		 * The Windows page granularity is different from that on Linux.
		 * The pages may need to be modified to apply relocations.
		 *
		 * nb. need MAP_FIXED to blow away our old mapping
		 */
		p = vm_process_map(proc, p, memsz,
			_l_PROT_READ | _l_PROT_WRITE | _l_PROT_EXEC,
			_l_MAP_FIXED|_l_MAP_PRIVATE|_l_MAP_ANONYMOUS, NULL, 0);
		if (p == _l_MAP_FAILED)
		{
			fprintf(stderr, "mmap failed (%d)\n", -(int)p);
			goto error;
		}

		p = m->base - m->min_vaddr + m->to_load[i].p_vaddr;
		dprintf("pread %08x bytes from %08x to %08x\n",
			m->to_load[i].p_filesz, m->to_load[i].p_offset, p);

		r = vm_get_pointer(proc, p, &ptr, &max_sz);
		if (r < 0)
			goto error;
		if (max_sz < m->to_load[i].p_filesz)
		{
			r = -_L(EPERM);
			goto error;
		}

		r = elf_read(m->fp, ptr, m->to_load[i].p_filesz, m->to_load[i].p_offset);
		if (r != m->to_load[i].p_filesz)
		{
			fprintf(stderr, "read failed (%08x != %08x)\n",
				m->to_load[i].p_filesz, r);
			goto error;
		}

		/* remember highest address we mapped, use it for brk */
		max_addr = m->to_load[i].p_vaddr + m->to_load[i].p_memsz;
		max_addr = round_up(max_addr, pagesize);
		if (proc->brk < max_addr)
			proc->brk = max_addr;
		dprintf("brk at %08x\n", proc->brk);
	}

	m->entry_point = m->base - m->min_vaddr + m->ehdr.e_entry;

	return 0;
error:
	return -1;
}
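Per segment, p_vaddr is split into a page-aligned base plus an in-page offset, and the offset plus p_memsz is rounded back up so the anonymous mapping covers the whole segment. A hypothetical example, assuming 4 KiB pages and pagemask == 0xfff:

/* Hypothetical numbers, assuming 4 KiB pages (pagemask == 0xfff):
 *   p_vaddr = 0x08049f14, p_memsz = 0x1280
 *   vaddr        = round_down_to_page(0x08049f14)   = 0x08049000
 *   vaddr_offset = 0x08049f14 & pagemask            = 0xf14
 *   memsz        = round_up_to_page(0xf14 + 0x1280) = 0x3000
 * i.e. three pages are mapped even though p_memsz itself is under two pages.
 */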
Example #11
__NO_SAFESTACK thrd_t __allocate_thread(
    size_t requested_guard_size,
    size_t requested_stack_size,
    const char* thread_name,
    char vmo_name[ZX_MAX_NAME_LEN]) {
    thread_allocation_acquire();

    const size_t guard_size =
        requested_guard_size == 0 ? 0 : round_up_to_page(requested_guard_size);
    const size_t stack_size = round_up_to_page(requested_stack_size);

    const size_t tls_size = libc.tls_size;
    const size_t tcb_size = round_up_to_page(tls_size);

    const size_t vmo_size = tcb_size + stack_size * 2;
    zx_handle_t vmo;
    zx_status_t status = _zx_vmo_create(vmo_size, 0, &vmo);
    if (status != ZX_OK) {
        __thread_allocation_release();
        return NULL;
    }
    struct iovec tcb, tcb_region;
    if (map_block(_zx_vmar_root_self(), vmo, 0, tcb_size, PAGE_SIZE, PAGE_SIZE,
                  &tcb, &tcb_region)) {
        __thread_allocation_release();
        _zx_handle_close(vmo);
        return NULL;
    }

    thrd_t td = copy_tls(tcb.iov_base, tcb.iov_len);

    // At this point all our access to global TLS state is done, so we
    // can allow dlopen again.
    __thread_allocation_release();

    // For the initial thread, it's too early to call snprintf because
    // it's not __NO_SAFESTACK.
    if (vmo_name != NULL) {
        // For other threads, try to give the VMO a name that includes
        // the thrd_t value (and the TLS size if that fits too), but
        // don't use a truncated value since that would be confusing to
        // interpret.
        if (snprintf(vmo_name, ZX_MAX_NAME_LEN, "%s:%p/TLS=%#zx",
                     thread_name, td, tls_size) < ZX_MAX_NAME_LEN ||
            snprintf(vmo_name, ZX_MAX_NAME_LEN, "%s:%p",
                     thread_name, td) < ZX_MAX_NAME_LEN)
            thread_name = vmo_name;
    }
    _zx_object_set_property(vmo, ZX_PROP_NAME,
                            thread_name, strlen(thread_name));

    if (map_block(_zx_vmar_root_self(), vmo,
                  tcb_size, stack_size, guard_size, 0,
                  &td->safe_stack, &td->safe_stack_region)) {
        _zx_vmar_unmap(_zx_vmar_root_self(),
                       (uintptr_t)tcb_region.iov_base, tcb_region.iov_len);
        _zx_handle_close(vmo);
        return NULL;
    }

    if (map_block(_zx_vmar_root_self(), vmo,
                  tcb_size + stack_size, stack_size, guard_size, 0,
                  &td->unsafe_stack, &td->unsafe_stack_region)) {
        _zx_vmar_unmap(_zx_vmar_root_self(),
                       (uintptr_t)td->safe_stack_region.iov_base,
                       td->safe_stack_region.iov_len);
        _zx_vmar_unmap(_zx_vmar_root_self(),
                       (uintptr_t)tcb_region.iov_base, tcb_region.iov_len);
        _zx_handle_close(vmo);
        return NULL;
    }

    _zx_handle_close(vmo);
    td->tcb_region = tcb_region;
    td->locale = &libc.global_locale;
    td->head.tp = (uintptr_t)pthread_to_tp(td);
    td->abi.stack_guard = __stack_chk_guard;
    td->abi.unsafe_sp =
        (uintptr_t)td->unsafe_stack.iov_base + td->unsafe_stack.iov_len;
    return td;
}