Example #1
int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
	struct page **pages;
	int i;

	if (buf->direct.buf != NULL || buf->nbufs == 1)
		return 0;

	pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < buf->nbufs; ++i)
		pages[i] = virt_to_page(buf->page_list[i].buf);

	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!buf->direct.buf)
		return -ENOMEM;

	return 0;
}
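Once the buffer is retired, the alias created above has to be released with vunmap(). A minimal sketch of the matching unmap path, assuming the same struct mlx4_buf layout (illustrative; the driver's actual routine may differ):

void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
	/* On 64-bit the direct view came from the allocator and is torn
	 * down there; a single-chunk buffer was never vmap()ed at all. */
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return;

	vunmap(buf->direct.buf);
	buf->direct.buf = NULL;
}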
Example #2
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	return 0;

 oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
Example #3
/* Create a vDSO page holding the signal trampoline.
 * We want this for a non-executable stack.
 */
static int __init vdso_init(void)
{
	struct hexagon_vdso *vdso;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	/* Install the signal trampoline; currently looks like this:
	 *	r6 = #__NR_rt_sigreturn;
	 *	trap0(#1);
	 */
	vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
	vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];

	vunmap(vdso);

	return 0;
}
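Example #3 keeps its mapping only long enough to install the trampoline, then drops the alias with vunmap() while the page itself stays allocated. A minimal sketch of that map-initialize-unmap idiom, using hypothetical names and assuming a page from alloc_page(GFP_KERNEL):

/* Sketch only: map a single page, copy data in, drop the alias. */
static int write_through_vmap(struct page *page, const void *src, size_t len)
{
	void *va = vmap(&page, 1, VM_MAP, PAGE_KERNEL);

	if (!va)
		return -ENOMEM;
	memcpy(va, src, len);
	vunmap(va);	/* the alias was only needed for initialization */
	return 0;
}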
Example #4
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
	struct kvec  scratch;
	struct kvec *scratchiov = &scratch;
	unsigned int  niov = 1;
#else
	struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
	unsigned int  niov = conn->ksnc_rx_niov;
#endif
	struct kvec *iov = conn->ksnc_rx_iov;
	struct msghdr msg = {
		.msg_flags      = 0
	};
        int          nob;
        int          i;
        int          rc;
        int          fragnob;
        int          sum;
        __u32        saved_csum;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        LASSERT (niov > 0);

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
			    MSG_DONTWAIT);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum != 0) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base, fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}

static void
ksocknal_lib_kiov_vunmap(void *addr)
{
        if (addr == NULL)
                return;

        vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
		       struct kvec *iov, struct page **pages)
{
        void             *addr;
        int               nob;
        int               i;

        if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
                return NULL;

        LASSERT (niov <= LNET_MAX_IOV);

        if (niov < 2 ||
            niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
                return NULL;

        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].kiov_offset != 0 && i > 0) ||
		    (kiov[i].kiov_offset + kiov[i].kiov_len !=
		     PAGE_CACHE_SIZE && i < niov - 1))
                        return NULL;

                pages[i] = kiov[i].kiov_page;
                nob += kiov[i].kiov_len;
        }

        addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
        if (addr == NULL)
                return NULL;

        iov->iov_base = addr + kiov[0].kiov_offset;
        iov->iov_len = nob;

        return addr;
}
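Note the guard loop in ksocknal_lib_kiov_vmap(): it returns NULL unless every fragment before the last ends exactly on a page boundary and every fragment after the first starts at offset zero, i.e. the fragments tile their pages back to back. Only then can a single vmap() alias stand in for the whole scatter list.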
Example #5
static Uart *
oxpnp(void)
{
	Pcidev *p;
	Ctlr *ctlr;
	Port *port;
	int i;
	char *model;
	char name[12+1];
	Uart *head, *tail;
	static int ctlrno;

	p = nil;
	head = tail = nil;
	while(p = pcimatch(p, 0x1415, 0)){
		switch(p->did){
		case 0xc101:
		case 0xc105:
		case 0xc11b:
		case 0xc11f:
		case 0xc120:
		case 0xc124:
		case 0xc138:
		case 0xc13d:
		case 0xc140:
		case 0xc141:
		case 0xc144:
		case 0xc145:
		case 0xc158:
		case 0xc15d:
			model = "OXPCIe952";
			break;
		case 0xc208:
		case 0xc20d:
			model = "OXPCIe954";
			break;
		case 0xc308:
		case 0xc30d:
			model = "OXPCIe958";
			break;
		default:
			continue;
		}
		ctlr = malloc(sizeof *ctlr);
		if(ctlr == nil){
			print("oxpnp: out of memory\n");
			continue;
		}
		ctlr->pcidev = p;
		ctlr->mem = vmap(p->mem[0].bar & ~0xf, p->mem[0].size);
		if(ctlr->mem == nil){
			print("oxpnp: vmap failed\n");
			free(ctlr);
			continue;
		}
		snprint(name, sizeof name, "uartox%d", ctlrno);
		kstrdup(&ctlr->name, name);
		ctlr->nport = ctlr->mem[Nuart] & 0x1f;
		for(i = 0; i < ctlr->nport; ++i){
			port = &ctlr->port[i];
			port->ctlr = ctlr;
			port->mem = (u8int *)ctlr->mem + 0x1000 + 0x200*i;
			port->regs = port;
			snprint(name, sizeof name, "%s.%d", ctlr->name, i);
			kstrdup(&port->name, name);
			port->phys = &oxphysuart;
			if(head == nil)
				head = port;
			else
				tail->next = port;
			tail = port;
		}
		print("%s: %s: %d ports irq %d\n",
		    ctlr->name, model, ctlr->nport, p->intl);
		ctlrno++;
	}
	return head;
}
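Plan 9's vmap() has a different contract from the Linux calls in the surrounding examples: it takes a physical base address and a size (here the device's BAR) and returns a kernel-virtual pointer to the MMIO registers, closer in spirit to Linux's ioremap() than to its page-array vmap().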
Example #6
int 
main(int argc, char **argv)
{

	if (argc < 3)
	{
		std::cerr << "Needs two pcd input files." << std::endl;
		return (-1);
	}

	pcl::PointCloud<pcl::RGB>::Ptr left_cloud (new pcl::PointCloud<pcl::RGB>);
	pcl::PointCloud<pcl::RGB>::Ptr right_cloud (new pcl::PointCloud<pcl::RGB>);

	//Read pcd files
	pcl::PCDReader pcd;
	
	if (pcd.read (argv[1], *left_cloud) == -1)
		return (-1);

	if (pcd.read (argv[2], *right_cloud) == -1)
		return (-1);

	if (!left_cloud->isOrganized() || !right_cloud->isOrganized() || left_cloud->width != right_cloud->width || left_cloud->height != right_cloud->height)
	{
		std::cout << "Wrong stereo pair; please check input pcds .." << std::endl; 
		return 0;
	}

	pcl::AdaptiveCostSOStereoMatching stereo;

	stereo.setMaxDisparity(60);
	stereo.setXOffset(0);
	stereo.setRadius(5);

	stereo.setSmoothWeak(20);
	stereo.setSmoothStrong(100);
	stereo.setGammaC(25);
	stereo.setGammaS(10);

	stereo.setRatioFilter(20);
	stereo.setPeakFilter(0);

	stereo.setLeftRightCheck(true);
	stereo.setLeftRightCheckThreshold(1);

	stereo.setPreProcessing(true);

	stereo.compute(*left_cloud,  *right_cloud);

	stereo.medianFilter(4);

	pcl::PointCloud<pcl::PointXYZRGB>::Ptr out_cloud( new pcl::PointCloud<pcl::PointXYZRGB> );

	stereo.getPointCloud(318.112200, 224.334900, 368.534700, 0.8387445, out_cloud, left_cloud);

	pcl::PointCloud<pcl::RGB>::Ptr vmap( new pcl::PointCloud<pcl::RGB> );
	stereo.getVisualMap(vmap);

	pcl::visualization::ImageViewer iv ("My viewer");
	iv.addRGBImage<pcl::RGB> (vmap); 
	//iv.addRGBImage<pcl::RGB> (left_cloud);
	//iv.spin (); // press 'q' to exit

	boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer (new pcl::visualization::PCLVisualizer ("3D Viewer"));
	viewer->setBackgroundColor (0, 0, 0);
	
	//viewer->addPointCloud<pcl::PointXYZRGB> (out_cloud, "stereo");
	pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> intensity(out_cloud);
	viewer->addPointCloud<pcl::PointXYZRGB> (out_cloud, intensity, "stereo");

	//viewer->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "stereo");
	viewer->initCameraParameters ();
	//viewer->spin();
	while (!viewer->wasStopped ())
	{
		viewer->spinOnce (100);
		iv.spinOnce (100); // press 'q' to exit
		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
	}
}
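In this example vmap is just a local variable, a pcl::PointCloud<pcl::RGB> holding the stereo matcher's visual disparity map; it has nothing to do with the memory-mapping vmap() of the kernel examples.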
Example #7
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
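The BITS_PER_LONG == 64 branch builds a contiguous kernel view of the fragmented buffer with vmap(); on 32-bit kernels the scarce vmalloc address space makes that undesirable, so the buffer is left fragmented and the mapping is deferred to mlx4_en_map_buffer() (Example #1), which creates it only when actually needed.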
Example #8
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup(resource_size(res), PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
		       (unsigned long)(tiop->virt_base + len),
		       res->flags & IORESOURCE_IO ? "io" : "mmio",
		       (unsigned long)res->start);
		len += roundup(resource_size(res), PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
#endif
	spin_unlock_irq(&trapped_lock);

	return 0;
 bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
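Mapping the pages with PAGE_NONE is the point of this function: the resulting virt_base range has no access permissions at all, so every load or store to it faults and can be emulated by the trapped-io handler instead of touching real hardware.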
Example #9
void smController::update() {
	
	// grab a new frame from camera
	videoGrabber.grabFrame();
	
	if(videoGrabber.isFrameNew()) {		
		
		model->submitFrame(videoGrabber.getPixels());
		
		// check for motion detect
		if(model->motionDetected) {
		
			if(model->debug) {
				cout << "motion bbox = ("
				     << model->motionRectX1 << ","
				     << model->motionRectY1 << ","
				     << model->motionRectX2 << ","
				     << model->motionRectY2
				     << ")" << endl;
			}
			
			int velocity, noteIndex;
			
			// 'intelligent' music generation based on 
			// y-histogram of motion detect translated directly into musical chords
			if(model->playChromatic) {
				
				for(int y = model->motionRectY1; y <= model->motionRectY2; y += 3) {
					int yvel = 0;
					for(int x = model->motionRectX1; x <= model->motionRectX2; x++) {
						if(model->getClampAt(x,y) > 0) {
							yvel += model->getDiffAt(x,y);
						}
					}
					
					velocity = vmap(yvel,1,10000,30,127);
					if(velocity > 127) {
						velocity = 127;
					}
					noteIndex = model->getNoteIndex(y);
					
					emitNote(noteIndex, velocity, 0.f, (float)y*model->blockHeight);
				}
			}
			// otherwise, just fake it by playing at the top of the motion detect boundary
			else {
				
				// figure out which notes to trigger and fire add to model and trigger audio delegate
				int noteIndex = model->getNoteIndex(model->motionRectY1);
				// get the size of the area as an indicator of volume
				int detectSize =
						(model->motionRectX2 - model->motionRectX1) *
						(model->motionRectY2 - model->motionRectY1);
				
				// calculate note volume
				int velocity = vmap(detectSize,1,1000,45,127);
				if(velocity > 127) {
					velocity = 127;
				}
				
				emitNote(noteIndex, velocity, model->motionRectX1 * model->blockWidth, (float)model->motionRectY1 * model->blockHeight);
				
				if(noteIndex >= 5) {
					emitNote(noteIndex-5, velocity*0.5f, model->motionRectX1 * model->blockWidth, (float)(model->motionRectY1+5) * model->blockHeight);
				}
				
				//if(noteIndex >= 12) {
				//	emitNote(noteIndex-12,velocity*0.5, model->motionRectX1 * model->blockWidth, (float)(model->motionRectY1+12) * model->blockHeight);
				//}
			}
		}
	}
	
	model->update();
	model->advanceHue();
	advanceNotes();
}
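In this controller vmap() is an application-level helper rather than a memory mapper: it rescales a motion value from an input range to a MIDI velocity range, which the caller then clamps to 127. A plausible implementation, assumed here rather than taken from the project source:

// Hypothetical sketch of the vmap() helper used above: a linear range
// remap in the spirit of Arduino's map(). Not the project's actual code.
static int vmap(int v, int inMin, int inMax, int outMin, int outMax) {
	return (v - inMin) * (outMax - outMin) / (inMax - inMin) + outMin;
}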
Example #10
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (! sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				return NULL;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
	if (! dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
Example #11
static int lgx_alloc_buffer(struct inno_lgx *lgx)
{
	//int i,j;
	//int page_num;
	//struct page *page = NULL;
	int i;
	struct inno_buffer *inno_buf = &lgx->inno_buffer;
	//int ret = 0;

	memset(inno_buf, 0, sizeof(struct inno_buffer));
	sema_init(&inno_buf->sem,1);
	down(&inno_buf->sem);

	// alloc buffer
#if 0                        //xingyu buffer issue
	page_num = PAGE_ALIGN(INNO_BUFFER_SIZE) / PAGE_SIZE;
	inno_buf->pages = (struct page **)kzalloc(page_num * sizeof(struct page *), GFP_KERNEL);         
	if (!inno_buf->pages) {
		inno_err("lgx_alloc_buffer:No enough memory");
		ret = -ENOMEM;
		goto alloc_node_error;
	}

	for (i = 0; i < page_num; i++) {
		page = alloc_page(GFP_KERNEL);

		if (!page) {
			inno_err("lgx_alloc_buffer:No enough page");
			ret = -ENOMEM;
			goto alloc_pages_error;
		}
		//SetPageReserved(page);
		inno_buf->pages[i] = page;
	}

	inno_buf->page_num = page_num;

	inno_buf->bufsize = page_num * PAGE_SIZE;
	inno_buf->vaddr = vmap(inno_buf->pages, page_num, VM_MAP, PAGE_KERNEL);

	/* check if the memory map is OK. */
	if (!inno_buf->vaddr) {
		inno_err("lgx_alloc_buffer:vmap() failure");
		ret = -EFAULT;
		goto vmap_error;
	}

	memset(inno_buf->vaddr, 0, inno_buf->bufsize);

	inno_buf->start = inno_buf->vaddr;
#else	
#ifndef _buffer_global                                                      // buffer alloc modify xingyu 0714
	inno_buf->vaddr = kmalloc(INNO_BUFFER_SIZE, GFP_KERNEL| GFP_DMA);         
	if(inno_buf->vaddr == NULL || (int)inno_buf->vaddr %4 !=0){
		inno_err("inno_buf->vaddr kmalloc fail");
		up(&inno_buf->sem);
		return -1;
	}			 
#else
	for(i=0; lgx_ids_table[i].name!=NULL; i++){
		if(lgx->ids  == &lgx_ids_table[i]){
			inno_buf->vaddr =i*INNO_BUFFER_SIZE+g_inno_buffer;
			inno_msg("use global mem");
			break;
		}
	}
#endif
	inno_buf->start = inno_buf->vaddr;
	inno_buf->bufsize = INNO_BUFFER_SIZE;
	
#endif
	up(&inno_buf->sem);

	return 0;

#if 0                          //xingyu buffer issue
	//vmap_error:
	//alloc_pages_error:
	for (j = 0; j < i; j++) {
		page = inno_buf->pages[j];
		//ClearPageReserved(page);
		__free_page(page);
	}
	kfree(inno_buf->pages);
	//alloc_node_error:
	return ret;
#endif
}
Example #12
void writeFrames(const Kinect::FrameSource::IntrinsicParameters& ip,const Kinect::FrameBuffer& color,const Kinect::MeshBuffer& mesh,const char* lwoFileName)
	{
	/* Create the texture file name: */
	std::string textureFileName(lwoFileName,Misc::getExtension(lwoFileName));
	textureFileName.append("-color.png");
	
	/* Write the color frame as a texture image: */
	{
	Images::RGBImage texImage(color.getSize(0),color.getSize(1));
	Images::RGBImage::Color* tiPtr=texImage.modifyPixels();
	const unsigned char* cfPtr=reinterpret_cast<const unsigned char*>(color.getBuffer());
	for(int y=0;y<color.getSize(1);++y)
		for(int x=0;x<color.getSize(0);++x,++tiPtr,cfPtr+=3)
			*tiPtr=Images::RGBImage::Color(cfPtr);
	
	Images::writeImageFile(texImage,textureFileName.c_str());
	}
	
	/* Open the LWO file: */
	IO::FilePtr lwoFile=IO::openFile(lwoFileName,IO::File::WriteOnly);
	lwoFile->setEndianness(Misc::BigEndian);
	
	/* Create the LWO file structure via the FORM chunk: */
	{
	IFFChunkWriter form(lwoFile,"FORM");
	form.write<char>("LWO2",4);
	
	/* Create the TAGS chunk: */
	{
	IFFChunkWriter tags(&form,"TAGS");
	tags.writeString("ColorImage");
	tags.writeChunk();
	}
	
	/* Create the LAYR chunk: */
	{
	IFFChunkWriter layr(&form,"LAYR");
	layr.write<Misc::UInt16>(0U);
	layr.write<Misc::UInt16>(0x0U);
	for(int i=0;i<3;++i)
		layr.write<Misc::Float32>(0.0f);
	layr.writeString("DepthImage");
	layr.writeChunk();
	}
	
	/* Create an index map for all vertices to omit unused vertices: */
	unsigned int* indices=new unsigned int[mesh.numVertices];
	for(unsigned int i=0;i<mesh.numVertices;++i)
		indices[i]=~0x0U;
	unsigned int numUsedVertices=0;
	
	/* Create the PNTS, BBOX and VMAP chunks in one go: */
	{
	typedef Kinect::FrameSource::IntrinsicParameters::PTransform PTransform;
	typedef PTransform::Point Point;
	typedef Geometry::Box<Point::Scalar,3> Box;
	
	IFFChunkWriter bbox(&form,"BBOX");
	IFFChunkWriter pnts(&form,"PNTS");
	IFFChunkWriter vmap(&form,"VMAP");
	
	/* Write the VMAP header: */
	vmap.write<char>("TXUV",4);
	vmap.write<Misc::UInt16>(2U);
	vmap.writeString("ColorImageUV");
	
	/* Process all triangle vertices: */
	Box pBox=Box::empty;
	const Kinect::MeshBuffer::Vertex* vertices=mesh.getVertices();
	const Kinect::MeshBuffer::Index* tiPtr=mesh.getTriangleIndices();
	for(unsigned int i=0;i<mesh.numTriangles*3;++i,++tiPtr)
		{
		/* Check if the triangle vertex doesn't already have an index: */
		if(indices[*tiPtr]==~0x0U)
			{
			/* Assign an index to the triangle vertex: */
			indices[*tiPtr]=numUsedVertices;
			
			/* Transform the mesh vertex to camera space using the depth projection matrix: */
			Point dp(vertices[*tiPtr].position.getXyzw());
			Point cp=ip.depthProjection.transform(dp);
			
			/* Transform the depth-space point to texture space using the color projection matrix: */
			Point tp=ip.colorProjection.transform(dp);
			
			/* Add the point to the bounding box: */
			pBox.addPoint(cp);
			
			/* Store the point and its texture coordinates: */
			pnts.writePoint(cp);
			vmap.writeVarIndex(numUsedVertices);
			for(int i=0;i<2;++i)
				vmap.write<Misc::Float32>(tp[i]);
			
			++numUsedVertices;
			}
		}
	
	/* Write the bounding box: */
	bbox.writeBox(pBox);
	
	/* Write the BBOX, PNTS, and VMAP chunks: */
	bbox.writeChunk();
	pnts.writeChunk();
	vmap.writeChunk();
	}
	
	/* Create the POLS chunk: */
	{
	IFFChunkWriter pols(&form,"POLS");
	pols.write<char>("FACE",4);
	const Kinect::MeshBuffer::Index* tiPtr=mesh.getTriangleIndices();
	for(unsigned int triangleIndex=0;triangleIndex<mesh.numTriangles;++triangleIndex,tiPtr+=3)
		{
		pols.write<Misc::UInt16>(3U);
		for(int i=0;i<3;++i)
			pols.writeVarIndex(indices[tiPtr[2-i]]);
		}
	pols.writeChunk();
	}
	
	/* Delete the vertex index map: */
	delete[] indices;
	
	/* Create the PTAG chunk: */
	{
	IFFChunkWriter ptag(&form,"PTAG");
	ptag.write<char>("SURF",4);
	for(unsigned int triangleIndex=0;triangleIndex<mesh.numTriangles;++triangleIndex)
		{
		ptag.writeVarIndex(triangleIndex);
		ptag.write<Misc::UInt16>(0U);
		}
	ptag.writeChunk();
	}
	
	/* Create the CLIP chunk: */
	{
	IFFChunkWriter clip(&form,"CLIP");
	clip.write<Misc::UInt32>(1U);
	
	/* Create the STIL chunk: */
	{
	IFFChunkWriter stil(&clip,"STIL",true);
	stil.writeString(textureFileName.c_str());
	stil.writeChunk();
	}
	
	clip.writeChunk();
	}
	
	/* Create the SURF chunk: */
	{
	IFFChunkWriter surf(&form,"SURF");
	surf.writeString("ColorImage");
	surf.writeString("");
	
	/* Create the SIDE subchunk: */
	{
	IFFChunkWriter side(&surf,"SIDE",true);
	side.write<Misc::UInt16>(3U);
	side.writeChunk();
	}
	
	/* Create the SMAN subchunk: */
	{
	IFFChunkWriter sman(&surf,"SMAN",true);
	sman.write<Misc::Float32>(Math::rad(90.0f));
	sman.writeChunk();
	}
	
	/* Create the COLR subchunk: */
	{
	IFFChunkWriter colr(&surf,"COLR",true);
	colr.writeColor(1.0f,1.0f,1.0f);
	colr.writeVarIndex(0U);
	colr.writeChunk();
	}
	
	/* Create the DIFF subchunk: */
	{
	IFFChunkWriter diff(&surf,"DIFF",true);
	diff.write<Misc::Float32>(1.0f);
	diff.writeVarIndex(0U);
	diff.writeChunk();
	}
	
	/* Create the LUMI subchunk: */
	{
	IFFChunkWriter lumi(&surf,"LUMI",true);
	lumi.write<Misc::Float32>(0.0f);
	lumi.writeVarIndex(0U);
	lumi.writeChunk();
	}
	
	/* Create the BLOK subchunk: */
	{
	IFFChunkWriter blok(&surf,"BLOK",true);
	
	/* Create the IMAP subchunk: */
	{
	IFFChunkWriter imap(&blok,"IMAP",true);
	imap.writeString("1");
	
	/* Create the CHAN subchunk: */
	{
	IFFChunkWriter chan(&imap,"CHAN",true);
	chan.write<char>("COLR",4);
	chan.writeChunk();
	}
	
	imap.writeChunk();
	}
	
	/* Create the PROJ subchunk: */
	{
	IFFChunkWriter proj(&blok,"PROJ",true);
	proj.write<Misc::UInt16>(5U);
	proj.writeChunk();
	}
	
	/* Create the IMAG subchunk: */
	{
	IFFChunkWriter imag(&blok,"IMAG",true);
	imag.writeVarIndex(1U);
	imag.writeChunk();
	}
	
	/* Create the VMAP subchunk: */
	{
	IFFChunkWriter vmap(&blok,"VMAP",true);
	vmap.writeString("ColorImageUV");
	vmap.writeChunk();
	}
	
	blok.writeChunk();
	}
	
	/* Write the SURF chunk: */
	surf.writeChunk();
	}
	
	/* Write the FORM chunk: */
	form.writeChunk();
	}
	}
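Here VMAP is the LWO2 file format's vertex-map chunk (a "TXUV" map carrying per-vertex texture coordinates), and vmap is merely the IFFChunkWriter emitting it; the name is unrelated to the kernel mapping function.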
Example #13
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        int            nob;
        int            rc;

        if (*ksocknal_tunables.ksnd_enable_csum        && /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
            tx->tx_nob == tx->tx_resid                 && /* first sending    */
            tx->tx_msg.ksm_csum == 0)                     /* not checksummed  */
                ksocknal_lib_csum_tx(tx);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */

        {
#if SOCKNAL_SINGLE_FRAG_TX
                struct iovec    scratch;
                struct iovec   *scratchiov = &scratch;
                unsigned int    niov = 1;
#else
                struct iovec   *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int    niov = tx->tx_niov;
#endif
                struct msghdr msg = {
                        .msg_name       = NULL,
                        .msg_namelen    = 0,
                        .msg_iov        = scratchiov,
                        .msg_iovlen     = niov,
                        .msg_control    = NULL,
                        .msg_controllen = 0,
                        .msg_flags      = MSG_DONTWAIT
                };
                mm_segment_t oldmm = get_fs();
                int  i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i] = tx->tx_iov[i];
                        nob += scratchiov[i].iov_len;
                }

                if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                set_fs (KERNEL_DS);
                rc = sock_sendmsg(sock, &msg, nob);
                set_fs (oldmm);
        }
        return rc;
}

int
ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t   *kiov = tx->tx_kiov;
        int            rc;
        int            nob;

        /* Not NOOP message */
        LASSERT (tx->tx_lnetmsg != NULL);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
                /* Zero copy is enabled */
                struct sock   *sk = sock->sk;
                struct page   *page = kiov->kiov_page;
                int            offset = kiov->kiov_offset;
                int            fragsize = kiov->kiov_len;
                int            msgflg = MSG_DONTWAIT;

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                               page, offset, kiov->kiov_len);

                if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
                    fragsize < tx->tx_resid)
                        msgflg |= MSG_MORE;

                if (sk->sk_prot->sendpage != NULL) {
                        rc = sk->sk_prot->sendpage(sk, page,
                                                   offset, fragsize, msgflg);
                } else {
                        rc = cfs_tcp_sendpage(sk, page, offset, fragsize,
                                              msgflg);
                }
        } else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
                struct iovec  scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int  niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
                struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int  niov = tx->tx_nkiov;
#endif
                struct msghdr msg = {
                        .msg_name       = NULL,
                        .msg_namelen    = 0,
                        .msg_iov        = scratchiov,
                        .msg_iovlen     = niov,
                        .msg_control    = NULL,
                        .msg_controllen = 0,
                        .msg_flags      = MSG_DONTWAIT
                };
                mm_segment_t  oldmm = get_fs();
                int           i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                }

                if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                set_fs (KERNEL_DS);
                rc = sock_sendmsg(sock, &msg, nob);
                set_fs (oldmm);

                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }
        return rc;
}

void
ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
        int            opt = 1;
        mm_segment_t   oldmm = get_fs();
        struct socket *sock = conn->ksnc_sock;

        /* Remind the socket to ACK eagerly.  If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK
         * on, introducing delay in completing zero-copy sends in my
         * peer. */

        set_fs(KERNEL_DS);
        sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
                               (char *)&opt, sizeof (opt));
        set_fs(oldmm);
}

int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int  niov = conn->ksnc_rx_niov;
#endif
        struct iovec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
                .msg_name       = NULL,
                .msg_namelen    = 0,
                .msg_iov        = scratchiov,
                .msg_iovlen     = niov,
                .msg_control    = NULL,
                .msg_controllen = 0,
                .msg_flags      = 0
        };
        mm_segment_t oldmm = get_fs();
        int          nob;
        int          i;
        int          rc;
        int          fragnob;
        int          sum;
        __u32        saved_csum;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        LASSERT (niov > 0);

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        set_fs (KERNEL_DS);
        rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
        /* NB this is just a boolean..........................^ */
        set_fs (oldmm);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum != 0) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base, fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}

static void
ksocknal_lib_kiov_vunmap(void *addr)
{
        if (addr == NULL)
                return;

        vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
                       struct iovec *iov, struct page **pages)
{
        void             *addr;
        int               nob;
        int               i;

        if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
                return NULL;

        LASSERT (niov <= LNET_MAX_IOV);

        if (niov < 2 ||
            niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
                return NULL;

        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].kiov_offset != 0 && i > 0) ||
                    (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1))
                        return NULL;

                pages[i] = kiov[i].kiov_page;
                nob += kiov[i].kiov_len;
        }

        addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
        if (addr == NULL)
                return NULL;

        iov->iov_base = addr + kiov[0].kiov_offset;
        iov->iov_len = nob;

        return addr;
}

int
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec   scratch;
        struct iovec  *scratchiov = &scratch;
        struct page  **pages      = NULL;
        unsigned int   niov       = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
        struct iovec  *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        struct page  **pages      = conn->ksnc_scheduler->kss_rx_scratch_pgs;
        unsigned int   niov       = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t   *kiov = conn->ksnc_rx_kiov;
        struct msghdr msg = {
                .msg_name       = NULL,
                .msg_namelen    = 0,
                .msg_iov        = scratchiov,
                .msg_control    = NULL,
                .msg_controllen = 0,
                .msg_flags      = 0
        };
        mm_segment_t oldmm = get_fs();
        int          nob;
        int          i;
        int          rc;
        void        *base;
        void        *addr;
        int          sum;
        int          fragnob;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
                nob = scratchiov[0].iov_len;
                msg.msg_iovlen = 1;

        } else {
                for (nob = i = 0; i < niov; i++) {
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                }
                msg.msg_iovlen = niov;
        }

        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        set_fs (KERNEL_DS);
        rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
        /* NB this is just a boolean.......................^ */
        set_fs (oldmm);

        if (conn->ksnc_msg.ksm_csum != 0) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        /* Dang! have to kmap again because I have nowhere to stash the
                         * mapped address.  But by doing it while the page is still
                         * mapped, the kernel just bumps the map count and returns me
                         * the address it stashed. */
                        base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                        fragnob = kiov[i].kiov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           base, fragnob);

                        kunmap(kiov[i].kiov_page);
                }
        }

        if (addr != NULL) {
                ksocknal_lib_kiov_vunmap(addr);
        } else {
                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }

        return (rc);
}

void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
        int          i;
        __u32        csum;
        void        *base;

        LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
        LASSERT(tx->tx_conn != NULL);
        LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

        tx->tx_msg.ksm_csum = 0;

        csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
                             tx->tx_iov[0].iov_len);

        if (tx->tx_kiov != NULL) {
                for (i = 0; i < tx->tx_nkiov; i++) {
                        base = kmap(tx->tx_kiov[i].kiov_page) +
                               tx->tx_kiov[i].kiov_offset;

                        csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);

                        kunmap(tx->tx_kiov[i].kiov_page);
                }
        } else {
                for (i = 1; i < tx->tx_niov; i++)
                        csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
                                             tx->tx_iov[i].iov_len);
        }

        if (*ksocknal_tunables.ksnd_inject_csum_error) {
                csum++;
                *ksocknal_tunables.ksnd_inject_csum_error = 0;
        }

        tx->tx_msg.ksm_csum = csum;
}

int
ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
        mm_segment_t   oldmm = get_fs ();
        struct socket *sock = conn->ksnc_sock;
        int            len;
        int            rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT (conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return (-ESHUTDOWN);
        }

        rc = libcfs_sock_getbuf(sock, txmem, rxmem);
        if (rc == 0) {
                len = sizeof(*nagle);
                set_fs(KERNEL_DS);
                rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                           (char *)nagle, &len);
                set_fs(oldmm);
        }

        ksocknal_connsock_decref(conn);

        if (rc == 0)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;

        return (rc);
}
Example #14
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    int rc = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    PRTR0MEMOBJLNX pMemLnx;

    /* Fail if requested to do something we can't. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        if (pMemLnxToMap->cPages)
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
            /*
             * Use vmap - 2.4.22 and later.
             */
            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
# ifdef VM_MAP
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
# else
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
# endif
            if (pMemLnx->Core.pv)
            {
                pMemLnx->fMappedToRing0 = true;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_MAP_FAILED;

#else   /* < 2.4.22 */
            /*
             * Only option here is to share mappings if possible and forget about fProt.
             */
            if (rtR0MemObjIsRing3(pMemToMap))
                rc = VERR_NOT_SUPPORTED;
            else
            {
                rc = VINF_SUCCESS;
                if (!pMemLnxToMap->Core.pv)
                    rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
                if (RT_SUCCESS(rc))
                {
                    Assert(pMemLnxToMap->Core.pv);
                    pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
                }
            }
#endif
        }
        else
        {
            /*
             * MMIO / physical memory.
             */
            Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
            pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
                             ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
                             : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
            if (pMemLnx->Core.pv)
            {
                /** @todo fix protection. */
                rc = VINF_SUCCESS;
            }
        }
        if (RT_SUCCESS(rc))
        {
            pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemLnx->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    return rc;
}
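The #ifdef above exists because very old kernels predate VM_MAP; an equivalent way to express the same fallback is a compatibility shim, sketched here (an assumption about style, not VirtualBox's actual code):

/* Sketch of an equivalent compatibility shim. */
#ifndef VM_MAP
# define VM_MAP VM_ALLOC
#endif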
Example #15
static long kern_unlocked_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg) {
	/* this is the buffer which will hold the data of the buffer from user
	 * space... */
	BufferStruct b;
	/* for results from calls */
	int res;
	/* for printing buffers */
	char *cptr;
	/* for loop length */
	unsigned int sloop;
	/* for loops */
	unsigned int i;
	/* some unsigned ints for address manipulation... */
	unsigned long bpointer, offset, aligned, newsize;
	/* the newly created vma */
	struct vm_area_struct *vma;

	PR_DEBUG("start with ioctl %u", cmd);
	switch (cmd) {
	/*
	*	This is asking the kernel to map the memory to kernel space.
	*/
	case IOCTL_DEMO_MAP:
		/* get the data from the user */
		res = copy_from_user(&b, (void *)arg, sizeof(b));
		if (res) {
			PR_ERROR("problem with copy_from_user");
			return res;
		}
		PR_DEBUG("after copy");
		bpointer = (unsigned long)b.pointer;
		offset = bpointer % PAGE_SIZE;
		aligned = bpointer - offset;
		newsize = b.size + offset;
		PR_DEBUG("bpointer is %x", bpointer);
		PR_DEBUG("offset is %u", offset);
		PR_DEBUG("aligned is %x", aligned);
		PR_DEBUG("newsize is %u", newsize);

		/*
		* // make sure that the user data is page aligned...
		* if(((unsigned int)b.pointer)%PAGE_SIZE!=0) {
		*	PR_ERROR("pointer is not page aligned");
		*	return -EFAULT;
		* }
		* PR_DEBUG("after modulu check");
		*/
		/* find the number of pages */
		nr_pages = (newsize - 1) / PAGE_SIZE + 1;
		PR_DEBUG("nr_pages is %d", nr_pages);
		/* alocate page structures... */
		pages = kmalloc(nr_pages * sizeof(struct page *),
				GFP_KERNEL);
		if (!pages) {
			/* kmalloc() returns NULL on failure, never an ERR_PTR */
			PR_ERROR("could not allocate page structs");
			return -ENOMEM;
		}
		PR_DEBUG("after pages allocation");
		/* get user pages and fault them in */
		down_write(&current->mm->mmap_sem);
		/* rw==READ means read from drive, write into memory area */
		res = get_user_pages(
			/* current->mm,
			current, */
			aligned,
			nr_pages,
			1,/* write */
			0,/* force */
			pages,
			NULL
		);
		vma = find_vma(current->mm, bpointer);
		vma->vm_flags |= VM_DONTCOPY;
		up_write(&current->mm->mmap_sem);
		PR_DEBUG("after get_user_pages res is %d", res);
		/* Errors and no page mapped should return here */
		if (res != nr_pages) {
			PR_ERROR("could not get_user_pages. res was %d", res);
			kfree(pages);
			return -EFAULT;
		}
		/* pages_lock();
		pages_reserve();
		pages_unlock(); */
		/* map the pages to kernel space... */
		vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
		if (vptr == NULL) {
			PR_ERROR("could not get_user_pages. res was %d", res);
			kfree(pages);
			return -EFAULT;
		}
		ptr = vptr + offset;
		size = b.size;
		PR_DEBUG("after vmap - vptr is %p", vptr);
		/* free the pages */
		kfree(pages);
		pages = NULL;
		PR_DEBUG("after freeing the pages");
		/* we're done! return with success */
		PR_DEBUG("success - on the way out");
		return 0;

	/*
	*	This is asking the kernel to unmap the data
	*	No arguments are passed
	*/
	case IOCTL_DEMO_UNMAP:
		/* this function does NOT return an error code. Strange...:) */
		vunmap(vptr);
		vptr = NULL;
		ptr = NULL;
		size = 0;
		nr_pages = 0;
		pages_unmap();
		return 0;

	/*
	*	This is asking the kernel to read the data.
	*	No arguments are passed
	*/
	case IOCTL_DEMO_READ:
		cptr = (char *)ptr;
		sloop = min_t(unsigned int, size, (unsigned int)10);
		PR_DEBUG("sloop is %d", sloop);
		for (i = 0; i < sloop; i++)
			PR_INFO("value of %d is %c", i, cptr[i]);
		return 0;
	/*
	*	This is asking the kernel to write on our data
	*	argument is the constant which will be used...
	*/
	case IOCTL_DEMO_WRITE:
		memset(ptr, arg, size);
		/* pages_dirty(); */
		return 0;
	}
	return -EINVAL;
}
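A note on the release side of this pattern: pages pinned with get_user_pages() must eventually be marked dirty and unpinned, and the struct page pointers must still be reachable at that point (the demo kfree()s its pages array right after vmap(), so its pages_unmap() has to recover them some other way). A generic teardown sketch with hypothetical names, keeping the array alive until release:

/* Sketch only: undo vmap() + get_user_pages(), assuming the page
 * array was kept until teardown. */
static void demo_release(void *vaddr, struct page **pgs, unsigned int n)
{
	unsigned int i;

	vunmap(vaddr);				/* drop the kernel alias */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pgs[i]);	/* contents may have changed */
		put_page(pgs[i]);		/* release the pin */
	}
	kfree(pgs);
}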
Example #16
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf)
{
	int i = 0;
	dma_addr_t t;
	struct page **pages;
	struct device *dev = &hr_dev->pdev->dev;
	u32 bits_per_long = BITS_PER_LONG;

	/* SQ/RQ buf less than one page, SQ + RQ = 8K */
	if (size <= max_direct) {
		buf->nbufs = 1;
		/* Npages calculated by page_size */
		buf->npages = 1 << get_order(size);
		buf->page_shift = PAGE_SHIFT;
		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
		buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 GFP_KERNEL);

		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf = dma_alloc_coherent(dev,
								  PAGE_SIZE, &t,
								  GFP_KERNEL);

			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}
		if (bits_per_long == 64) {
			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
					      GFP_KERNEL);
			if (!pages)
				goto err_free;

			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);

			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
					       PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	hns_roce_buf_free(hr_dev, size, buf);
	return -ENOMEM;
}
Example #17
static int uts_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct uts_namespace *uts_ns = current->nsproxy->uts_ns;
	struct ve_struct *ve = get_exec_env();
	int i, n1, n2, n3, new_version;
	struct page **new_pages, **p;

	/*
	 * For node or in case we've not changed UTS simply
	 * map preallocated original vDSO.
	 *
	 * In turn if we already allocated one for this UTS
	 * simply reuse it. It improves speed significantly.
	 */
	if (uts_ns == &init_uts_ns)
		goto map_init_uts;
	/*
	 * Dirty lockless hack. Strictly speaking
	 * we need to return @p here if it's non-nil,
	 * but since there is only one transition possible
	 * { =0 ; !=0 } we simply return @uts_ns->vdso.pages
	 */
	p = ACCESS_ONCE(uts_ns->vdso.pages);
	smp_read_barrier_depends();
	if (p)
		goto map_uts;

	if (sscanf(uts_ns->name.release, "%d.%d.%d", &n1, &n2, &n3) == 3) {
		/*
		 * If there were no changes on version simply reuse
		 * preallocated one.
		 */
		new_version = KERNEL_VERSION(n1, n2, n3);
		if (new_version == LINUX_VERSION_CODE)
			goto map_init_uts;
	} else {
		/*
		 * If the admin passed a malformed string here,
		 * warn once but continue working without vDSO
		 * virtualization at all. It's better than
		 * bailing out with an error.
		 */
		pr_warn_once("Wrong release uts name format detected."
			     " Ignoring vDSO virtualization.\n");
		goto map_init_uts;
	}

	mutex_lock(&vdso_mutex);
	if (uts_ns->vdso.pages) {
		mutex_unlock(&vdso_mutex);
		goto map_uts;
	}

	uts_ns->vdso.nr_pages	= init_uts_ns.vdso.nr_pages;
	uts_ns->vdso.size	= init_uts_ns.vdso.size;
	uts_ns->vdso.version_off= init_uts_ns.vdso.version_off;
	new_pages		= kmalloc(sizeof(struct page *) * init_uts_ns.vdso.nr_pages, GFP_KERNEL);
	if (!new_pages) {
		pr_err("Can't allocate vDSO pages array for VE %d\n", ve->veid);
		goto out_unlock;
	}

	for (i = 0; i < uts_ns->vdso.nr_pages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p) {
			pr_err("Can't allocate page for VE %d\n", ve->veid);
			for (; i > 0; i--)
				put_page(new_pages[i - 1]);
			kfree(new_pages);
			goto out_unlock;
		}
		new_pages[i] = p;
		copy_page(page_address(p), page_address(init_uts_ns.vdso.pages[i]));
	}

	uts_ns->vdso.addr = vmap(new_pages, uts_ns->vdso.nr_pages, 0, PAGE_KERNEL);
	if (!uts_ns->vdso.addr) {
		pr_err("Can't map vDSO pages for VE %d\n", ve->veid);
		for (i = 0; i < uts_ns->vdso.nr_pages; i++)
			put_page(new_pages[i]);
		kfree(new_pages);
		goto out_unlock;
	}

	*((int *)(uts_ns->vdso.addr + uts_ns->vdso.version_off)) = new_version;
	smp_wmb();
	uts_ns->vdso.pages = new_pages;
	mutex_unlock(&vdso_mutex);

	pr_debug("vDSO version transition %d -> %d for VE %d\n",
		 LINUX_VERSION_CODE, new_version, ve->veid);

map_uts:
	return setup_additional_pages(bprm, uses_interp, uts_ns->vdso.pages, uts_ns->vdso.size);
map_init_uts:
	return setup_additional_pages(bprm, uses_interp, init_uts_ns.vdso.pages, init_uts_ns.vdso.size);
out_unlock:
	mutex_unlock(&vdso_mutex);
	return -ENOMEM;
}
Example #18
static void
pcmpinit(void)
{
	uchar *p, *e;
	Apic *apic;
	void *va;

	/*
	 * Map the local APIC.
	 */
	va = vmap(pcmp->lapicbase, 1024);

	print("LAPIC: %.8lux %#p\n", pcmp->lapicbase, va);
	if(va == nil)
		panic("pcmpinit: cannot map lapic %.8lux", pcmp->lapicbase);

	p = ((uchar*)pcmp)+PCMPsz;
	e = ((uchar*)pcmp)+pcmp->length;
	if(getconf("*dumpmp") != nil)
		dumpmp(p, e);
	if(getconf("*mp") != nil)
		mpoverride(&p, &e);

	/*
	 * Run through the table saving information needed for starting
	 * application processors and initialising any I/O APICs. The table
	 * is guaranteed to be in order such that only one pass is necessary.
	 */
	while(p < e) switch(*p){
	default:
		print("pcmpinit: unknown PCMP type 0x%uX (e-p 0x%luX)\n",
			*p, e-p);
		while(p < e){
			print("%uX ", *p);
			p++;
		}
		break;

	case PcmpPROCESSOR:
		if(apic = mkprocessor((PCMPprocessor*)p)){
			apic->addr = va;
			apic->paddr = pcmp->lapicbase;
		}
		p += PCMPprocessorsz;
		continue;

	case PcmpBUS:
		mkbus((PCMPbus*)p);
		p += PCMPbussz;
		continue;

	case PcmpIOAPIC:
		if(apic = mkioapic((PCMPioapic*)p))
			ioapicinit(apic, apic->apicno);
		p += PCMPioapicsz;
		continue;

	case PcmpIOINTR:
		mkiointr((PCMPintr*)p);
		p += PCMPintrsz;
		continue;

	case PcmpLINTR:
		mklintr((PCMPintr*)p);
		p += PCMPintrsz;
		continue;
	}

	/*
	 * Initialize local APIC and start application processors.
	 */
	mpinit();
}
Example #19
int spu_alloc_lscsa(struct spu_state *csa)
{
	struct page	**pgarray;
	unsigned char	*p;
	int		i, j, n_4k;

	/* Check availability of 64K pages */
	if (!spu_64k_pages_available())
		goto fail;

	csa->use_big_pages = 1;

	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
		 csa);

	/* First try to allocate our 64K pages. We need 5 of them
	 * with the current implementation. In the future, we should try
	 * to separate the lscsa with the actual local store image, thus
	 * allowing us to require only 4 64K pages per context
	 */
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
		/* XXX This is likely to fail, we should use a special pool
		 *     similar to what hugetlbfs does.
		 */
		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
						  SPU_64K_PAGE_ORDER);
		if (csa->lscsa_pages[i] == NULL)
			goto fail;
	}

	pr_debug(" success ! creating vmap...\n");

	/* Now we need to create a vmalloc mapping of these for the kernel
	 * and SPU context switch code to use. Currently, we stick to a
	 * normal kernel vmalloc mapping, which in our case will be 4K
	 */
	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
	if (pgarray == NULL)
		goto fail;
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
			/* We assume all the struct page's are contiguous
			 * which should be hopefully the case for an order 4
			 * allocation..
			 */
			pgarray[i * SPU_64K_PAGE_COUNT + j] =
				csa->lscsa_pages[i] + j;
	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
	kfree(pgarray);
	if (csa->lscsa == NULL)
		goto fail;

	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));

	/* Set LS pages reserved to allow for user-space mapping.
	 *
	 * XXX isn't that a bit obsolete ? I think we should just
	 * make sure the page count is high enough. Anyway, won't harm
	 * for now
	 */
	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	pr_debug(" all good !\n");

	return 0;
fail:
	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
	spu_free_lscsa(csa);
	return spu_alloc_lscsa_std(csa);
}
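VM_USERMAP (rather than VM_MAP) is used here because the lscsa must later be mapped into user space: areas created with that flag are the ones remap_vmalloc_range() is willing to hand to userspace.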
Example #20
File: mp.c Project: Earnestly/plan9
void
mpinit(void)
{
	int ncpu, cpuson;
	char *cp;
	PCMP *pcmp;
	uchar *e, *p;
	Apic *apic, *bpapic;
	void *va;

	mpdebug = getconf("*debugmp") != nil;
	i8259init();
	syncclock();

	bpapic = nil;
	cpuson = 0;

	if(_mp_ == 0) {
		/*
		 * We can easily get processor info from ACPI, but
		 * interrupt routing, etc. would require interpreting AML.
		 */
		print("mpinit: no mp table found, assuming uniprocessor\n");
		archrevert();
		return;
	}
	pcmp = KADDR(_mp_->physaddr);

	/*
	 * Map the local APIC.
	 */
	if((va = vmap(pcmp->lapicbase, 1024)) == nil)
		return;
	mppcmp = pcmp;
	print("LAPIC: %#lux %#lux\n", pcmp->lapicbase, (ulong)va);

	/*
	 * Run through the table saving information needed for starting
	 * application processors and initialising any I/O APICs. The table
	 * is guaranteed to be in order such that only one pass is necessary.
	 */
	p = ((uchar*)pcmp)+sizeof(PCMP);
	e = ((uchar*)pcmp)+pcmp->length;
	while(p < e) switch(*p){

	default:
		print("mpinit: unknown PCMP type 0x%uX (e-p 0x%luX)\n",
			*p, e-p);
		while(p < e){
			print("%uX ", *p);
			p++;
		}
		break;

	case PcmpPROCESSOR:
		if(apic = mkprocessor((PCMPprocessor*)p)){
			/*
			 * Must take a note of bootstrap processor APIC
			 * now as it will be needed in order to start the
			 * application processors later and there's no
			 * guarantee that the bootstrap processor appears
			 * first in the table before the others.
			 */
			apic->addr = va;
			apic->paddr = pcmp->lapicbase;
			if(apic->flags & PcmpBP)
				bpapic = apic;
			cpuson++;
		}
		p += sizeof(PCMPprocessor);
		continue;

	case PcmpBUS:
		mkbus((PCMPbus*)p);
		p += sizeof(PCMPbus);
		continue;

	case PcmpIOAPIC:
		if(apic = mkioapic((PCMPioapic*)p))
			ioapicinit(apic, ((PCMPioapic*)p)->apicno);
		p += sizeof(PCMPioapic);
		continue;

	case PcmpIOINTR:
		mkiointr((PCMPintr*)p);
		p += sizeof(PCMPintr);
		continue;

	case PcmpLINTR:
		mklintr((PCMPintr*)p);
		p += sizeof(PCMPintr);
		continue;
	}

	dprint("mpinit: mp table describes %d cpus\n", cpuson);

	/* For now, always scan ACPI's MADT for processors that MP missed. */
	trympacpi();

	if (bpapic == nil)
		bpapic = bootapic;

	/*
	 * No bootstrap processor, no need to go further.
	 */
	if(bpapic == 0)
		return;
	bpapic->online = 1;

	lapicinit(bpapic);

	/*
	 * These interrupts are local to the processor
	 * and do not appear in the I/O APIC so it is OK
	 * to set them now.
	 */
	intrenable(IrqTIMER, lapicclock, 0, BUSUNKNOWN, "clock");
	intrenable(IrqERROR, lapicerror, 0, BUSUNKNOWN, "lapicerror");
	intrenable(IrqSPURIOUS, lapicspurious, 0, BUSUNKNOWN, "lapicspurious");
	lapiconline();

	checkmtrr();

	/*
	 * Initialise the application processors.
	 */
	if(cp = getconf("*ncpu")){
		ncpu = strtol(cp, 0, 0);
		if(ncpu < 1)
			ncpu = 1;
		else if(ncpu > MAXMACH)
			ncpu = MAXMACH;
	}
	else
		ncpu = MAXMACH;
	memmove((void*)APBOOTSTRAP, apbootstrap, sizeof(apbootstrap));
	for(apic = mpapic; apic <= &mpapic[MaxAPICNO]; apic++){
		if(ncpu <= 1)
			break;
		if((apic->flags & (PcmpBP|PcmpEN)) == PcmpEN
		&& apic->type == PcmpPROCESSOR){
			mpstartap(apic);
			conf.nmach++;
			ncpu--;
		}
	}

	/*
	 *  we don't really know the number of processors till
	 *  here.
	 *
	 *  set conf.copymode here if nmach > 1.
	 *  Should look for an ExtINT line and enable it.
	 */
	if(X86FAMILY(m->cpuidax) == 3 || conf.nmach > 1)
		conf.copymode = 1;
}
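Note that Plan 9's vmap() in the example above takes a physical address and a size, while the Linux vmap() used in the other examples takes an array of struct page pointers. A sketch of the contrasting prototypes (the Plan 9 signatures are assumed from the pc kernel's fns.h; the Linux ones are from include/linux/vmalloc.h):

/* Plan 9 (pc kernel): map a physical range into kernel virtual space */
void*	vmap(ulong pa, int size);
void	vunmap(void *va, int size);

/* Linux: map an array of pages into a contiguous kernel virtual range */
void	*vmap(struct page **pages, unsigned int count,
	      unsigned long flags, pgprot_t prot);
void	vunmap(const void *addr);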
Example no. 21
static int usbcam_videobuf_prepare(struct videobuf_queue *vq,
				   struct videobuf_buffer *vb,
				   enum v4l2_field field)
{
	struct usbcam_fh *ufp = container_of(vq, struct usbcam_fh, ufh_vbq);
	struct usbcam_dev *udp = ufp->ufh_dev;
	struct usbcam_frame *framep =
		container_of(vb, struct usbcam_frame, vbb);
	struct videobuf_dmabuf *dma = usbframe_get_dmabuf(&framep->vbb);
	int res;

	framep->vbb.size = udp->ud_format.sizeimage;
	if (framep->vbb.baddr && (framep->vbb.bsize < framep->vbb.size)) {
		usbcam_warn(udp, "process %s requested capture of a frame "
			    "larger than its allocated frame buffer, fix it!",
			    current->comm);
		return -EINVAL;
	}

	if (framep->vbb.state == STATE_NEEDS_INIT) {
		/*
		 * This is the place where we initialize the rest of
		 * the usbcam_frame structure.
		 */
		INIT_LIST_HEAD(&framep->cap_links);
		framep->vmap_base = NULL;
		framep->vmap_sof = NULL;

		usbcam_dbg(udp, VIDEOBUF,
			   "preparing frame %d/%p", framep->vbb.i, framep);

		/* We also lock down the memory that was allocated for it */
		res = videobuf_iolock(vq, &framep->vbb, NULL);
		if (res)
			goto fail;

		/* If there's no kernel mapping, we must create one */
		if (!dma->vmalloc) {
			framep->vmap_base = vmap(dma->pages,
						 dma->nr_pages,
						 VM_MAP,
						 PAGE_KERNEL);
			if (!framep->vmap_base) {
				res = -ENOMEM;
				goto fail;
			}

			framep->vmap_sof =
				((char *)framep->vmap_base) +
				dma->offset;
		}
	}

	framep->vbb.field = field;
	framep->vbb.state = STATE_PREPARED;
	return 0;

fail:
	usbcam_videobuf_free(vq, framep);
	return res;
}
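The kernel mapping created in the prepare step above has to be released when the frame is freed; a minimal sketch of the matching teardown, assuming the same field names (the real cleanup would live in usbcam_videobuf_free()):

static void usbcam_frame_unmap(struct usbcam_frame *framep)
{
	/* Drop the vmap() alias created in usbcam_videobuf_prepare() */
	if (framep->vmap_base) {
		vunmap(framep->vmap_base);
		framep->vmap_base = NULL;
		framep->vmap_sof = NULL;
	}
}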
Example no. 22
File: mm.c Project: mirage/xen
void *map_domain_page_global(mfn_t mfn)
{
    return vmap(&mfn, 1);
}
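The matching teardown presumably just vunmap()s the returned address; a minimal sketch under that assumption:

void unmap_domain_page_global(const void *va)
{
    vunmap(va);
}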
Example no. 23
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
			     struct drm_fb_helper_surface_size *sizes)
{
	struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
	struct tegra_drm *tegra = helper->dev->dev_private;
	struct drm_device *drm = helper->dev;
	struct drm_mode_fb_cmd2 cmd = { 0 };
	unsigned int bytes_per_pixel;
	struct drm_framebuffer *fb;
	unsigned long offset;
	struct fb_info *info;
	struct tegra_bo *bo;
	size_t size;
	int err;

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	cmd.width = sizes->surface_width;
	cmd.height = sizes->surface_height;
	cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
				  tegra->pitch_align);

	cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
						     sizes->surface_depth);

	size = cmd.pitches[0] * cmd.height;

	bo = tegra_bo_create(drm, size, 0);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		dev_err(drm->dev, "failed to allocate framebuffer info\n");
		drm_gem_object_put_unlocked(&bo->gem);
		return PTR_ERR(info);
	}

	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
	if (IS_ERR(fbdev->fb)) {
		err = PTR_ERR(fbdev->fb);
		dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
			err);
		drm_gem_object_put_unlocked(&bo->gem);
		return err;
	}

	fb = &fbdev->fb->base;
	helper->fb = fb;
	helper->fbdev = info;

	info->par = helper;
	info->flags = FBINFO_FLAG_DEFAULT;
	info->fbops = &tegra_fb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);

	offset = info->var.xoffset * bytes_per_pixel +
		 info->var.yoffset * fb->pitches[0];

	if (bo->pages) {
		bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
				 pgprot_writecombine(PAGE_KERNEL));
		if (!bo->vaddr) {
			dev_err(drm->dev, "failed to vmap() framebuffer\n");
			err = -ENOMEM;
			goto destroy;
		}
	}

	drm->mode_config.fb_base = (resource_size_t)bo->paddr;
	info->screen_base = (void __iomem *)bo->vaddr + offset;
	info->screen_size = size;
	info->fix.smem_start = (unsigned long)(bo->paddr + offset);
	info->fix.smem_len = size;

	return 0;

destroy:
	drm_framebuffer_remove(fb);
	return err;
}
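Since the framebuffer may have been vmap()ed above, the driver's exit path must drop that mapping before the buffer object is released; a minimal sketch using the same field names:

	/* Sketch: undo the vmap() from tegra_fbdev_probe() */
	if (bo->pages && bo->vaddr) {
		vunmap(bo->vaddr);
		bo->vaddr = NULL;
	}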
Example no. 24
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt) {
		/* Unwind the IOMMU mappings before freeing the pages */
		for (i = 0; i < count; i++)
			iommu_unmap(gmu->domain,
				bo->iova + (PAGE_SIZE * i), PAGE_SIZE);
		goto err;
	}

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
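A matching free routine would undo the three setup steps in reverse order: drop the kernel alias, tear down the IOMMU mappings, then release the pages. A hedged sketch using the names above (the driver's actual free function may differ):

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int i, count = bo->size >> PAGE_SHIFT;

	vunmap(bo->virt);

	for (i = 0; i < count; i++)
		iommu_unmap(gmu->domain, bo->iova + (PAGE_SIZE * i),
			    PAGE_SIZE);

	for (i = 0; i < count; i++)
		__free_pages(bo->pages[i], 0);

	kfree(bo->pages);
	kfree(bo);
}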
Example no. 25
int iterate_phdr(int (*cb) (struct phdr_info *info,
			    struct task_struct *task,
			    void *data),
		 struct task_struct *task, void *data)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = task->mm;
	struct phdr_info pi;
	char buf[NAME_BUFLEN];
	int res = 0, err = 0;
	struct page *page; // FIXME Is one page enough for all phdrs?
	Elf64_Ehdr *ehdr;
	bool first = true;

	if (!mm) return -EINVAL;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_pgoff)
			// Only the first page contains the elf
			// headers, normally.
			continue;

		err = __get_user_pages_unlocked(
			task, task->mm, vma->vm_start,
			1, 0, 0, &page, FOLL_TOUCH);
		if (err < 0)
			continue;

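		// NB: vmap()'s third argument expects VM_* mapping flags
		// (e.g. VM_MAP), not a VMA's vm_flags; passing vm_flags
		// here is questionable and deserves a closer look.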
		ehdr = vmap(&page, 1, vma->vm_flags, vma->vm_page_prot);
		if (!ehdr) goto PUT;

		// Test magic bytes to check that it is an ehdr
		err = 0;
		err |= (ehdr->e_ident[0] != ELFMAG0);
		err |= (ehdr->e_ident[1] != ELFMAG1);
		err |= (ehdr->e_ident[2] != ELFMAG2);
		err |= (ehdr->e_ident[3] != ELFMAG3);
		if (err) goto UNMAP;

		// Set addresses
		pi.addr = first ? 0 : vma->vm_start;
		pi.phdr = (void *) ehdr + ehdr->e_phoff;
		pi.phnum = ehdr->e_phnum;

		// Find path
		pi.name = vma_file_path(vma, buf, NAME_BUFLEN);

		// Call the callback
		res = cb(&pi, task, data);

		// Free resources
	UNMAP:
		vunmap(ehdr);
	PUT:
		put_page(page);

		if (res) break;

		first = false;
	}
	return res;
}
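A hypothetical caller hands iterate_phdr() a callback like the one below (field names are taken from the example; print_phdr_info itself is illustrative):

static int print_phdr_info(struct phdr_info *info,
			   struct task_struct *task, void *data)
{
	// Report each mapped ELF object's load address and phdr count
	pr_info("%s: %s at %#lx, %u phdrs\n", task->comm,
		info->name ? info->name : "?", info->addr,
		(unsigned int)info->phnum);
	return 0;	// non-zero would stop the iteration early
}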
Example no. 26
static Uart*
axpalloc(int ctlrno, Pcidev* pcidev)
{
	Cc *cc;
	uchar *p;
	Ctlr *ctlr;
	void *addr;
	char name[64];
	u32int bar, r;
	int i, n, timeo;

	ctlr = malloc(sizeof(Ctlr));
	if(ctlr == nil)
		error(Enomem);
	seprint(name, name+sizeof(name), "uartaxp%d", ctlrno);
	kstrdup(&ctlr->name, name);
	ctlr->pcidev = pcidev;
	ctlr->ctlrno = ctlrno;

	/*
	 * Access to runtime registers.
	 */
	bar = pcidev->mem[0].bar;
	if((addr = vmap(bar & ~0x0F, pcidev->mem[0].size)) == 0){
		print("%s: can't map registers at %#ux\n", ctlr->name, bar);
		return axpdealloc(ctlr);
	}
	ctlr->reg = addr;
	print("%s: port 0x%ux irq %d ", ctlr->name, bar, pcidev->intl);

	/*
	 * Local address space 0.
	 */
	bar = pcidev->mem[2].bar;
	if((addr = vmap(bar & ~0x0F, pcidev->mem[2].size)) == 0){
		print("%s: can't map memory at %#ux\n", ctlr->name, bar);
		return axpdealloc(ctlr);
	}
	ctlr->mem = addr;
	ctlr->gcb = (Gcb*)(ctlr->mem+0x10000);
	print("mem 0x%ux size %d: ", bar, pcidev->mem[2].size);

	/*
	 * Toggle the software reset and wait for
	 * the adapter local init status to indicate done.
	 *
	 * The two 'delay(100)'s below are important,
	 * without them the board seems to become confused
	 * (perhaps it needs some 'quiet time' because the
	 * timeout loops are not sufficient in themselves).
	 */
	r = csr32r(ctlr, Mcc);
	csr32w(ctlr, Mcc, r|Asr);
	microdelay(1);
	csr32w(ctlr, Mcc, r&~Asr);
	delay(100);

	for(timeo = 0; timeo < 100000; timeo++){
		if(csr32r(ctlr, Mcc) & Lis)
			break;
		microdelay(1);
	}
	if(!(csr32r(ctlr, Mcc) & Lis)){
		print("%s: couldn't reset\n", ctlr->name);
		return axpdealloc(ctlr);
	}
	print("downloading...");
	/*
	 * Copy the control programme to the card memory.
	 * The card's i960 control structures live at 0xD000.
	 */
	if(sizeof(uartaxpcp) > 0xD000){
		print("%s: control programme too big\n", ctlr->name);
		return axpdealloc(ctlr);
	}
	/* TODO: is this right for more than 1 card? devastar does the same */
	csr32w(ctlr, Remap, 0xA0000001);
	for(i = 0; i < sizeof(uartaxpcp); i++)
		ctlr->mem[i] = uartaxpcp[i];
	/*
	 * Execute downloaded code and wait for it
	 * to signal ready.
	 */
	csr32w(ctlr, Mb0, Edcc);
	delay(100);
	/* the manual says to wait for Cpr for 1 second */
	for(timeo = 0; timeo < 10000; timeo++){
		if(csr32r(ctlr, Mb0) & Cpr)
			break;
		microdelay(100);
	}
	if(!(csr32r(ctlr, Mb0) & Cpr)){
		print("control programme not ready; Mb0 %#ux\n",
			csr32r(ctlr, Mb0));
		print("%s: distribution panel not connected or card not fully seated?\n",
			ctlr->name);

		return axpdealloc(ctlr);
	}
	print("\n");

	n = ctlr->gcb->ccbn;
	if(ctlr->gcb->bt != 0x12 || n > 16){
		print("%s: wrong board type %#ux, %d channels\n",
			ctlr->name, ctlr->gcb->bt, ctlr->gcb->ccbn);
		return axpdealloc(ctlr);
	}

	p = ((uchar*)ctlr->gcb) + ctlr->gcb->ccboff;
	for(i = 0; i < n; i++){
		cc = &ctlr->cc[i];
		cc->ccb = (Ccb*)p;
		p += ctlr->gcb->ccbsz;
		cc->uartno = i;
		cc->ctlr = ctlr;

		cc->regs = cc;		/* actually Uart->regs */
		seprint(name, name+sizeof(name), "uartaxp%d%2.2d", ctlrno, i);
		kstrdup(&cc->name, name);
		cc->freq = 0;
		cc->bits = 8;
		cc->stop = 1;
		cc->parity = 'n';
		cc->baud = 9600;
		cc->phys = &axpphysuart;
		cc->console = 0;
		cc->special = 0;

		cc->next = &ctlr->cc[i+1];
	}
	ctlr->cc[n-1].next = nil;

	ctlr->next = nil;
	if(axpctlrhead != nil)
		axpctlrtail->next = ctlr;
	else
		axpctlrhead = ctlr;
	axpctlrtail = ctlr;

	return ctlr->cc;
}
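axpdealloc(), called on every failure path above, presumably unwinds this setup; a sketch of its vmap-related part, assuming Plan 9's two-argument vunmap():

static Uart*
axpdealloc(Ctlr* ctlr)
{
	/* release the register and memory windows mapped in axpalloc */
	if(ctlr->reg != nil)
		vunmap(ctlr->reg, ctlr->pcidev->mem[0].size);
	if(ctlr->mem != nil)
		vunmap(ctlr->mem, ctlr->pcidev->mem[2].size);
	free(ctlr->name);
	free(ctlr);
	return nil;
}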