static void rx_complete(struct urb *req)
{
	struct usbsvn_rx *svn_rx = req->context;
	struct net_device *dev = svn_rx->netdev;
	struct usbsvn *svn = netdev_priv(dev);
	struct page *page = virt_to_page(req->transfer_buffer);
	struct sipc4_rx_data rx_data;
	int dev_id = svn_rx->dev_id;
	int flags = 0;
	int err;

	usb_mark_last_busy(svn->usbdev);

	switch (req->status) {
	case -ENOENT:
		if (req->actual_length == 0) {
			req = NULL;
			break;
		}
		printk(KERN_DEBUG "%s: Rx ENOENT", __func__);

	case 0:
		if (!svn->driver_info)
			flags |= SIPC4_RX_HDLC;
		if (req->actual_length < PAGE_SIZE)
			flags |= SIPC4_RX_LAST;

		rx_data.dev = dev;
		rx_data.skb = svn->devdata[dev_id].rx_skb;
		rx_data.page = page;
		rx_data.size = req->actual_length;
		rx_data.format = dev_id;
		rx_data.flags = flags;
		rx_data.rx_hdr = &svn->devdata[dev_id].rx_hdr;

		page = NULL;

		if (rx_debug) {
			char *buf = req->transfer_buffer;
			int i;

			printk(KERN_DEBUG "[RX] dev_id: %d, size: %d\n", dev_id,
					req->actual_length);
			for (i = 0; i < req->actual_length; i++)
				printk(KERN_DEBUG "%x ", *(buf + i));
		}

		if (dev_id == SIPC4_CMD)
			err = usbsvn_cmd_rx(&rx_data, svn);
		else
			err = sipc4_rx(&rx_data);
		if (err < 0) {
			svn->devdata[dev_id].rx_skb = NULL;
			break;
		}
		svn->devdata[dev_id].rx_skb = rx_data.skb;

		if (dev_id == SIPC4_RAW)
			wake_lock_timeout_data(svn);

		goto resubmit;

	case -ECONNRESET:
	case -ESHUTDOWN:
		if (!svn->suspended)
			printk(KERN_DEBUG "%s: RX complete Status(%d)\n",
				__func__, req->status);
		req = NULL;
		break;

	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		dev_err(&dev->dev, "RX overflow\n");
		break;

	case -EILSEQ:
		dev->stats.rx_crc_errors++;
		break;
	}

	dev->stats.rx_errors++;

resubmit:
	kfree(svn_rx);

	if (page)
		netdev_free_page(dev, page);
	if (req && req->status != -ENOENT) {
		rx_submit(svn, dev_id, req, GFP_ATOMIC);
	}
}
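The handler above works because, for a lowmem page, struct page and the kernel virtual address are interchangeable: the submit side passes page_address(page) as the URB transfer buffer, and the completion side recovers the page with virt_to_page(). A minimal sketch of that round trip, assuming one page per URB and that the netdev page helpers match the kernel generation this driver targets (an illustration, not the driver's actual rx_submit()):

static void rx_page_roundtrip_sketch(struct net_device *dev, struct urb *req)
{
	/* submit side: one fresh page per URB */
	struct page *page = netdev_alloc_page(dev);

	if (!page)
		return;

	/* the URB only carries the page's kernel virtual address */
	req->transfer_buffer = page_address(page);
	req->transfer_buffer_length = PAGE_SIZE;

	/* completion side: virt_to_page() recovers the struct page that
	 * rx_complete() above hands to the SIPC4 reassembly code */
	WARN_ON(virt_to_page(req->transfer_buffer) != page);

	netdev_free_page(dev, page);
}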
Example #2
static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
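A sketch of how such an accounting helper is typically driven: charge the zone when a stack is allocated, uncharge before it is freed. The helper names and the THREAD_SIZE_ORDER allocation here are assumptions for illustration, not the kernel's actual fork-path code:

static struct thread_info *alloc_stack_sketch(void)
{
	struct thread_info *ti;

	ti = (struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (ti)
		account_kernel_stack(ti, 1);	/* charge NR_KERNEL_STACK */
	return ti;
}

static void free_stack_sketch(struct thread_info *ti)
{
	account_kernel_stack(ti, -1);		/* uncharge before freeing */
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}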
Example #3
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
Example #4
static struct page *dummy_pcm_page(struct snd_pcm_substream *substream,
				   unsigned long offset)
{
	return virt_to_page(dummy_page[substream->stream]); /* the same page */
}
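The .page callback translates a byte offset inside the PCM buffer into its backing struct page; the dummy driver can return one shared page for every offset. For a physically contiguous DMA buffer the general form is roughly pointer arithmetic plus virt_to_page(), as in this sketch:

static struct page *linear_pcm_page_sketch(struct snd_pcm_substream *substream,
					   unsigned long offset)
{
	/* dma_area is the kernel virtual address of a contiguous buffer */
	return virt_to_page(substream->runtime->dma_area + offset);
}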
Example #5
static void ati_free_page_map(struct ati_page_map *page_map)
{
	unmap_page_from_agp(virt_to_page(page_map->real));
	set_memory_wb((unsigned long)page_map->real, 1);
	free_page((unsigned long) page_map->real);
}
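For context, the allocation side is the mirror image: grab one page, switch it to uncached, and map it into the AGP aperture. A rough sketch with error handling and cache flushing trimmed (compare ati_create_page_map() in the same driver); treat it as an illustration of the virt_to_page() pairing, not the exact upstream function:

static int ati_alloc_page_map_sketch(struct ati_page_map *page_map)
{
	page_map->real = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!page_map->real)
		return -ENOMEM;

	set_memory_uc((unsigned long)page_map->real, 1);
	map_page_into_agp(virt_to_page(page_map->real));
	return 0;
}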
Example #6
int request_buffer(struct prev_device *device, struct prev_reqbufs *reqbufs)
{
	struct prev_buffer *buffer = NULL;
	int count = 0;
	unsigned long adr;
	u32 size;

	dev_dbg(prev_dev, "%s E\n", __func__);
	if (!reqbufs || !device) {
		dev_err(prev_dev, "request_buffer: error in argument\n");
		return -EINVAL;
	}

	/* if the number of buffers requested is more than supported, return an error */
	if (reqbufs->count > MAX_BUFFER) {
		dev_err(prev_dev, "request_buffer: invalid buffer count\n");
		return -EINVAL;
	}

	/* if buf_type is input then allocate buffers for input */
	if (reqbufs->buf_type == PREV_BUF_IN) {
		/*if buffer count is zero, free all the buffers */
		if (reqbufs->count == 0) {
			/* free all the buffers */
			for (count = 0; count < device->in_numbuffers; count++) {
				/* free memory allocated for the image */
				if (device->in_buff[count]) {
					adr =
					    (unsigned long)device->
					    in_buff[count]->offset;
					if (adr)
						prev_free_pages((unsigned long)
								phys_to_virt
								(adr),
								device->in_buff
								[count]->size);

					/* free the memory allocated
					   to prev_buffer */
					kfree(device->in_buff[count]);

					device->in_buff[count] = NULL;
				}
			}
			device->in_numbuffers = 0;
			return 0;
		}

		/* free the extra buffers */
		if (device->in_numbuffers > reqbufs->count &&
		    reqbufs->size == device->in_buff[0]->size) {
			for (count = reqbufs->count;
			     count < device->in_numbuffers; count++) {
				/* free memory allocated for the image */
				if (device->in_buff[count]) {
					adr = device->in_buff[count]->offset;
					if (adr)
						prev_free_pages((unsigned long)
								phys_to_virt
								(adr),
								device->in_buff
								[count]->size);

					/* free the memory allocated
					   to prev_buffer */
					kfree(device->in_buff[count]);

					device->in_buff[count] = NULL;
				}
			}
			device->in_numbuffers = reqbufs->count;
			return 0;
		}
		/* if size requested is different from already allocated,
		   free memory of all already allocated buffers */
		if (device->in_numbuffers) {
			if (reqbufs->size != device->in_buff[0]->size) {
				for (count = 0;
				     count < device->in_numbuffers; count++) {
					if (device->in_buff[count]) {
						adr =
						    device->
						    in_buff[count]->offset;
						if (adr)
							prev_free_pages((unsigned long)
									phys_to_virt
									(adr),
									device->
									in_buff
									[count]->
									size);

						kfree(device->in_buff[count]);

						device->in_buff[count] = NULL;
					}
				}
				device->in_numbuffers = 0;
			}
		}

		/* allocate the buffer */
		for (count = device->in_numbuffers; count < reqbufs->count;
		     count++) {
			/* Allocate memory for struct prev_buffer */
			buffer =
			    kmalloc(sizeof(struct prev_buffer), GFP_KERNEL);

			/* if memory allocation fails then return error */
			if (!buffer) {
				/* free all the buffers */
				while (--count >= device->in_numbuffers) {
					adr = device->in_buff[count]->offset;
					if (adr)
						prev_free_pages((unsigned long)
								phys_to_virt
								(adr),
								device->in_buff
								[count]->size);
					kfree(device->in_buff[count]);
					device->in_buff[count] = NULL;
				}
				dev_err(prev_dev, "request_buffer:not \
					enough memory\n");
				return -ENOMEM;
			}

			/* assign buffer's address in configuration */
			device->in_buff[count] = buffer;

			/* set buffers index and buf_type,size parameters */
			buffer->index = count;
			buffer->buf_type = PREV_BUF_IN;
			buffer->size = reqbufs->size;
			/* allocate memory for buffer of size passed
			   in reqbufs */
			buffer->offset =
			    (unsigned long)__get_free_pages(GFP_KERNEL |
							    GFP_DMA,
							    get_order
							    (reqbufs->size));

			/* if memory allocation fails, return error */
			if (!(buffer->offset)) {
				/* free all the buffer's space */
				kfree(buffer);
				device->in_buff[count] = NULL;
				while (--count >= device->in_numbuffers) {
					adr = device->in_buff[count]->offset;
					if (adr)
						prev_free_pages((unsigned long)
								phys_to_virt
								(adr),
								device->in_buff
								[count]->size);
					kfree(device->in_buff[count]);
					device->in_buff[count] = NULL;
				}
				dev_err(prev_dev, "request_buffer:not \
					enough memory\n");

				return -ENOMEM;
			}

			adr = (unsigned long)buffer->offset;
			size = PAGE_SIZE << (get_order(reqbufs->size));
			while (size > 0) {
				/* make sure the frame buffers
				   are never swapped out of memory */
				SetPageReserved(virt_to_page(adr));
				adr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			/* convert virtual address to physical */
			buffer->offset = (unsigned long)
			    virt_to_phys((void *)(buffer->offset));
		}
		device->in_numbuffers = reqbufs->count;
	}
Example #7
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();

	/*
	 * Set up the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists(NULL);
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();
	/*
	 * Set up the scheduler prior to starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	rcu_init();
	radix_tree_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	gfp_allowed_mask = __GFP_BITS_MASK;

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	enable_debug_pagealloc();
	kmemtrace_init();
	kmemleak_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init(totalram_pages);
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */
	sfi_init_late();

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
Example #8
static int __init
dev_nvram_init(void)
{
	int order = 0, ret = 0;
	struct page *page, *end;
	unsigned int i;

	/* Allocate and reserve memory to mmap() */
	while ((PAGE_SIZE << order) < NVRAM_SPACE)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		mem_map_reserve(page);

#ifdef CONFIG_MTD
	/* Find associated MTD device */
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		nvram_mtd = get_mtd_device(NULL, i);
		if (nvram_mtd) {
			if (!strcmp(nvram_mtd->name, "nvram") &&
			    nvram_mtd->size >= NVRAM_SPACE)
				break;
			put_mtd_device(nvram_mtd);
		}
	}
	if (i >= MAX_MTD_DEVICES)
		nvram_mtd = NULL;
#endif

	/* Initialize hash table lock */
	spin_lock_init(&nvram_lock);

	/* Initialize commit semaphore */
	init_MUTEX(&nvram_sem);

	/* Register char device */
	if ((nvram_major = devfs_register_chrdev(0, "nvram", &dev_nvram_fops)) < 0) {
		ret = nvram_major;
		goto err;
	}

	/* Initialize hash table */
	_nvram_init(sbh);

	/* Create /dev/nvram handle */
	nvram_handle = devfs_register(NULL, "nvram", DEVFS_FL_NONE, nvram_major, 0,
				      S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, &dev_nvram_fops, NULL);

	/* Set the SDRAM NCDL value into NVRAM if not already done */
	if (getintvar(NULL, "sdram_ncdl") == 0) {
		unsigned int ncdl;
		char buf[] = "0x00000000";

		if ((ncdl = sb_memc_get_ncdl(sbh))) {
			sprintf(buf, "0x%08x", ncdl);
			nvram_set("sdram_ncdl", buf);
			nvram_commit();
		}
	}

	return 0;

 err:
	dev_nvram_exit();
	return ret;
}
Example #9
/*
 * Send read data back to initiator.
 */
int ft_send_read_data(struct scst_cmd *cmd)
{
	struct ft_cmd *fcmd;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	size_t remaining;
	u32 fh_off = 0;
	u32 frame_off;
	size_t frame_len = 0;
	size_t mem_len;
	u32 mem_off;
	size_t tlen;
	struct page *page;
	int use_sg;
	int error;
	void *to = NULL;
	u8 *from = NULL;
	int loop_limit = 10000;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	ep = fc_seq_exch(fcmd->seq);
	lport = ep->lp;

	frame_off = fcmd->read_data_len;
	tlen = scst_cmd_get_resp_data_len(cmd);
	FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
		  ep->oid, ep->oxid, tlen, frame_off);
	if (tlen <= frame_off)
		return SCST_TGT_RES_SUCCESS;
	remaining = tlen - frame_off;
	if (remaining > UINT_MAX)
		FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
		       ep->oid, ep->oxid, tlen, frame_off);

	mem_len = scst_get_buf_first(cmd, &from);
	mem_off = 0;
	if (!mem_len) {
		FT_IO_DBG("mem_len 0\n");
		return SCST_TGT_RES_SUCCESS;
	}
	FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
		 ep->sid, ep->oxid, mem_len, frame_off, remaining);

	/*
	 * If we've already transferred some of the data, skip through
	 * the buffer over the data already sent and continue with the
	 * same sequence.  Otherwise, get a new sequence for the data.
	 */
	if (frame_off) {
		tlen = frame_off;
		while (mem_len <= tlen) {
			tlen -= mem_len;
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			if (!mem_len)
				return SCST_TGT_RES_SUCCESS;
		}
		mem_len -= tlen;
		mem_off = tlen;
	} else
		fcmd->seq = lport->tt.seq_start_next(fcmd->seq);

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4) && lport->sg_supp;

	while (remaining) {
		if (!loop_limit) {
			FT_ERR("hit loop limit.  remaining %zx mem_len %zx "
			       "frame_len %zx tlen %zx\n",
			       remaining, mem_len, frame_len, tlen);
			break;
		}
		loop_limit--;
		if (!mem_len) {
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			mem_off = 0;
			if (!mem_len) {
				FT_ERR("mem_len 0 from get_buf_next\n");
				break;
			}
		}
		if (!frame_len) {
			frame_len = fcmd->max_lso_payload;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp) {
				FT_IO_DBG("frame_alloc failed. "
					  "use_sg %d frame_len %zd\n",
					  use_sg, frame_len);
				break;
			}
			fr_max_payload(fp) = fcmd->max_payload;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
		}
		tlen = min(mem_len, frame_len);
		BUG_ON(!tlen);
		BUG_ON(tlen > remaining);
		BUG_ON(tlen > mem_len);
		BUG_ON(tlen > frame_len);

		if (use_sg) {
			page = virt_to_page(from + mem_off);
			get_page(page);
			tlen = min_t(size_t, tlen,
				     PAGE_SIZE - (mem_off & ~PAGE_MASK));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, offset_in_page(from + mem_off),
					   tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
			frame_len -= tlen;
			if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
				frame_len = 0;
		} else {
			memcpy(to, from + mem_off, tlen);
			to += tlen;
			frame_len -= tlen;
		}

		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
		frame_off += tlen;

		if (frame_len)
			continue;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP,
			       remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
			       (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
			       fh_off);
		error = lport->tt.seq_send(lport, fcmd->seq, fp);
		if (error) {
			WARN_ON(1);
			/* XXX For now, initiator will retry */
		} else
			fcmd->read_data_len = frame_off;
	}
	if (mem_len)
		scst_put_buf(cmd, from);
	if (remaining) {
		FT_IO_DBG("remaining read data %zd\n", remaining);
		return SCST_TGT_RES_QUEUE_FULL;
	}
	return SCST_TGT_RES_SUCCESS;
}
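The use_sg branch above is the zero-copy way to hang part of a lowmem buffer off an skb: resolve the page, pin it, and describe the fragment. Isolated as a hypothetical helper (FC bookkeeping dropped; the caller must keep the range inside one page):

static void skb_attach_frag_sketch(struct sk_buff *skb, u8 *from, int len)
{
	struct page *page = virt_to_page(from);

	get_page(page);			/* the skb now holds a reference */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   page, offset_in_page(from), len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}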
Example #10
void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void *) CKSEG1ADDR(phys_addr);

#ifdef CONFIG_DISCONTIGMEM
#if defined ( CONFIG_MIPS_BCM97438 )
       if (IS_PA_UPPER_RAM(phys_addr) && flags == _CACHE_UNCACHED) {
               printk(KERN_ERR "Upper DDR at %08lx cannot be mapped uncached\n", phys_addr);
               return NULL;
       }
#elif defined ( CONFIG_MIPS_BCM7440 )
        if (IS_PA_UPPER_RAM(phys_addr) && (flags == _CACHE_UNCACHED)) {
               printk(KERN_ERR "Upper/High DDR at %08lx cannot be mapped uncached\n", phys_addr);
               return NULL;
       }

#endif

#endif

#ifndef CONFIG_DISCONTIGMEM
  #ifdef CONFIG_MIPS_BRCM97XXX

  #if defined( CONFIG_MIPS_BCM7038A0 )
	if (((phys_addr >= 0xd0000000) && (phys_addr <= 0xe060000b)))
		
  #elif defined( CONFIG_MIPS_BCM7038B0 ) || defined( CONFIG_MIPS_BCM7038C0 ) \
  	|| defined( CONFIG_MIPS_BCM7400 ) 
	if (((phys_addr >= 0xd0000000) && (phys_addr <= 0xf060000b)))
		
  #elif defined( CONFIG_MIPS_BCM3560 ) \
  	|| defined( CONFIG_MIPS_BCM7401 ) || defined( CONFIG_MIPS_BCM7402 ) \
	|| defined( CONFIG_MIPS_BCM7118 ) || defined( CONFIG_MIPS_BCM7403 ) \
	|| defined( CONFIG_MIPS_BCM7452 )
  	if (((((unsigned long) (phys_addr)) >= 0xd0000000) && (((unsigned long) (phys_addr)) <= 0xf060000b)) ||
		(((unsigned long) (phys_addr)) >= 0xff400000))
		
  #else
	if (phys_addr >= 0xffe00000)
  #endif
  
    	return (void *) (phys_addr);
  #endif
#else
  /* 97438 Discontiguous memory model */
  #if defined ( CONFIG_MIPS_BCM97438 )
        if (((phys_addr >= 0xd0000000) && (phys_addr < 0xe0000000)) ||
               ((phys_addr >= 0xf0000000) && (phys_addr <= 0xf060000b)))
                        return (void *) (phys_addr);

       /* else the upper RAM area is handled just like lower RAM, below */
  #elif defined ( CONFIG_MIPS_BCM7440 )
        if ((phys_addr >= 0xd0000000) && (phys_addr < 0xd8000000))
                /* 128 MB of PCI-MEM */
                return (void *) (phys_addr);
        if ((phys_addr >= 0xf0000000) && (phys_addr < 0xf2000000))
                /* 32 MB of PCI-IO */
                return (void *) (0xf8000000 + (phys_addr - 0xf0000000));

  #else
       #error "Unsupported discontigmem platform"
  #endif

#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
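The "don't allow anybody to remap normal RAM that we're using" check in the middle of __ioremap() is a reusable pattern: walk the struct pages spanned by a virtual range and require each one to be reserved. As a standalone sketch, assuming a flat mem_map just as the loop above does:

static int range_is_reserved_sketch(void *addr, size_t size)
{
	struct page *page = virt_to_page(addr);
	struct page *end = virt_to_page((char *)addr + size - 1);

	for (; page <= end; page++)
		if (!PageReserved(page))
			return 0;
	return 1;
}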
Example #11
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	struct packet_opt *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 * skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = (struct sock *) pt->data;
	po = sk->protinfo.af_packet;

	if (dev->hard_header) {
		if (sk->type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
			if (skb->ip_summed == CHECKSUM_HW)
				status |= TP_STATUS_CSUMNOTREADY;
		}
	}

	snaplen = skb->len;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		unsigned res = snaplen;
		struct sk_filter *filter;

		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL)
			res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
		bh_unlock_sock(sk);

		if (res == 0)
			goto drop_n_restore;
		if (snaplen > res)
			snaplen = res;
	}
#endif

	if (sk->type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb->nh.raw - skb->data;
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->rmem_alloc) + skb->truesize < (unsigned)sk->rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}
	if (snaplen > skb->len-skb->data_len)
		snaplen = skb->len-skb->data_len;

	spin_lock(&sk->receive_queue.lock);
	h = po->iovec[po->head];

	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->iovmax ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->receive_queue.lock);

	memcpy((u8*)h + macoff, skb->data, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	h->tp_sec = skb->stamp.tv_sec;
	h->tp_usec = skb->stamp.tv_usec;

	sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->receive_queue.lock);

	sk->data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
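The block just before data_ready() flushes the D-cache for every page the copied frame touched, since user space reads the ring through its own mapping. The same idea as a small helper, again assuming a flat mem_map:

static void flush_written_range_sketch(void *start, size_t len)
{
	struct page *page = virt_to_page(start);
	struct page *end = virt_to_page((char *)start + len - 1);

	while (page <= end)
		flush_dcache_page(page++);
}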
Example #12
static int
dev_nvram_init(void)
{
	int order = 0, ret = 0;
	struct page *page, *end;
	osl_t *osh;
#if defined(CONFIG_MTD) || defined(CONFIG_MTD_MODULE)
	unsigned int i;
#endif

	/* Allocate and reserve memory to mmap() */
	while ((PAGE_SIZE << order) < nvram_space)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++) {
		SetPageReserved(page);
	}

#if defined(CONFIG_MTD) || defined(CONFIG_MTD_MODULE)
	/* Find associated MTD device */
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		nvram_mtd = get_mtd_device(NULL, i);
		if (!IS_ERR(nvram_mtd)) {
			if (!strcmp(nvram_mtd->name, "nvram") &&
			    nvram_mtd->size >= nvram_space) {
				break;
			}
			put_mtd_device(nvram_mtd);
		}
	}
	if (i >= MAX_MTD_DEVICES)
		nvram_mtd = NULL;
#endif

	/* Initialize hash table lock */
	spin_lock_init(&nvram_lock);

	/* Initialize commit semaphore */
	init_MUTEX(&nvram_sem);

	/* Register char device */
	if ((nvram_major = register_chrdev(0, "nvram", &dev_nvram_fops)) < 0) {
		ret = nvram_major;
		goto err;
	}

	if (si_osh(sih) == NULL) {
		osh = osl_attach(NULL, SI_BUS, FALSE);
		if (osh == NULL) {
			printk("Error allocating osh\n");
			unregister_chrdev(nvram_major, "nvram");
			goto err;
		}
		si_setosh(sih, osh);
	}

	/* Initialize hash table */
	_nvram_init(sih);

	/* Create /dev/nvram handle */
	nvram_class = class_create(THIS_MODULE, "nvram");
	if (IS_ERR(nvram_class)) {
		printk("Error creating nvram class\n");
		goto err;
	}

	/* Add the device nvram0 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	class_device_create(nvram_class, NULL, MKDEV(nvram_major, 0), NULL, "nvram");
#else /* Linux 2.6.36 and above */
	device_create(nvram_class, NULL, MKDEV(nvram_major, 0), NULL, "nvram");
#endif	/* Linux 2.6.36 */

	return 0;

err:
	dev_nvram_exit();
	return ret;
}
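Marking each page of nvram_buf reserved is what makes it safe to hand the buffer to user space; a matching mmap() handler would look roughly like the sketch below. dev_nvram_mmap_sketch is a hypothetical name, and nvram_buf is assumed to live in the kernel's linear mapping:

static int dev_nvram_mmap_sketch(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (off + len > PAGE_ALIGN(nvram_space))
		return -EINVAL;

	/* nvram_buf is in lowmem, so its PFN comes straight from virt_to_phys() */
	return remap_pfn_range(vma, vma->vm_start,
			       (virt_to_phys(nvram_buf) >> PAGE_SHIFT) + vma->vm_pgoff,
			       len, vma->vm_page_prot);
}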
Example #13
static struct page *mtk_i2s0_pcm_page(struct snd_pcm_substream *substream,
                                      unsigned long offset)
{
    printk("%s \n", __func__);
    return virt_to_page(dummy_page[substream->stream]); /* the same page */
}
Example #14
static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}
Example #15
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		dma_addr_t	req_dma;
		struct page	*page;
		unsigned long	offset;

		page = virt_to_page(cmd->request_buffer);
		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
		req_dma = pci_map_page(ha->pdev, page, offset,
		    cmd->request_bufflen, cmd->sc_data_direction);

		sp->dma_handle = req_dma;

		*cur_dsd++ = cpu_to_le32(LSD(req_dma));
		*cur_dsd++ = cpu_to_le32(MSD(req_dma));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
Example #16
/*============================================================================
 * Do window creation here
 */
static int __create_slave_window(vme_slave_handle_t handle,
				 struct __vme_slave_window *window, int num,
				 uint32_t ctl, uint32_t vme_addr, size_t size,
				 void *phys_addr)
{
	uint32_t base, bound, to, off;
#ifndef ARCH
	struct page *page;
#endif
	int resolution;

	/* Windows 0 and 4 have a 4kb resolution, others have
	   64kb resolution
	 */
	resolution = (num % 4) ? 0x10000 : 0x1000;
	off = vme_addr % resolution;
	vme_addr -= off;
	size += off;
	size += (size % resolution) ? resolution - (size % resolution) : 0;

	/* If we're given the physical address, then use it,
	   otherwise, let the kernel allocate the memory
	   wherever it wants to.
	 */
	if (phys_addr) {
		phys_addr -= off;
		if ((uint32_t) phys_addr % resolution) {
			write_unlock(&slave_rwlock);
			printk(KERN_ERR "VME: Invalid physical address for "
			       "slave window %d\n", num);
			return -EINVAL;
		}
	} else {
		window->vptr = pci_alloc_consistent(universe_pci_dev, size,
						    &window->resource);
		if (NULL == window->vptr) {
			window->resource = 0;
			window->vptr = NULL;
			write_unlock(&slave_rwlock);
			printk(KERN_ERR "VME: Failed to allocate memory for "
			       "slave window %d\n", num);
			return -ENOMEM;
		}
#ifdef ARCH
	    memset(window->vptr, 0, size);
	}
#else
		/* The memory manager wants to remove the
		   allocated pages from main memory.  We don't
		   want that because the user ends up seeing
		   all zeros, so we set the PG_reserved bit
		   on each page.
		 */
		for (page = virt_to_page(window->vptr);
		     page < virt_to_page(window->vptr + size); ++page)
		{		     
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,68)
			mem_map_reserve(page);
#else			
			SetPageReserved(page);
#endif			
		}

		phys_addr = (void *) virt_to_phys(window->vptr);
#endif
		base = vme_addr;
		bound = base + size;
#ifdef ARCH
	to = (uint32_t) window->resource - base;
	window->phys_base = (uint32_t) window->vptr;
#else
		to = (uint32_t) phys_addr - base;
	}
Example #17
static struct sk_buff * ata(struct aoedev *d, struct sk_buff *skb)
{
	struct aoe_hdr *aoe;
	struct aoe_atahdr *ata;
	struct aoereq *rq, *e;
	struct bio *bio;
	sector_t lba;
	int len, rw;
	struct page *page;
	ulong bcnt, offset;

	aoe = (struct aoe_hdr *) skb_mac_header(skb);
	ata = (struct aoe_atahdr *) aoe->data;
	lba = readlba(ata->lba);
	len = sizeof *aoe + sizeof *ata;
	switch (ata->cmdstat) {
	do {
	case ATA_CMD_PIO_READ:
		lba &= ATA_LBA28MAX;
	case ATA_CMD_PIO_READ_EXT:
		lba &= 0x0000FFFFFFFFFFFFULL;
		rw = READ;
		break;
	case ATA_CMD_PIO_WRITE:
		lba &= ATA_LBA28MAX;
	case ATA_CMD_PIO_WRITE_EXT:
		lba &= 0x0000FFFFFFFFFFFFULL;
		rw = WRITE;
	} while (0);
		if ((lba + ata->scnt) > d->scnt) {
			printk(KERN_ERR "sector I/O is out of range: %Lu (%d), max %Lu\n",
				(long long) lba, ata->scnt, d->scnt);
			ata->cmdstat = ATA_ERR;
			ata->errfeat = ATA_IDNF;
			break;
		}
		rq = d->reqs;
		e = rq + nelem(d->reqs);
		for (; rq<e; rq++)
			if (rq->skb == NULL)
				break;
		if (rq == e)
			goto drop;
		
		bio = bio_alloc(GFP_ATOMIC, 1);
		if (bio == NULL) {
			eprintk("can't alloc bio\n");
			goto drop;
		}
		rq->bio = bio;
		rq->d = d;

		bio->bi_sector = lba;
		bio->bi_bdev = d->blkdev;
		bio->bi_end_io = ata_io_complete;
		bio->bi_private = rq;

		page = virt_to_page(ata->data);
		bcnt = ata->scnt << 9;
		offset = offset_in_page(ata->data);

		if (bio_add_page(bio, page, bcnt, offset) < bcnt) {
			printk(KERN_ERR "Can't bio_add_page for %d sectors\n", ata->scnt);
			bio_put(bio);
			goto drop;
		}

		rq->skb = skb;
		atomic_inc(&d->busy);
		submit_bio(rw, bio);
		return NULL;
	default:
		printk(KERN_ERR "Unknown ATA command 0x%02X\n", ata->cmdstat);
		ata->cmdstat = ATA_ERR;
		ata->errfeat = ATA_ABORTED;
		break;
	case ATA_CMD_ID_ATA:
		len += ata_identify(d, ata);
	case ATA_CMD_FLUSH:
		ata->cmdstat = ATA_DRDY;
		ata->errfeat = 0;
		break;
	}
	skb_trim(skb, len);
	return skb;
drop:
	dev_kfree_skb(skb);
	return NULL;
}
Example #18
/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}
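free_zbud_page() relies on the zbud header living at offset 0 of its page, so virt_to_page() on the header yields the page itself. The allocation side is the mirror image, sketched here with the real header initialization elided:

static struct zbud_header *alloc_zbud_page_sketch(gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return NULL;
	/* buddy bookkeeping (chunk sizes, list heads) omitted */
	return (struct zbud_header *)page_address(page);
}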
Example #19
static void rx_complete(struct urb *req)
{
	struct net_device *dev = req->context;
	struct usbpn_dev *pnd = netdev_priv(dev);
	struct page *page = virt_to_page(req->transfer_buffer);
	struct sk_buff *skb;
	unsigned long flags;

	switch (req->status) {
	case 0:
		spin_lock_irqsave(&pnd->rx_lock, flags);
		skb = pnd->rx_skb;
		if (!skb) {
			skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
			if (likely(skb)) {
				/* Can't use pskb_pull() on page in IRQ */
				memcpy(skb_put(skb, 1), page_address(page), 1);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						page, 1, req->actual_length);
				page = NULL;
			}
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, 0, req->actual_length);
			page = NULL;
		}
		if (req->actual_length < PAGE_SIZE)
			pnd->rx_skb = NULL; /* Last fragment */
		else
			skb = NULL;
		spin_unlock_irqrestore(&pnd->rx_lock, flags);
		if (skb) {
			skb->protocol = htons(ETH_P_PHONET);
			skb_reset_mac_header(skb);
			__skb_pull(skb, 1);
			skb->dev = dev;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += skb->len;

			netif_rx(skb);
		}
		goto resubmit;

	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		req = NULL;
		break;

	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		dev_dbg(&dev->dev, "RX overflow\n");
		break;

	case -EILSEQ:
		dev->stats.rx_crc_errors++;
		break;
	}

	dev->stats.rx_errors++;
resubmit:
	if (page)
		netdev_free_page(dev, page);
	if (req)
		rx_submit(pnd, req, GFP_ATOMIC);
}
Example #20
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)

	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
			|| (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}

#ifdef CONFIG_DEBUG_IOREMAP
	return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
#else
	return (void __iomem *)phys_addr;
#endif

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
	   
		for (page = virt_to_page(t_addr); 
		     page <= virt_to_page(t_end); page++) {
			if(!PageReserved(page))
				return NULL;
		}
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
#endif
}
Example #21
int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                       struct xen_arch_domainconfig *config)
{
    int rc;

    d->arch.relmem = RELMEM_not_started;

    /* Idle domains do not need this setup */
    if ( is_idle_domain(d) )
        return 0;

    ASSERT(config != NULL);
    if ( (rc = p2m_init(d)) != 0 )
        goto fail;

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
        goto fail;

    /* Default the virtual ID to match the physical */
    d->arch.vpidr = boot_cpu_data.midr.bits;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);

    if ( (rc = domain_io_init(d)) != 0 )
        goto fail;

    if ( (rc = p2m_alloc_table(d)) != 0 )
        goto fail;

    switch ( config->gic_version )
    {
    case XEN_DOMCTL_CONFIG_GIC_NATIVE:
        switch ( gic_hw_version () )
        {
        case GIC_V2:
            config->gic_version = XEN_DOMCTL_CONFIG_GIC_V2;
            d->arch.vgic.version = GIC_V2;
            break;

        case GIC_V3:
            config->gic_version = XEN_DOMCTL_CONFIG_GIC_V3;
            d->arch.vgic.version = GIC_V3;
            break;

        default:
            BUG();
        }
        break;

    case XEN_DOMCTL_CONFIG_GIC_V2:
        d->arch.vgic.version = GIC_V2;
        break;

    case XEN_DOMCTL_CONFIG_GIC_V3:
        d->arch.vgic.version = GIC_V3;
        break;

    default:
        rc = -EOPNOTSUPP;
        goto fail;
    }

    if ( (rc = domain_vgic_init(d, config->nr_spis)) != 0 )
        goto fail;

    if ( (rc = domain_vtimer_init(d, config)) != 0 )
        goto fail;

    /*
     * The hardware domain will get a PPI later in
     * arch/arm/domain_build.c  depending on the
     * interrupt map of the hardware.
     */
    if ( !is_hardware_domain(d) )
    {
        d->arch.evtchn_irq = GUEST_EVTCHN_PPI;
        /* At this stage vgic_reserve_virq should never fail */
        if ( !vgic_reserve_virq(d, GUEST_EVTCHN_PPI) )
            BUG();
    }

    /*
     * Virtual UART is only used by linux early printk and decompress code.
     * Only use it for the hardware domain because the linux kernel may not
     * support multi-platform.
     */
    if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
        goto fail;

    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    return 0;

fail:
    d->is_dying = DOMDYING_dead;
    arch_domain_destroy(d);

    return rc;
}
Example #22
static bool page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
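The page_count() == 1 test only works because of a matching refcount convention: whoever installs an entry in the table backed by ptr takes an extra page reference and drops it on removal, so a count of exactly one (the allocation itself) means the table is empty. Sketched as a pair of hypothetical helpers:

static void table_entry_added_sketch(void *table)
{
	get_page(virt_to_page(table));	/* one extra ref per live entry */
}

static void table_entry_removed_sketch(void *table)
{
	put_page(virt_to_page(table));	/* dropped when the entry goes away */
}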
Example #23
static int us122l_create_usbmidi(struct snd_card *card)
{
	static struct snd_usb_midi_endpoint_info quirk_data = {
		.out_ep = 4,
		.in_ep = 3,
		.out_cables =	0x001,
		.in_cables =	0x001
	};
	static struct snd_usb_audio_quirk quirk = {
		.vendor_name =	"US122L",
		.product_name =	NAME_ALLCAPS,
		.ifnum = 	1,
		.type = QUIRK_MIDI_US122L,
		.data = &quirk_data
	};
	struct usb_device *dev = US122L(card)->dev;
	struct usb_interface *iface = usb_ifnum_to_if(dev, 1);

	return snd_usbmidi_create(card, iface,
				  &US122L(card)->midi_list, &quirk);
}

static int us144_create_usbmidi(struct snd_card *card)
{
	static struct snd_usb_midi_endpoint_info quirk_data = {
		.out_ep = 4,
		.in_ep = 3,
		.out_cables =	0x001,
		.in_cables =	0x001
	};
	static struct snd_usb_audio_quirk quirk = {
		.vendor_name =	"US144",
		.product_name =	NAME_ALLCAPS,
		.ifnum = 	0,
		.type = QUIRK_MIDI_US122L,
		.data = &quirk_data
	};
	struct usb_device *dev = US122L(card)->dev;
	struct usb_interface *iface = usb_ifnum_to_if(dev, 0);

	return snd_usbmidi_create(card, iface,
				  &US122L(card)->midi_list, &quirk);
}

/*
 * Wrapper for usb_control_msg().
 * Allocates a temp buffer to prevent dmaing from/to the stack.
 */
static int us122l_ctl_msg(struct usb_device *dev, unsigned int pipe,
			  __u8 request, __u8 requesttype,
			  __u16 value, __u16 index, void *data,
			  __u16 size, int timeout)
{
	int err;
	void *buf = NULL;

	if (size > 0) {
		buf = kmemdup(data, size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}
	err = usb_control_msg(dev, pipe, request, requesttype,
			      value, index, buf, size, timeout);
	if (size > 0) {
		memcpy(data, buf, size);
		kfree(buf);
	}
	return err;
}

static void pt_info_set(struct usb_device *dev, u8 v)
{
	int ret;

	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			      'I',
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      v, 0, NULL, 0, 1000);
	snd_printdd(KERN_DEBUG "%i\n", ret);
}

static void usb_stream_hwdep_vm_open(struct vm_area_struct *area)
{
	struct us122l *us122l = area->vm_private_data;
	atomic_inc(&us122l->mmap_count);
	snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}

static int usb_stream_hwdep_vm_fault(struct vm_area_struct *area,
				     struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	void *vaddr;
	struct us122l *us122l = area->vm_private_data;
	struct usb_stream *s;

	mutex_lock(&us122l->mutex);
	s = us122l->sk.s;
	if (!s)
		goto unlock;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset < PAGE_ALIGN(s->read_size))
		vaddr = (char *)s + offset;
	else {
		offset -= PAGE_ALIGN(s->read_size);
		if (offset >= PAGE_ALIGN(s->write_size))
			goto unlock;

		vaddr = us122l->sk.write_page + offset;
	}
	page = virt_to_page(vaddr);

	get_page(page);
	mutex_unlock(&us122l->mutex);

	vmf->page = page;

	return 0;
unlock:
	mutex_unlock(&us122l->mutex);
	return VM_FAULT_SIGBUS;
}

static void usb_stream_hwdep_vm_close(struct vm_area_struct *area)
{
	struct us122l *us122l = area->vm_private_data;
	atomic_dec(&us122l->mmap_count);
	snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}

static const struct vm_operations_struct usb_stream_hwdep_vm_ops = {
	.open = usb_stream_hwdep_vm_open,
	.fault = usb_stream_hwdep_vm_fault,
	.close = usb_stream_hwdep_vm_close,
};


static int usb_stream_hwdep_open(struct snd_hwdep *hw, struct file *file)
{
	struct us122l	*us122l = hw->private_data;
	struct usb_interface *iface;
	snd_printdd(KERN_DEBUG "%p %p\n", hw, file);
	if (hw->used >= 2)
		return -EBUSY;

	if (!us122l->first)
		us122l->first = file;

	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
		iface = usb_ifnum_to_if(us122l->dev, 0);
		usb_autopm_get_interface(iface);
	}
	iface = usb_ifnum_to_if(us122l->dev, 1);
	usb_autopm_get_interface(iface);
	return 0;
}

static int usb_stream_hwdep_release(struct snd_hwdep *hw, struct file *file)
{
	struct us122l	*us122l = hw->private_data;
	struct usb_interface *iface;
	snd_printdd(KERN_DEBUG "%p %p\n", hw, file);

	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
		iface = usb_ifnum_to_if(us122l->dev, 0);
		usb_autopm_put_interface(iface);
	}
	iface = usb_ifnum_to_if(us122l->dev, 1);
	usb_autopm_put_interface(iface);
	if (us122l->first == file)
		us122l->first = NULL;
	mutex_lock(&us122l->mutex);
	if (us122l->master == file)
		us122l->master = us122l->slave;

	us122l->slave = NULL;
	mutex_unlock(&us122l->mutex);
	return 0;
}

static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
				 struct file *filp, struct vm_area_struct *area)
{
	unsigned long	size = area->vm_end - area->vm_start;
	struct us122l	*us122l = hw->private_data;
	unsigned long offset;
	struct usb_stream *s;
	int err = 0;
	bool read;

	offset = area->vm_pgoff << PAGE_SHIFT;
	mutex_lock(&us122l->mutex);
	s = us122l->sk.s;
	read = offset < s->read_size;
	if (read && area->vm_flags & VM_WRITE) {
		err = -EPERM;
		goto out;
	}
	snd_printdd(KERN_DEBUG "%lu %u\n", size,
		    read ? s->read_size : s->write_size);
	/* if userspace tries to mmap beyond end of our buffer, fail */
	if (size > PAGE_ALIGN(read ? s->read_size : s->write_size)) {
		snd_printk(KERN_WARNING "%lu > %u\n", size,
			   read ? s->read_size : s->write_size);
		err = -EINVAL;
		goto out;
	}

	area->vm_ops = &usb_stream_hwdep_vm_ops;
	area->vm_flags |= VM_RESERVED;
	area->vm_private_data = us122l;
	atomic_inc(&us122l->mmap_count);
out:
	mutex_unlock(&us122l->mutex);
	return err;
}
Example #24
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	u32 klen, len;
	u8 key[WEP_KEY_LEN + 3];
	u8 *pos;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	#if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED))
	struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
	#endif
	u32 crc;
	u8 *icv;
	struct scatterlist sg;
	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
	    skb->len < hdr_len)
		return -1;

	len = skb->len - hdr_len;
	pos = skb_push(skb, 4);
	memmove(pos, pos + 4, hdr_len);
	pos += hdr_len;

	klen = 3 + wep->key_len;

	wep->iv++;

	
	if ((wep->iv & 0xff00) == 0xff00) {
		u8 B = (wep->iv >> 16) & 0xff;
		if (B >= 3 && B < klen)
			wep->iv += 0x0100;
	}

	
	*pos++ = key[0] = (wep->iv >> 16) & 0xff;
	*pos++ = key[1] = (wep->iv >> 8) & 0xff;
	*pos++ = key[2] = wep->iv & 0xff;
	*pos++ = wep->key_idx << 6;

	
	memcpy(key + 3, wep->key, wep->key_len);

	if (!tcb_desc->bHwSec)
	{

		
	#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
		crc = ~crc32_le(~0, pos, len);
	#else
		crc = ~ether_crc_le(len, pos);
	#endif
		icv = skb_put(skb, 4);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;

#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
		crypto_cipher_setkey(wep->tfm, key, klen);
		sg.page = virt_to_page(pos);
		sg.offset = offset_in_page(pos);
		sg.length = len + 4;
		crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
		return 0;
	#else
		crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
	#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
		sg.page = virt_to_page(pos);
		sg.offset = offset_in_page(pos);
		sg.length = len + 4;
	#else
		sg_init_one(&sg, pos, len+4);
	#endif
		return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
	#endif
	}
Example #25
static struct page *mtk_bt_dai_capture_pcm_page(struct snd_pcm_substream *substream,
                                                unsigned long offset)
{
    printk("dummy_pcm_page \n");
    return virt_to_page(dummy_page[substream->stream]); /* the same page */
}
Example #26
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *memory;
	int gfp = GFP_ATOMIC;
	int i;
	unsigned long iommu_page;

	if (hwdev == NULL || hwdev->dma_mask < 0xffffffff || (no_iommu && !swiotlb))
		gfp |= GFP_DMA;

	/* 
	 * First try to allocate continuous and use directly if already 
	 * in lowmem. 
	 */ 
	size = round_up(size, PAGE_SIZE); 
	memory = (void *)__get_free_pages(gfp, get_order(size));
	if (memory == NULL) {
		return NULL; 
	} else {
		int high = 0, mmu;
		if (((unsigned long)virt_to_bus(memory) + size) > 0xffffffffUL)
			high = 1;
		mmu = high;
		if (force_mmu && !(gfp & GFP_DMA)) 
			mmu = 1;
		if (no_iommu) { 
#ifdef CONFIG_SWIOTLB
			if (swiotlb && high && hwdev) {
				unsigned long dma_mask = 0;
				if (hwdev->dma_mask == ~0UL) {
					hwdev->dma_mask = 0xffffffff;
					dma_mask = ~0UL;
				}
				*dma_handle = swiotlb_map_single(hwdev, memory, size,
						   		 PCI_DMA_FROMDEVICE);
				if (dma_mask)
					hwdev->dma_mask = dma_mask;
				memset(phys_to_virt(*dma_handle), 0, size); 
				free_pages((unsigned long)memory, get_order(size));
				return phys_to_virt(*dma_handle);
			}
#endif
			if (high) goto error;
			mmu = 0; 
		} 	
		memset(memory, 0, size); 
		if (!mmu) { 
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	} 

	size >>= PAGE_SHIFT;

	iommu_page = alloc_iommu(size);
	if (iommu_page == -1)
		goto error; 

   	/* Fill in the GATT, allocating pages as needed. */
	for (i = 0; i < size; i++) { 
		unsigned long phys_mem; 
		void *mem = memory + i*PAGE_SIZE;
		if (i > 0) 
			atomic_inc(&virt_to_page(mem)->count); 
		phys_mem = virt_to_phys(mem); 
		BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK); 
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem); 
	} 

	flush_gart();
	*dma_handle = iommu_bus_base + (iommu_page << PAGE_SHIFT);
	return memory; 
	
 error:
	free_pages((unsigned long)memory, get_order(size)); 
	return NULL; 
}
Example #27
unsigned long __init prom_free_prom_memory(void)
{
    unsigned long freed = 0;
    unsigned long addr;
    int i;
#ifdef	CONFIG_REALTEK_RECLAIM_BOOT_MEM
    unsigned long dest;
    struct page *page;
    int count;
#endif

    for (i = 0; i < boot_mem_map.nr_map; i++) {
        if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
            continue;

        addr = boot_mem_map.map[i].addr;
        while (addr < boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size) {
            ClearPageReserved(virt_to_page(__va(addr)));
            set_page_count(virt_to_page(__va(addr)), 1);
            free_page((unsigned long)__va(addr));
            addr += PAGE_SIZE;
            freed += PAGE_SIZE;
        }
    }
    printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
#ifdef	CONFIG_REALTEK_RECLAIM_BOOT_MEM
    if (!is_mars_cpu()) {
        // venus or neptune
        addr = F_ADDR1;
        if (debug_flag)
            dest = T_ADDR1;
        else
            dest = T_ADDR2;
    } else {
        // mars
        addr = F_ADDR2;
        if (debug_flag)
            dest = T_ADDR1;
        else
            dest = T_ADDR3;
    }
    printk("Reclaim bootloader memory from %x to %x\n", addr, dest);
    count = 0;
    while (addr < dest) {
        page = virt_to_page(addr);
        /*
        		printk("mem_map: %x, page: %x, size: %d \n", (int)mem_map, (int)page, sizeof(struct page));
        		if (PageReserved(page) != 1)
        			BUG();
        		if (page->_count.counter != -1)
        			BUG();
        */
        count++;

        __ClearPageReserved(page);
        set_page_count(page, 1);
        __free_page(page);

        addr += 0x1000; // 4KB
    }
    totalram_pages += count;
#endif
    return freed;
}
Example #28
struct page *kmap_atomic_to_page(void *ptr)
{
	return virt_to_page(ptr);
}
Example #29
/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
			struct xdr_buf *xdr,
			struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no = 0;
	u8 *frva;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = svc_rdma_get_frmr(xprt);
	if (IS_ERR(frmr))
		return -ENOMEM;
	vec->frmr = frmr;

	/* Skip the RPCRDMA header */
	sge_no = 1;

	/* Map the head. */
	frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	vec->count = 2;
	sge_no++;

	/* Map the XDR head */
	frmr->kva = frva;
	frmr->direction = DMA_TO_DEVICE;
	frmr->access_flags = 0;
	frmr->map_len = PAGE_SIZE;
	frmr->page_list_len = 1;
	page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
	frmr->page_list->page_list[page_no] =
		ib_dma_map_page(xprt->sc_cm_id->device,
				virt_to_page(xdr->head[0].iov_base),
				page_off,
				PAGE_SIZE - page_off,
				DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
				 frmr->page_list->page_list[page_no]))
		goto fatal_err;
	atomic_inc(&xprt->sc_dma_used);

	/* Map the XDR page list */
	page_off = xdr->page_base;
	page_bytes = xdr->page_len + page_off;
	if (!page_bytes)
		goto encode_tail;

	/* Map the pages */
	vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
	vec->sge[sge_no].iov_len = page_bytes;
	sge_no++;
	while (page_bytes) {
		struct page *page;

		page = xdr->pages[page_no++];
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_page(xprt->sc_cm_id->device,
					page, page_off,
					sge_bytes, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;

		atomic_inc(&xprt->sc_dma_used);
		page_off = 0; /* reset for next time through loop */
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}
	vec->count++;

 encode_tail:
	/* Map tail */
	if (0 == xdr->tail[0].iov_len)
		goto done;

	vec->count++;
	vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

	if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
	    ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
		/*
		 * If head and tail use the same page, we don't need
		 * to map it again.
		 */
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
	} else {
		void *va;

		/* Map another page for the tail */
		page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
		va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

		frmr->page_list->page_list[page_no] =
		    ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
				    page_off,
				    PAGE_SIZE,
				    DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;
		atomic_inc(&xprt->sc_dma_used);
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}

 done:
	if (svc_rdma_fastreg(xprt, frmr))
		goto fatal_err;

	return 0;

 fatal_err:
	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
	vec->frmr = NULL;
	svc_rdma_put_frmr(xprt, frmr);
	return -EIO;
}
Example #30
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}