Example #1
0
static int __devinit xenkbd_probe(struct xenbus_device *dev,
				  const struct xenbus_device_id *id)
{
	int ret, i;
	struct xenkbd_info *info;
	struct input_dev *kbd, *ptr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	dev_set_drvdata(&dev->dev, info);
	info->xbdev = dev;
	info->irq = -1;
	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);

	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!info->page)
		goto error_nomem;

	
	/* keyboard */
	kbd = input_allocate_device();
	if (!kbd)
		goto error_nomem;
	kbd->name = "Xen Virtual Keyboard";
	kbd->phys = info->phys;
	kbd->id.bustype = BUS_PCI;
	kbd->id.vendor = 0x5853;
	kbd->id.product = 0xffff;
	kbd->evbit[0] = BIT(EV_KEY);
	for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
		set_bit(i, kbd->keybit);
	for (i = KEY_OK; i < KEY_MAX; i++)
		set_bit(i, kbd->keybit);

	ret = input_register_device(kbd);
	if (ret) {
		input_free_device(kbd);
		xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
		goto error;
	}
	info->kbd = kbd;

	
	/* pointing device */
	ptr = input_allocate_device();
	if (!ptr)
		goto error_nomem;
	ptr->name = "Xen Virtual Pointer";
	ptr->phys = info->phys;
	ptr->id.bustype = BUS_PCI;
	ptr->id.vendor = 0x5853;
	ptr->id.product = 0xfffe;
	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
	for (i = BTN_LEFT; i <= BTN_TASK; i++)
		set_bit(i, ptr->keybit);
	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);

	ret = input_register_device(ptr);
	if (ret) {
		input_free_device(ptr);
		xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
		goto error;
	}
	info->ptr = ptr;

	ret = xenkbd_connect_backend(dev, info);
	if (ret < 0)
		goto error;

	return 0;

 error_nomem:
	ret = -ENOMEM;
	xenbus_dev_fatal(dev, ret, "allocating device memory");
 error:
	xenkbd_remove(dev);
	return ret;
}
Example #2
0
int proc_audio_delay_bitstream_write(struct file *file, const char __user *buf, unsigned long count, void *data)
{
	char    *page;
	char    *myString;
	ssize_t  ret = -ENOMEM;

#ifdef VERY_VERBOSE
	printk("%s %ld - ", __FUNCTION__, count);
#endif

	page = (char *)__get_free_page(GFP_KERNEL);
	if (page)
	{
		int number = 0;
		struct snd_kcontrol **kcontrol       = pseudoGetControls(&number);
		struct snd_kcontrol  *single_control = NULL;
		int vLoop;
		int delay = 0;

		ret = -EFAULT;
		if(file == NULL && data == NULL)
			strncpy(page, buf, count);
		else
		{
			if (copy_from_user(page, buf, count))
				goto out;
		}

		myString = (char *) kmalloc(count + 1, GFP_KERNEL);
		if (!myString) {
			ret = -ENOMEM;
			goto out;
		}
		strncpy(myString, page, count);
		myString[count] = '\0';

#ifdef VERY_VERBOSE
		printk("%s\n", myString);
#endif
		sscanf(myString, "%x", &delay);
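		/* the value is parsed as hex; the division by 90 below presumably converts 90 kHz PTS ticks to milliseconds */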

		if (delay != 0)
			delay /= 90;

		for (vLoop = 0; vLoop < number; vLoop++)
		{
			if (kcontrol[vLoop]->private_value == PSEUDO_ADDR(master_latency))
			{
				single_control = kcontrol[vLoop];
				//printk("Find master_latency control at %p\n", single_control);
				break;
			}
		}

		if ((kcontrol != NULL) && (single_control != NULL))
		{
			struct snd_ctl_elem_value ucontrol;

			//printk("Pseudo Mixer controls = %p\n", kcontrol);
			ucontrol.value.integer.value[0] = delay;
			snd_pseudo_integer_put(single_control, &ucontrol);
		}
		else
		{
			printk("Pseudo Mixer does not deliver controls\n");
		}

		kfree(myString);
		ret = count;
	}

out:

	free_page((unsigned long)page);
	return ret;
}
Example #3
0
/*
 * Parse a comments section looking for clues as to the system this
 * was compiled on so we can get the system call interface right.
 */
static int
coff_parse_comments(struct file *fp, COFF_SCNHDR *sect, long *personality)
{
	u_long			offset, nbytes;
	int			hits = 0, err;
	char			*buffer;

	if (!(buffer = (char *)__get_free_page(GFP_KERNEL)))
		return 0;

	/*
	 * Fetch the size of the section.  There must be something in there
	 * or the section wouldn't exist at all.  We only bother with the
	 * first 8192 characters though.  There isn't any point getting too
	 * carried away!
	 */
	if ((nbytes = COFF_LONG(sect->s_size)) > 8192)
		nbytes = 8192;

	offset = COFF_LONG(sect->s_scnptr);
	while (nbytes > 0) {
		u_long		count, start = 0;
		char		*p;

		err = kernel_read(fp, offset, buffer,
				nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);

		if (err <= 0) {
			free_page((u_long) buffer);
			return 0;
		}

		p = buffer;
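		/* scan each NUL-terminated string in this chunk against the clue table */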
		for (count = 0; count < err; count++) {
			coff_clue		*clue;
			char			c;
			
			c = *(buffer + PAGE_SIZE - 1);
			*(buffer + PAGE_SIZE - 1) = '\0';

			for (clue = coff_clues; clue->len; clue++) {
				if ((clue->len < 0 && strstr(p, clue->text)) ||
				    (clue->len > 0 && !strncmp(p, clue->text, clue->len))) {
					*personality &= clue->mask_and;
					*personality |= clue->mask_or;
					if (clue->terminal) {
						free_page((u_long)buffer);
						return 1;
					}
					hits++;
				}
			}
			*(buffer + PAGE_SIZE - 1) = c;

			while (*p && count < err)
				p++, count++;
			if (count < err) {
				p++;
				count++;
				start = count;
			}
		}

		/*
		 * If we didn't find an end of string at all, this page
		 * probably isn't useful string data.
		 */
		if (start == 0)
			start = err;

		nbytes -= start;
		offset += start;
	}

	free_page((u_long)buffer);
	return (hits);
}
Example #4
0
static int xenkbd_probe(struct xenbus_device *dev,
				  const struct xenbus_device_id *id)
{
	int ret, i, abs;
	struct xenkbd_info *info;
	struct input_dev *kbd, *ptr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	dev_set_drvdata(&dev->dev, info);
	info->xbdev = dev;
	info->irq = -1;
	info->gref = -1;
	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);

	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!info->page)
		goto error_nomem;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
		abs = 0;
	if (abs)
		xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1");

	/* keyboard */
	kbd = input_allocate_device();
	if (!kbd)
		goto error_nomem;
	kbd->name = "Xen Virtual Keyboard";
	kbd->phys = info->phys;
	kbd->id.bustype = BUS_PCI;
	kbd->id.vendor = 0x5853;
	kbd->id.product = 0xffff;

	__set_bit(EV_KEY, kbd->evbit);
	for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
		__set_bit(i, kbd->keybit);
	for (i = KEY_OK; i < KEY_MAX; i++)
		__set_bit(i, kbd->keybit);

	ret = input_register_device(kbd);
	if (ret) {
		input_free_device(kbd);
		xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
		goto error;
	}
	info->kbd = kbd;

	/* pointing device */
	ptr = input_allocate_device();
	if (!ptr)
		goto error_nomem;
	ptr->name = "Xen Virtual Pointer";
	ptr->phys = info->phys;
	ptr->id.bustype = BUS_PCI;
	ptr->id.vendor = 0x5853;
	ptr->id.product = 0xfffe;

	if (abs) {
		__set_bit(EV_ABS, ptr->evbit);
		input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
		input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
	} else {
		input_set_capability(ptr, EV_REL, REL_X);
		input_set_capability(ptr, EV_REL, REL_Y);
	}
	input_set_capability(ptr, EV_REL, REL_WHEEL);

	__set_bit(EV_KEY, ptr->evbit);
	for (i = BTN_LEFT; i <= BTN_TASK; i++)
		__set_bit(i, ptr->keybit);

	ret = input_register_device(ptr);
	if (ret) {
		input_free_device(ptr);
		xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
		goto error;
	}
	info->ptr = ptr;

	ret = xenkbd_connect_backend(dev, info);
	if (ret < 0)
		goto error;

	return 0;

 error_nomem:
	ret = -ENOMEM;
	xenbus_dev_fatal(dev, ret, "allocating device memory");
 error:
	xenkbd_remove(dev);
	return ret;
}
Example #5
0
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
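	/* kernel page tables are not charged to a memory cgroup, hence __GFP_ACCOUNT is cleared */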
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
Example #6
0
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ret = crypto_register_alg(&xts_aes_alg);
		if (ret)
			goto xts_aes_err;
	}

	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto ctr_aes_err;
		}
		ret = crypto_register_alg(&ctr_aes_alg);
		if (ret) {
			free_page((unsigned long) ctrblk);
			goto ctr_aes_err;
		}
	}

out:
	return ret;

ctr_aes_err:
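	/* error unwind: unregister algorithms in reverse order of registration */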
	crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
	crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}
Example #7
0
int proc_avs_0_colorformat_write(struct file *file, const char __user *buf,
                           unsigned long count, void *data)
{
	char 		*page;
	char		*myString;
	ssize_t 	ret = -ENOMEM;

	printk("%s %ld - ", __FUNCTION__, count);

	page = (char *)__get_free_page(GFP_KERNEL);
	if (page)
	{
		struct stmfb_info *info = stmfb_get_fbinfo_ptr();

		struct stmfbio_output_configuration outputConfig;
		int err = 0;
		int alpha = 0;
		int hdmi_colour = 0;
		int scart_colour = 0;
		int hdmi0scart1yuv2 = 1;

		ret = -EFAULT;
		if (copy_from_user(page, buf, count))
			goto out;

		myString = (char *) kmalloc(count + 1, GFP_KERNEL);
		if (!myString) {
			ret = -ENOMEM;
			goto out;
		}
		strncpy(myString, page, count);
		myString[count] = '\0';

		printk("%s\n", myString);

		sscanf(myString, "%d", &alpha);

		/* hdmi_colour: 0 = RGB, 1 = YUV, 2 = YUV 4:2:2 */
		if (strncmp("hdmi_rgb", page, count - 1) == 0)
		{
			hdmi_colour = 0;
			hdmi0scart1yuv2 = 0;
		} else if (strncmp("hdmi_yuv", page, count - 1) == 0)
		{
			hdmi_colour = 1;
			hdmi0scart1yuv2 = 0;
		} else if (strncmp("hdmi_422", page, count - 1) == 0)
		{
			hdmi_colour = 2;
			hdmi0scart1yuv2 = 0;
		} else if (strncmp("rgb", page, count - 1) == 0)
		{
			scart_colour = SAA_MODE_RGB;
			hdmi0scart1yuv2 = 1;
		} else if (strncmp("cvbs", page, count - 1) == 0)
		{
			scart_colour = SAA_MODE_FBAS;
			hdmi0scart1yuv2 = 1;
		} else if (strncmp("svideo", page, count - 1) == 0)
		{
			scart_colour = SAA_MODE_SVIDEO;
			hdmi0scart1yuv2 = 1;
		} else if (strncmp("yuv", page, count - 1) == 0)
		{
			hdmi0scart1yuv2 = 2;
		}

		if (hdmi0scart1yuv2 == 0) {
			outputConfig.outputid = 1;

			stmfb_get_output_configuration(&outputConfig,info);

			outputConfig.caps = 0;
			outputConfig.activate = 0; //STMFBIO_ACTIVATE_IMMEDIATE;

			outputConfig.caps |= STMFBIO_OUTPUT_CAPS_HDMI_CONFIG;
			outputConfig.hdmi_config &= ~(STMFBIO_OUTPUT_HDMI_YUV|STMFBIO_OUTPUT_HDMI_422);

			switch(hdmi_colour)
			{
				case 1:
					outputConfig.hdmi_config |= STMFBIO_OUTPUT_HDMI_YUV;
					break;
				case 2:
					outputConfig.hdmi_config |= (STMFBIO_OUTPUT_HDMI_YUV|STMFBIO_OUTPUT_HDMI_422);
					break;
				default:
					break;
			}

			err = stmfb_set_output_configuration(&outputConfig, info);
		} else if (hdmi0scart1yuv2 == 1) {
			avs_command_kernel(SAAIOSMODE, (void*) scart_colour);

			outputConfig.outputid = 1;

			stmfb_get_output_configuration(&outputConfig,info);

			outputConfig.caps = 0;
			outputConfig.activate = 0; //STMFBIO_ACTIVATE_IMMEDIATE;

			outputConfig.caps |= STMFBIO_OUTPUT_CAPS_ANALOGUE_CONFIG;
			outputConfig.analogue_config = 0;

			switch(scart_colour)
			{
				case SAA_MODE_RGB:
					outputConfig.analogue_config |= (STMFBIO_OUTPUT_ANALOGUE_RGB|STMFBIO_OUTPUT_ANALOGUE_CVBS);
					break;
				case SAA_MODE_FBAS:
					outputConfig.analogue_config |= STMFBIO_OUTPUT_ANALOGUE_CVBS;
					break;
				case SAA_MODE_SVIDEO:
					outputConfig.analogue_config |= STMFBIO_OUTPUT_ANALOGUE_YC;
					break;
				default:
					break;
			}

			err = stmfb_set_output_configuration(&outputConfig, info);
			if (err != 0) {
				printk("SET SCART COLOR - %ld - ", count);
			}

		} else {
			outputConfig.outputid = 1;

			stmfb_get_output_configuration(&outputConfig,info);

			outputConfig.caps = 0;
			outputConfig.activate = 0; //STMFBIO_ACTIVATE_IMMEDIATE;

			outputConfig.caps |= STMFBIO_OUTPUT_CAPS_ANALOGUE_CONFIG;
			outputConfig.analogue_config = 0;

			outputConfig.analogue_config |= STMFBIO_OUTPUT_ANALOGUE_YPrPb;

			err = stmfb_set_output_configuration(&outputConfig, info);
			if (err != 0) {
				printk("SET SCART COLOR - %ld - ", count);
			}
		}

		//if(ioctl(fbfd, STMFBIO_SET_OUTPUT_CONFIG, &outputConfig)<0)
		//perror("setting output configuration failed");

		kfree(myString);
		ret = count;
	}

out:

	free_page((unsigned long)page);
	return ret;
}
Example #8
0
static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int err = 0;
	int make_nomedia_in_obb = 0;
	struct dentry *lower_dentry;
	struct dentry *lower_parent_dentry = NULL;
	struct path lower_path;
	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
	const struct cred *saved_cred = NULL;
	struct sdcardfs_inode_info *pi = SDCARDFS_I(dir);
	char *page_buf;
	char *nomedia_dir_name;
	char *nomedia_fullpath;
	int fullpath_namelen;
	int touch_err = 0;
#if 1
	struct iattr newattrs;
#endif

	int has_rw = get_caller_has_rw_locked(sbi->pkgl_id, sbi->options.derive);

	if(!check_caller_access_to_name(dir, dentry->d_name.name, sbi->options.derive, 1, has_rw)) {
		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n" 
						 "  dentry: %s, task:%s\n",
						 __func__, dentry->d_name.name, current->comm);
		err = -EACCES;
		goto out_eacces;
	}

	/* save current_cred and override it */
	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);

	/* check disk space */
	if (!check_min_free_space(dentry, 0, 1)) {
		printk(KERN_INFO "sdcardfs: No minimum free space.\n");
		err = -ENOSPC;
		goto out_revert;
	}

	/* the lower_dentry is negative here */
	sdcardfs_get_lower_path(dentry, &lower_path);
	lower_dentry = lower_path.dentry;
	lower_parent_dentry = lock_parent(lower_dentry);

	err = mnt_want_write(lower_path.mnt);
	if (err) {
		unlock_dir(lower_parent_dentry);
		goto out_unlock;
	}

	/* set last 16bytes of mode field to 0775 */
	mode = (mode & S_IFMT) | 00775;
	err = vfs_mkdir(lower_parent_dentry->d_inode, lower_dentry, mode);

	if (err) {
		unlock_dir(lower_parent_dentry);
		goto out;
	}

#if 1
	mutex_lock(&lower_dentry->d_inode->i_mutex);
	newattrs.ia_mode = (mode & S_IALLUGO) | (lower_dentry->d_inode->i_mode & ~S_IALLUGO);
	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
	notify_change(lower_dentry, &newattrs);
	mutex_unlock(&lower_dentry->d_inode->i_mutex);
#endif

	/* if it is a local obb dentry, setup it with the base obbpath */
	if(need_graft_path(dentry)) {

		err = setup_obb_dentry(dentry, &lower_path);
		if(err) {
			/* if the sbi->obbpath is not available, the lower_path won't be
			 * changed by setup_obb_dentry() but the lower path is saved to
             * its orig_path. this dentry will be revalidated later.
			 * but now, the lower_path should be NULL */
			sdcardfs_put_reset_lower_path(dentry);

			/* the newly created lower path which saved to its orig_path or
			 * the lower_path is the base obbpath.
             * therefore, an additional path_get is required */
			path_get(&lower_path);
		} else
			make_nomedia_in_obb = 1;
	}

	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
	if (err) {
		unlock_dir(lower_parent_dentry);
		goto out;
	}

	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
	fsstack_copy_inode_size(dir, lower_parent_dentry->d_inode);
	/* update number of links on parent directory */
	set_nlink(dir, sdcardfs_lower_inode(dir)->i_nlink);

	unlock_dir(lower_parent_dentry);

	if ((sbi->options.derive == DERIVE_UNIFIED) && (!strcasecmp(dentry->d_name.name, "obb"))
		&& (pi->perm == PERM_ANDROID) && (pi->userid == 0))
		make_nomedia_in_obb = 1;

	/* When creating /Android/data and /Android/obb, mark them as .nomedia */
	if (make_nomedia_in_obb ||
		((pi->perm == PERM_ANDROID) && (!strcasecmp(dentry->d_name.name, "data")))) {

		page_buf = (char *)__get_free_page(GFP_KERNEL);
		if (!page_buf) {
			printk(KERN_ERR "sdcardfs: failed to allocate page buf\n");
			goto out;
		}

		nomedia_dir_name = d_absolute_path(&lower_path, page_buf, PAGE_SIZE);
		if (IS_ERR(nomedia_dir_name)) {
			free_page((unsigned long)page_buf);
			printk(KERN_ERR "sdcardfs: failed to get .nomedia dir name\n");
			goto out;
		}

		fullpath_namelen = page_buf + PAGE_SIZE - nomedia_dir_name - 1;
		fullpath_namelen += strlen("/.nomedia");
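		/* d_absolute_path() built the path at the end of page_buf, so its length is the distance to the end of the page; add room for "/.nomedia" */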
		nomedia_fullpath = kzalloc(fullpath_namelen + 1, GFP_KERNEL);
		if (!nomedia_fullpath) {
			free_page((unsigned long)page_buf);
			printk(KERN_ERR "sdcardfs: failed to allocate .nomedia fullpath buf\n");
			goto out;
		}

		strcpy(nomedia_fullpath, nomedia_dir_name);
		free_page((unsigned long)page_buf);
		strcat(nomedia_fullpath, "/.nomedia");
		touch_err = touch(nomedia_fullpath, 0664);
		if (touch_err) {
			printk(KERN_ERR "sdcardfs: failed to touch(%s): %d\n",
							nomedia_fullpath, touch_err);
			kfree(nomedia_fullpath);
			goto out;
		}
		kfree(nomedia_fullpath);
	}
out:
	mnt_drop_write(lower_path.mnt);
out_unlock:
	if (!strcmp(dentry->d_name.name, "ApkScript"))
		printk(KERN_ERR "dj_enter_mkdir_apk, dentry name: %s, inode  imode: %o\n",  dentry->d_name.name, dentry->d_inode->i_mode);

	if (!strcmp(dentry->d_name.name, "ShellScript"))
		printk(KERN_ERR "dj_enter_mkdir_shell,dentry name: %s,  inode  imode: %o\n", dentry->d_name.name, dentry->d_inode->i_mode);

	sdcardfs_put_lower_path(dentry, &lower_path);
out_revert:
	REVERT_CRED(saved_cred);
out_eacces:
	return err;
}
Example #9
0
static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction)
{
	u64 rc;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;

	if (TCE_PAGE_FACTOR == 0 && npages == 1)
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction);

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep)
			return tce_build_pSeriesLP(tbl, tcenum, npages,
						   uaddr, direction);
		__get_cpu_var(tce_page) = tcep;
	}

	tcenum <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
		printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%lx\n", (u64)npages);
		printk("\ttce[0] val = 0x%lx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
}
Example #10
0
static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct RioCommand rio_cmd;
	struct rio_usb_data *rio = &rio_instance;
	void __user *data;
	unsigned char *buffer;
	int result, requesttype;
	int retries;
	int retval=0;

	lock_kernel();
	mutex_lock(&(rio->lock));
	/* Sanity check to make sure rio is connected, powered, etc */
	if (rio->present == 0 || rio->rio_dev == NULL) {
		retval = -ENODEV;
		goto err_out;
	}

	switch (cmd) {
	case RIO_RECV_COMMAND:
		data = (void __user *) arg;
		if (data == NULL)
			break;
		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
			retval = -EFAULT;
			goto err_out;
		}
		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
			retval = -EINVAL;
			goto err_out;
		}
		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
		if (buffer == NULL) {
			retval = -ENOMEM;
			goto err_out;
		}
		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
			retval = -EFAULT;
			free_page((unsigned long) buffer);
			goto err_out;
		}

		requesttype = rio_cmd.requesttype | USB_DIR_IN |
		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		dbg
		    ("sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x",
		     requesttype, rio_cmd.request, rio_cmd.value,
		     rio_cmd.index, rio_cmd.length);
		/* Send rio control message */
		retries = 3;
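		/* retry only on timeout; any other error aborts immediately */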
		while (retries) {
			result = usb_control_msg(rio->rio_dev,
						 usb_rcvctrlpipe(rio-> rio_dev, 0),
						 rio_cmd.request,
						 requesttype,
						 rio_cmd.value,
						 rio_cmd.index, buffer,
						 rio_cmd.length,
						 jiffies_to_msecs(rio_cmd.timeout));
			if (result == -ETIMEDOUT)
				retries--;
			else if (result < 0) {
				err("Error executing ioctl. code = %d", result);
				retries = 0;
			} else {
				dbg("Executed ioctl. Result = %d (data=%02x)",
				     result, buffer[0]);
				if (copy_to_user(rio_cmd.buffer, buffer,
						 rio_cmd.length)) {
					free_page((unsigned long) buffer);
					retval = -EFAULT;
					goto err_out;
				}
				retries = 0;
			}

			/* rio_cmd.buffer contains a raw stream of single byte
			   data which has been returned from rio.  Data is
			   interpreted at application level.  For data that
			   will be cast to data types longer than 1 byte, data
			   will be little_endian and will potentially need to
			   be swapped at the app level */

		}
		free_page((unsigned long) buffer);
		break;

	case RIO_SEND_COMMAND:
		data = (void __user *) arg;
		if (data == NULL)
			break;
		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
			retval = -EFAULT;
			goto err_out;
		}
		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
			retval = -EINVAL;
			goto err_out;
		}
		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
		if (buffer == NULL) {
			retval = -ENOMEM;
			goto err_out;
		}
		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
			free_page((unsigned long)buffer);
			retval = -EFAULT;
			goto err_out;
		}

		requesttype = rio_cmd.requesttype | USB_DIR_OUT |
		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		dbg("sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x",
		     requesttype, rio_cmd.request, rio_cmd.value,
		     rio_cmd.index, rio_cmd.length);
		/* Send rio control message */
		retries = 3;
		while (retries) {
			result = usb_control_msg(rio->rio_dev,
						 usb_sndctrlpipe(rio-> rio_dev, 0),
						 rio_cmd.request,
						 requesttype,
						 rio_cmd.value,
						 rio_cmd.index, buffer,
						 rio_cmd.length,
						 jiffies_to_msecs(rio_cmd.timeout));
			if (result == -ETIMEDOUT)
				retries--;
			else if (result < 0) {
				err("Error executing ioctl. code = %d", result);
				retries = 0;
			} else {
				dbg("Executed ioctl. Result = %d", result);
				retries = 0;

			}

		}
		free_page((unsigned long) buffer);
		break;

	default:
		retval = -ENOTTY;
		break;
	}


err_out:
	mutex_unlock(&(rio->lock));
	unlock_kernel();
	return retval;
}
Example #11
0
static void scsifront_free(struct vscsifrnt_info *info)
{
	struct Scsi_Host *host = info->host;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
	if (host->shost_state != SHOST_DEL) {
#else
	if (!test_bit(SHOST_DEL, &host->shost_state)) {
#endif
		scsi_remove_host(info->host);
	}

	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref,
					(unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}

	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;

	scsi_host_put(info->host);
}


static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct vscsiif_sring *sring;
	int err = -ENOMEM;


	info->ring_ref = GRANT_INVALID_REF;

	/***** Frontend to Backend ring start *****/
	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)");
		return err;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
	if (err < 0) {
		free_page((unsigned long) sring);
		info->ring.sring = NULL;
		xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)");
		goto free_sring;
	}
	info->ring_ref = err;

	err = bind_listening_port_to_irqhandler(
			dev->otherend_id, scsifront_intr,
			IRQF_SAMPLE_RANDOM, "scsifront", info);

	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
		goto free_sring;
	}
	info->irq = err;

	return 0;

/* free resource */
free_sring:
	scsifront_free(info);

	return err;
}


static int scsifront_init_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct xenbus_transaction xbt;
	int err;

	DPRINTK("%s\n",__FUNCTION__);

	err = scsifront_alloc_ring(info);
	if (err)
		return err;
	DPRINTK("%u %u\n", info->ring_ref, info->evtchn);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
	}

	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
				info->ring_ref);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
		goto fail;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
				irq_to_evtchn_port(info->irq));

	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
		goto fail;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto free_sring;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);
free_sring:
	/* free resource */
	scsifront_free(info);
	
	return err;
}


static int scsifront_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	struct vscsifrnt_info *info;
	struct Scsi_Host *host;
	int i, err = -ENOMEM;
	char name[DEFAULT_TASK_COMM_LEN];

	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
	if (!host) {
		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
		return err;
	}
	info = (struct vscsifrnt_info *) host->hostdata;
	info->host = host;


	dev_set_drvdata(&dev->dev, info);
	info->dev  = dev;

	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
		info->shadow[i].next_free = i + 1;
		init_waitqueue_head(&(info->shadow[i].wq_reset));
		info->shadow[i].wait_reset = 0;
	}
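	/* 0x0fff is used as an end-of-list marker for the shadow free list */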
	info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff;

	err = scsifront_init_ring(info);
	if (err) {
		scsi_host_put(host);
		return err;
	}

	init_waitqueue_head(&info->wq);
	spin_lock_init(&info->io_lock);
	spin_lock_init(&info->shadow_lock);

	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no);

	info->kthread = kthread_run(scsifront_schedule, info, name);
	if (IS_ERR(info->kthread)) {
		err = PTR_ERR(info->kthread);
		info->kthread = NULL;
		printk(KERN_ERR "scsifront: kthread start err %d\n", err);
		goto free_sring;
	}

	host->max_id      = VSCSIIF_MAX_TARGET;
	host->max_channel = 0;
	host->max_lun     = VSCSIIF_MAX_LUN;
	host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;

	err = scsi_add_host(host, &dev->dev);
	if (err) {
		printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err);
		goto free_sring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

free_sring:
	/* free resource */
	scsifront_free(info);
	return err;
}

static int scsifront_remove(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename);

	if (info->kthread) {
		kthread_stop(info->kthread);
		info->kthread = NULL;
	}

	scsifront_free(info);
	
	return 0;
}


static int scsifront_disconnect(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct Scsi_Host *host = info->host;

	DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename);

	/* 
	  When this function is executed,  all devices of 
	  Frontend have been deleted. 
	  Therefore, it need not block I/O before remove_host.
	*/

	scsi_remove_host(host);
	xenbus_frontend_closed(dev);

	return 0;
}

#define VSCSIFRONT_OP_ADD_LUN	1
#define VSCSIFRONT_OP_DEL_LUN	2

static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
	struct xenbus_device *dev = info->dev;
	int i, err = 0;
	char str[64], state_str[64];
	char **dir;
	unsigned int dir_n = 0;
	unsigned int device_state;
	unsigned int hst, chn, tgt, lun;
	struct scsi_device *sdev;

	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < dir_n; i++) {
		/* read status */
		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
			&device_state);
		if (XENBUS_EXIST_ERR(err))
			continue;
		
		/* virtual SCSI device */
		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
			"%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* front device state path */
		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);

		switch (op) {
		case VSCSIFRONT_OP_ADD_LUN:
			if (device_state == XenbusStateInitialised) {
				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
				if (sdev) {
					printk(KERN_ERR "scsifront: Device already in use.\n");
					scsi_device_put(sdev);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateClosed);
				} else {
					scsi_add_device(info->host, chn, tgt, lun);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateConnected);
				}
			}
			break;
		case VSCSIFRONT_OP_DEL_LUN:
			if (device_state == XenbusStateClosing) {
				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
				if (sdev) {
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateClosed);
				}
			}
			break;
		default:
			break;
		}
	}
	
	kfree(dir);
	return;
}




static void scsifront_backend_changed(struct xenbus_device *dev,
				enum xenbus_state backend_state)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	DPRINTK("%p %u %u\n", dev, dev->state, backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateClosed:
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		if (xenbus_read_driver_state(dev->nodename) ==
			XenbusStateInitialised) {
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		}
		
		if (dev->state == XenbusStateConnected)
			break;
			
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		scsifront_disconnect(info);
		break;

	case XenbusStateReconfiguring:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfiguring);
		break;

	case XenbusStateReconfigured:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	}
}


static struct xenbus_device_id scsifront_ids[] = {
	{ "vscsi" },
	{ "" }
};


static struct xenbus_driver scsifront_driver = {
	.name			= "vscsi",
	.ids			= scsifront_ids,
	.probe			= scsifront_probe,
	.remove			= scsifront_remove,
/* 	.resume			= scsifront_resume, */
	.otherend_changed	= scsifront_backend_changed,
};

int scsifront_xenbus_init(void)
{
	return xenbus_register_frontend(&scsifront_driver);
}

void scsifront_xenbus_unregister(void)
{
	xenbus_unregister_driver(&scsifront_driver);
}
Example #12
0
/* Initialize the CPM Ethernet on SCC.
 */
int __init scc_enet_init(void)
{
	struct net_device *dev;
	struct scc_enet_private *cep;
	int i, j;
	unsigned char	*eap;
	unsigned long	mem_addr;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm8260_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	immap_t		*immap;
	volatile	iop8260_t	*io;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (immap_t *)IMAP_ADDR;	/* and to internal registers */
	io = &immap->im_ioport;

	bd = (bd_t *)__res;

	/* Allocate some private information.
	*/
	cep = (struct scc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
	if (cep == NULL)
		return -ENOMEM;

	__clear_user(cep,sizeof(*cep));
	spin_lock_init(&cep->lock);

	/* Create an Ethernet device instance.
	*/
	dev = init_etherdev(0, 0);

	/* Get pointer to SCC area in parameter RAM.
	*/
	ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]);

	/* And another to the SCC register area.
	*/
	sccp = (volatile scc_t *)(&immap->im_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case someone left it running.
	*/
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Configure port C and D pins for SCC Ethernet.  This
	 * won't work for all SCC possibilities....it will be
	 * board/port specific.
	 */
	io->iop_pparc |=
		(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_pdirc &=
		~(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc &= 
		~(PC_ENET_RENA | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc |= PC_ENET_CLSN;

	io->iop_ppard |= (PD_ENET_RXD | PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird |= (PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird &= ~PD_ENET_RXD;
	io->iop_psord |= PD_ENET_TXD;
	io->iop_psord &= ~(PD_ENET_RXD | PD_ENET_TENA);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	immap->im_cpmux.cmx_scr &= ~CMX_CLK_MASK;
	immap->im_cpmux.cmx_scr |= CMX_CLK_ROUTE;

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	i = m8260_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
	ep->sen_genscc.scc_rbase = i;
	cep->rx_bd_base = (cbd_t *)&immap->im_dprambase[i];

	i = m8260_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
	ep->sen_genscc.scc_tbase = i;
	cep->tx_bd_base = (cbd_t *)&immap->im_dprambase[i];

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	ep->sen_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB;
	ep->sen_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	*/
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	*/
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 *
	 * This is supplied in the board information structure, so we
	 * copy that into the controller.
	 */
	eap = (unsigned char *)&(ep->sen_paddrh);
	for (i=5; i>=0; i--)
		*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		*/
		mem_addr = __get_free_page(GFP_KERNEL);

		/* Initialize the BD for every fragment in the page.
		*/
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
			CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer we have also set the
	 * interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	*/
	request_irq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	*/
	sccp->scc_dsr = 0xd555;

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_pmsr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
	io->iop_pparc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_psorc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdirc |= (PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdatc &= ~(PC_EST8260_ENET_LOOPBACK | PC_EST8260_ENET_SQE);
	io->iop_pdatc |= PC_EST8260_ENET_NOTFD;

	dev->base_addr = (unsigned long)ep;
	dev->priv = cep;

	/* The CPM Ethernet specific entries in the device structure. */
	dev->open = scc_enet_open;
	dev->hard_start_xmit = scc_enet_start_xmit;
	dev->tx_timeout = scc_enet_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = scc_enet_close;
	dev->get_stats = scc_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	/* And last, enable the transmit and receive processing.
	*/
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	printk("%s: SCC ENET Version 0.1, ", dev->name);
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	return 0;
}
Example #13
0
static int notify_exec(struct mm_struct *mm)
{
	int ret = 0;
	char *buf, *path;
	struct vm_area_struct *vma;
	struct file *exe_file;

	if (!sim_is_simulator())
		return 1;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (buf == NULL)
		return 0;

	exe_file = get_mm_exe_file(mm);
	if (exe_file == NULL)
		goto done_free;

	path = file_path(exe_file, buf, PAGE_SIZE);
	if (IS_ERR(path))
		goto done_put;

	down_read(&mm->mmap_sem);
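	/* find the VMA that maps the executable image */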
	for (vma = current->mm->mmap; ; vma = vma->vm_next) {
		if (vma == NULL) {
			up_read(&mm->mmap_sem);
			goto done_put;
		}
		if (vma->vm_file == exe_file)
			break;
	}

	/*
	 * Notify simulator of an ET_DYN object so we know the load address.
	 * The somewhat cryptic overuse of SIM_CONTROL_DLOPEN allows us
	 * to be backward-compatible with older simulator releases.
	 */
	if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) {
		char buf[64];
		int i;

		snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start);
		for (i = 0; ; ++i) {
			char c = buf[i];
			__insn_mtspr(SPR_SIM_CONTROL,
				     (SIM_CONTROL_DLOPEN
				      | (c << _SIM_CONTROL_OPERATOR_BITS)));
			if (c == '\0') {
				ret = 1; /* success */
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);

	sim_notify_exec(path);
done_put:
	fput(exe_file);
done_free:
	free_page((unsigned long)buf);
	return ret;
}
Example #14
0
int __init m8xx_enet_init(void)
{
	struct device *dev;
	struct cpm_enet_private *cep;
	int i, j;
	unsigned char	*eap;
	unsigned long	mem_addr;
	pte_t		*pte;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm8xx_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	immap_t		*immap;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (immap_t *)IMAP_ADDR;	/* and to internal registers */

	bd = (bd_t *)res;

	/* Allocate some private information.
	*/
	cep = (struct cpm_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
	/*memset(cep, 0, sizeof(*cep));*/
	__clear_user(cep,sizeof(*cep));

	/* Create an Ethernet device instance.
	*/
	dev = init_etherdev(0, 0);

	/* Get pointer to SCC area in parameter RAM.
	*/
	ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);

	/* And another to the SCC register area.
	*/
	sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case EPPC-Bug started it.
	*/
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Cookbook style from the MPC860 manual.....
	 * Not all of this is necessary if EPPC-Bug has initialized
	 * the network.
	 * So far we are lucky, all board configurations use the same
	 * pins, or at least the same I/O Port for these functions.....
	 * It can't last though......
	 */

	/* Configure port A pins for Txd and Rxd.
	*/
	immap->im_ioport.iop_papar |= (PA_ENET_RXD | PA_ENET_TXD);
	immap->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD);
	immap->im_ioport.iop_paodr &= ~PA_ENET_TXD;

	/* Configure port C pins to enable CLSN and RENA.
	*/
	immap->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA);
	immap->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA);
	immap->im_ioport.iop_pcso |= (PC_ENET_CLSN | PC_ENET_RENA);

	/* Configure port A for TCLK and RCLK.
	*/
	immap->im_ioport.iop_papar |= (PA_ENET_TCLK | PA_ENET_RCLK);
	immap->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	cp->cp_sicr &= ~SICR_ENET_MASK;
	cp->cp_sicr |= SICR_ENET_CLKRT;

	/* Manual says set SDDR, but I can't find anything with that
	 * name.  I think it is a misprint, and should be SDCR.  This
	 * has already been set by the communication processor initialization.
	 */

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
	ep->sen_genscc.scc_rbase = i;
	cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[i];

	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
	ep->sen_genscc.scc_tbase = i;
	cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[i];

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	/* Issue init Rx BD command for SCC.
	 * Manual says to perform an Init Rx parameters here.  We have
	 * to perform both Rx and Tx because the SCC may have been
	 * already running.
	 * In addition, we have to do it later because we don't yet have
	 * all of the BD control/status set properly.
	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);
	 */

	/* Initialize function code registers for big-endian.
	*/
	ep->sen_genscc.scc_rfcr = SCC_EB;
	ep->sen_genscc.scc_tfcr = SCC_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	*/
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBUF_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBUF_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	*/
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 *
	 * If we performed a MBX diskless boot, the Ethernet controller
	 * has been initialized and we copy the address out into our
	 * own structure.
	 */
	eap = (unsigned char *)&(ep->sen_paddrh);
#ifndef CONFIG_MBX
	for (i=5; i>=0; i--)
		*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];
#else
	for (i=5; i>=0; i--)
		dev->dev_addr[i] = *eap++;
#endif

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		*/
		mem_addr = __get_free_page(GFP_KERNEL);

		/* Make it uncached.
		*/
		pte = va_to_pte(&init_task, mem_addr);
		pte_val(*pte) |= _PAGE_NO_CACHE;
		flush_tlb_page(current->mm->mmap, mem_addr);

		/* Initialize the BD for every fragment in the page.
		*/
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer we have also set the
	 * interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	*/
	cpm_install_handler(CPMVEC_ENET, cpm_enet_interrupt, dev);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	*/
	sccp->scc_dsr = 0xd555;

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_pmsr = (SCC_PMSR_ENCRC | SCC_PMSR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
#ifdef CONFIG_MBX
	immap->im_ioport.iop_pcpar |= PC_ENET_TENA;
	immap->im_ioport.iop_pcdir &= ~PC_ENET_TENA;
#endif

#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC)
	cp->cp_pbpar |= PB_ENET_TENA;
	cp->cp_pbdir |= PB_ENET_TENA;

	/* And while we are here, set the configuration to enable ethernet.
	*/
	*((volatile uint *)RPX_CSR_ADDR) &= ~BCSR0_ETHLPBK;
	*((volatile uint *)RPX_CSR_ADDR) |=
			(BCSR0_ETHEN | BCSR0_COLTESTDIS | BCSR0_FULLDPLXDIS);
#endif

#ifdef CONFIG_BSEIP
	cp->cp_pbpar |= PB_ENET_TENA;
	cp->cp_pbdir |= PB_ENET_TENA;

	/* BSE uses port B and C for PHY control.
	*/
	cp->cp_pbpar &= ~(PB_BSE_POWERUP | PB_BSE_FDXDIS);
	cp->cp_pbdir |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
	cp->cp_pbdat |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);

	immap->im_ioport.iop_pcpar &= ~PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcdir |= PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcso &= ~PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcdat &= ~PC_BSE_LOOPBACK;
#endif

	dev->base_addr = (unsigned long)ep;
	dev->priv = cep;
	dev->name = "CPM_ENET";

	/* The CPM Ethernet specific entries in the device structure. */
	dev->open = cpm_enet_open;
	dev->hard_start_xmit = cpm_enet_start_xmit;
	dev->stop = cpm_enet_close;
	dev->get_stats = cpm_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	/* And last, enable the transmit and receive processing.
	*/
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	printk("CPM ENET Version 0.1, ");
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	return 0;
}
Example #15
0
static int goldfish_pipe_device_init(struct platform_device *pdev,
				     struct goldfish_pipe_dev *dev)
{
	int err;

	tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
		     (unsigned long)dev);

	err = devm_request_irq(&pdev->dev, dev->irq,
			       goldfish_pipe_interrupt,
			       IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	init_miscdevice(&dev->miscdev);
	err = misc_register(&dev->miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->pdev_dev = &pdev->dev;
	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			     GFP_KERNEL);
	if (!dev->pipes) {
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those buffers
	 * needs to be contained in a single physical page. The easiest choice
	 * is to just allocate a page and place the buffers in it.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
	dev->buffers = (struct goldfish_pipe_dev_buffers *)
		__get_free_page(GFP_KERNEL);
	if (!dev->buffers) {
		kfree(dev->pipes);
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/* Send the buffer addresses to the host */
	write_pa_addr(&dev->buffers->signalled_pipe_buffers,
		      dev->base + PIPE_REG_SIGNAL_BUFFER,
		      dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);

	writel(MAX_SIGNALLED_PIPES,
	       dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

	write_pa_addr(&dev->buffers->open_command_params,
		      dev->base + PIPE_REG_OPEN_BUFFER,
		      dev->base + PIPE_REG_OPEN_BUFFER_HIGH);

	platform_set_drvdata(pdev, dev);
	return 0;
}
Example #16
0
static int cxacru_bind(struct usbatm_data *usbatm_instance,
		       struct usb_interface *intf, const struct usb_device_id *id)
{
	struct cxacru_data *instance;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
	int ret;

	
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		dbg("cxacru_bind: no memory for instance data");
		return -ENOMEM;
	}

	instance->usbatm = usbatm_instance;
	instance->modem_type = (struct cxacru_modem_type *) id->driver_info;

	mutex_init(&instance->poll_state_serialize);
	instance->poll_state = CXPOLL_STOPPED;
	instance->line_status = -1;
	instance->adsl_status = -1;

	mutex_init(&instance->adsl_state_serialize);

	instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!instance->rcv_buf) {
		dbg("cxacru_bind: no memory for rcv_buf");
		ret = -ENOMEM;
		goto fail;
	}
	instance->snd_buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!instance->snd_buf) {
		dbg("cxacru_bind: no memory for snd_buf");
		ret = -ENOMEM;
		goto fail;
	}
	instance->rcv_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!instance->rcv_urb) {
		dbg("cxacru_bind: no memory for rcv_urb");
		ret = -ENOMEM;
		goto fail;
	}
	instance->snd_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!instance->snd_urb) {
		dbg("cxacru_bind: no memory for snd_urb");
		ret = -ENOMEM;
		goto fail;
	}

	if (!cmd_ep) {
		dbg("cxacru_bind: no command endpoint");
		ret = -ENODEV;
		goto fail;
	}

	if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_INT) {
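		/* the command endpoint is interrupt type, so use interrupt URBs */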
		usb_fill_int_urb(instance->rcv_urb,
			usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
			instance->rcv_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->rcv_done, 1);

		usb_fill_int_urb(instance->snd_urb,
			usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
			instance->snd_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->snd_done, 4);
	} else {
		usb_fill_bulk_urb(instance->rcv_urb,
			usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
			instance->rcv_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->rcv_done);

		usb_fill_bulk_urb(instance->snd_urb,
			usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
			instance->snd_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->snd_done);
	}

	mutex_init(&instance->cm_serialize);

	INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);

	usbatm_instance->driver_data = instance;

	usbatm_instance->flags = (cxacru_card_status(instance) ? 0 : UDSL_SKIP_HEAVY_INIT);

	return 0;

 fail:
	free_page((unsigned long) instance->snd_buf);
	free_page((unsigned long) instance->rcv_buf);
	usb_free_urb(instance->snd_urb);
	usb_free_urb(instance->rcv_urb);
	kfree(instance);

	return ret;
}
Example #17
0
static int __init des_s390_init(void)
{
	int ret;

	if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
	    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
		return -EOPNOTSUPP;

	ret = crypto_register_alg(&des_alg);
	if (ret)
		goto des_err;
	ret = crypto_register_alg(&ecb_des_alg);
	if (ret)
		goto ecb_des_err;
	ret = crypto_register_alg(&cbc_des_alg);
	if (ret)
		goto cbc_des_err;
	ret = crypto_register_alg(&des3_alg);
	if (ret)
		goto des3_err;
	ret = crypto_register_alg(&ecb_des3_alg);
	if (ret)
		goto ecb_des3_err;
	ret = crypto_register_alg(&cbc_des3_alg);
	if (ret)
		goto cbc_des3_err;

	if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ret = crypto_register_alg(&ctr_des_alg);
		if (ret)
			goto ctr_des_err;
		ret = crypto_register_alg(&ctr_des3_alg);
		if (ret)
			goto ctr_des3_err;
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto ctr_mem_err;
		}
	}
out:
	return ret;

ctr_mem_err:
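	/* error unwind: unregister algorithms in reverse order of registration */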
	crypto_unregister_alg(&ctr_des3_alg);
ctr_des3_err:
	crypto_unregister_alg(&ctr_des_alg);
ctr_des_err:
	crypto_unregister_alg(&cbc_des3_alg);
cbc_des3_err:
	crypto_unregister_alg(&ecb_des3_alg);
ecb_des3_err:
	crypto_unregister_alg(&des3_alg);
des3_err:
	crypto_unregister_alg(&cbc_des_alg);
cbc_des_err:
	crypto_unregister_alg(&ecb_des_alg);
ecb_des_err:
	crypto_unregister_alg(&des_alg);
des_err:
	goto out;
}
Example #18
0
static int do_nodes_add(struct hotplug_context *ctx)
{
	struct rpc_communicator *comm;
	char *page;
	kerrighed_node_t node;
	int ret;

	ret = hotplug_start_request(ctx);
	if (ret)
		goto out;

	mutex_lock(&hotplug_mutex);

	ret = check_add_req(ctx);
	if (ret)
		goto out_finish;

	ret = -ENOMEM;
	page = (char *)__get_free_page(GFP_KERNEL);
	if (!page)
		goto out_finish;

	ret = krgnodelist_scnprintf(page, PAGE_SIZE, ctx->node_set.v);
	BUG_ON(ret >= PAGE_SIZE);
	printk("kerrighed: [ADD] Adding nodes %s ...\n", page);

	free_page((unsigned long)page);

	comm = ctx->ns->rpc_comm;
	ret = rpc_connect_mask(comm, &ctx->node_set.v);
	if (ret)
		goto out_finish;

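	/*
	 * One completion is counted for every current member and for every
	 * node being added; the wait below ends when all of them are in.
	 */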
	atomic_set(&nr_to_wait,
		   num_online_krgnodes() + krgnodes_weight(ctx->node_set.v));

	/*
	 * Send the request to all new members.
	 * Current limitation: only nodes that have not been started yet can be
	 * added to a running cluster (i.e. a node can't move from one
	 * subcluster to another).
	 */
	ret = do_cluster_start(ctx);
	if (ret)
		goto err_close;

	/* Send request to all members of the current cluster */
	for_each_online_krgnode(node)
		rpc_async(NODE_ADD, comm, node,
			  &ctx->node_set, sizeof(ctx->node_set));

	wait_event(all_added_wqh, atomic_read(&nr_to_wait) == 0);
	printk("kerrighed: [ADD] Adding nodes succeeded.\n");

out_finish:
	mutex_unlock(&hotplug_mutex);
	hotplug_finish_request(ctx);
out:
	if (ret)
		printk(KERN_ERR "kerrighed: [ADD] Adding nodes failed! err=%d\n",
		       ret);
	return ret;

err_close:
	rpc_close_mask(comm, &ctx->node_set.v);
	goto out_finish;
}
Example #19
0
int proc_avs_0_volume_write(struct file *file, const char __user *buf,
                           unsigned long count, void *data)
{
#define cMaxAttenuationE2			63

   int logarithmicAttenuation[cMaxAttenuationE2] =
	{
		 0,  0,  1,  1,  1,  1,  2,  2,  2,  3,  3,  3,  4,  4,  4,  5,
		 5,  5,  6,  6,  6,  7,  7,  8,  8,  9,  9,  9, 10, 10, 11, 11,
		12, 13, 13, 14, 14, 15, 16, 16, 17, 18, 19, 19, 20, 21, 22, 23,
		24, 25, 27, 28, 29, 31, 33, 35, 37, 40, 43, 47, 51, 58, 70
	};

	char 		*page;
	char		*myString;
	ssize_t 	ret = -ENOMEM;

	printk("%s %ld - ", __FUNCTION__, count);

	page = (char *)__get_free_page(GFP_KERNEL);
	if (page)
	{
		int number = 0;
		struct snd_kcontrol ** kcontrol = pseudoGetControls(&number);
		struct snd_kcontrol *single_control = NULL;
		int vLoop;
		int volume = 0;

		ret = -EFAULT;
		if(file == NULL && data == NULL)
			strncpy(page, buf, count);
		else
		{
			if (copy_from_user(page, buf, count))
				goto out;
		}

		myString = (char *) kmalloc(count + 1, GFP_KERNEL);
		if (!myString)
		{
			ret = -ENOMEM;
			goto out;
		}
		strncpy(myString, page, count);
		myString[count] = '\0';

		printk("%s\n", myString);

		sscanf(myString, "%d", &volume);

#if defined(CUBEREVO) || defined(CUBEREVO_MINI) || defined(CUBEREVO_MINI2) || defined(CUBEREVO_MINI_FTA) || defined(CUBEREVO_250HD) || defined(CUBEREVO_2000HD) || defined(CUBEREVO_9500HD) || defined(TF7700) || defined(UFS912) || defined(UFS922) || defined(HL101) || defined(VIP1_V2) || defined(VIP2_V1) || defined(HOMECAST5101) || defined(ATEVIO7500) || defined(HS7810A) || defined(HS7110) || defined(WHITEBOX) || defined(IPBOX9900) || defined(IPBOX99) || defined(IPBOX55) || defined(ADB_BOX)
		current_volume = volume;
#else
/* Dagobert: 04.10.2009: e2 delivers values from 0 to 63 dB. The ak4705
 * needs values which are a little bit more sophisticated:
 * its range goes from mute (value 0) over -60 dB (value 1) up to +6 dB
 * (value 34), represented by 6 bits in 2 dB steps.
 *
 * So we have a value range of 0 - 34. In my opinion the algorithm must be:
 *
 * current_volume = ((volume * 100) / 63 * 34) / 100;
 * current_volume = 34 - current_volume;
 *
 * Maybe someone could test it.
 */

		current_volume = 31 - volume / 4;
#endif
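		/*
		 * Illustrative only (not from the original driver): the proposed
		 * mapping above gives 34 - ((0 * 100) / 63 * 34) / 100 = 34 (+6 dB)
		 * for e2 volume 0 and 34 - 34 = 0 (mute) for e2 volume 63, while
		 * the formula compiled in, 31 - volume / 4, maps 0 -> 31 and
		 * 63 -> 16.
		 */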

		for (vLoop = 0; vLoop < number; vLoop++)
		{
			if (kcontrol[vLoop]->private_value == PSEUDO_ADDR(master_volume))
			{
				single_control = kcontrol[vLoop];
				//printk("Find master_volume control at %p\n", single_control);
				break;
			}
		}

		if ((kcontrol != NULL) && (single_control != NULL))
		{
			struct snd_ctl_elem_value ucontrol;

			current_e2_volume = volume;

/* Dagobert: 04.10.2009: e2 delivers values from 0 to 63 db. the current
 * pseudo_mixer needs a value from 0 to "-70".
 * ->see pseudo_mixer.c line 722.
 */

/* Dagobert: 06.10.2009: Volume is a logarithmic value ...
 *
 * A linear scaling of the range would be:
 *
 *      volume = ((volume * 100) / cMaxVolumeE2 * cMaxVolumePlayer) / 100;
 *      volume = 0 - volume;
 */
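/* Illustrative (not from the original source): an e2 volume of 32 selects
 * logarithmicAttenuation[32] == 12, so -12 is written to the mixer. Note
 * that an e2 value of 63 would index one past the end of the 63-entry table.
 */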
			volume = 0 - logarithmicAttenuation[current_e2_volume];

			//printk("Pseudo Mixer controls = %p\n", kcontrol);
			ucontrol.value.integer.value[0] = volume;
			ucontrol.value.integer.value[1] = volume;
			ucontrol.value.integer.value[2] = volume;
			ucontrol.value.integer.value[3] = volume;
			ucontrol.value.integer.value[4] = volume;
			ucontrol.value.integer.value[5] = volume;

			snd_pseudo_integer_put(single_control, &ucontrol);
		}
		else
		{
			printk("Pseudo Mixer does not deliver controls\n");
		}

		if(current_input == SCART)
	  		avs_command_kernel(AVSIOSVOL, (void*) current_volume);

		kfree(myString);
	}

	ret = count;
out:

	free_page((unsigned long)page);
	return ret;
}
Example #20
0
/* This is the main crypto function - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg, *auth_sg, *src_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret = 0;

	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (unlikely(ses_ptr->cdata.init != 0 &&
			(ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0)))
		{
			dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
			return -EINVAL;
		}

		ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
			return ret;
		}

		ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
			   dst_sg, caop->len);

		release_user_pages(ses_ptr);
	} else { /* TLS and normal cases. Here auth data are usually small
	          * so we just copy them to a free page, instead of trying
	          * to map them.
	          */
		unsigned char* auth_buf = NULL;
		struct scatterlist tmp;

		if (unlikely(caop->auth_len > PAGE_SIZE)) {
			dprintk(1, KERN_ERR, "auth data len is excessive.\n");
			return -EINVAL;
		}

		auth_buf = (char *)__get_free_page(GFP_KERNEL);
		if (unlikely(!auth_buf)) {
			dprintk(1, KERN_ERR, "unable to get a free page.\n");
			return -ENOMEM;
		}

		if (caop->auth_src && caop->auth_len > 0) {
			if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
				dprintk(1, KERN_ERR, "unable to copy auth data from userspace.\n");
				ret = -EFAULT;
				goto free_auth_buf;
			}

			sg_init_one(&tmp, auth_buf, caop->auth_len);
			auth_sg = &tmp;
		} else {
			auth_sg = NULL;
		}

		if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
			ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
				   dst_sg, caop->len);
		} else {
			int dst_len;

			if (unlikely(ses_ptr->cdata.init == 0 ||
					ses_ptr->cdata.stream == 0 ||
					ses_ptr->cdata.aead == 0))
			{
				dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
				ret = -EINVAL;
				goto free_auth_buf;
			}

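			/* on encryption the destination must also hold the authentication tag */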
			if (caop->op == COP_ENCRYPT)
				dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
			else
				dst_len = caop->len;

			ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
					  kcaop->task, kcaop->mm, &src_sg, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					   src_sg, dst_sg, caop->len);
		}

		release_user_pages(ses_ptr);

free_auth_buf:
		free_page((unsigned long)auth_buf);
	}

	return ret;
}
Example #21
0
static int fifo_open(struct inode * inode,struct file * filp)
{
	int retval = 0;
	unsigned long page;

	switch( filp->f_mode ) {

	case 1:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		filp->f_op = &connecting_fifo_fops;
		if (!PIPE_READERS(*inode)++)
			wake_up_interruptible(&PIPE_WAIT(*inode));
		if (!(filp->f_flags & O_NONBLOCK) && !PIPE_WRITERS(*inode)) {
			PIPE_RD_OPENERS(*inode)++;
			while (!PIPE_WRITERS(*inode)) {
				if (current->signal & ~current->blocked) {
					retval = -ERESTARTSYS;
					break;
				}
				interruptible_sleep_on(&PIPE_WAIT(*inode));
			}
			if (!--PIPE_RD_OPENERS(*inode))
				wake_up_interruptible(&PIPE_WAIT(*inode));
		}
		while (PIPE_WR_OPENERS(*inode))
			interruptible_sleep_on(&PIPE_WAIT(*inode));
		if (PIPE_WRITERS(*inode))
			filp->f_op = &read_fifo_fops;
		if (retval && !--PIPE_READERS(*inode))
			wake_up_interruptible(&PIPE_WAIT(*inode));
		break;
	
	case 2:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		if ((filp->f_flags & O_NONBLOCK) && !PIPE_READERS(*inode)) {
			retval = -ENXIO;
			break;
		}
		filp->f_op = &write_fifo_fops;
		if (!PIPE_WRITERS(*inode)++)
			wake_up_interruptible(&PIPE_WAIT(*inode));
		if (!PIPE_READERS(*inode)) {
			PIPE_WR_OPENERS(*inode)++;
			while (!PIPE_READERS(*inode)) {
				if (current->signal & ~current->blocked) {
					retval = -ERESTARTSYS;
					break;
				}
				interruptible_sleep_on(&PIPE_WAIT(*inode));
			}
			if (!--PIPE_WR_OPENERS(*inode))
				wake_up_interruptible(&PIPE_WAIT(*inode));
		}
		while (PIPE_RD_OPENERS(*inode))
			interruptible_sleep_on(&PIPE_WAIT(*inode));
		if (retval && !--PIPE_WRITERS(*inode))
			wake_up_interruptible(&PIPE_WAIT(*inode));
		break;
	
	case 3:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		filp->f_op = &rdwr_fifo_fops;
		if (!PIPE_READERS(*inode)++)
			wake_up_interruptible(&PIPE_WAIT(*inode));
		while (PIPE_WR_OPENERS(*inode))
			interruptible_sleep_on(&PIPE_WAIT(*inode));
		if (!PIPE_WRITERS(*inode)++)
			wake_up_interruptible(&PIPE_WAIT(*inode));
		while (PIPE_RD_OPENERS(*inode))
			interruptible_sleep_on(&PIPE_WAIT(*inode));
		break;

	default:
		retval = -EINVAL;
	}
	if (retval || PIPE_BASE(*inode))
		return retval;
	page = __get_free_page(GFP_KERNEL);
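	/*
	 * __get_free_page() may sleep, so another opener can have installed
	 * a buffer in the meantime; if so, drop ours and reuse theirs.
	 */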
	if (PIPE_BASE(*inode)) {
		free_page(page);
		return 0;
	}
	if (!page)
		return -ENOMEM;
	PIPE_LOCK(*inode) = 0;
	PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
	PIPE_BASE(*inode) = (char *) page;
	return 0;
}
Example #22
0
static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
                               size_t length, loff_t *ppos)
{
    int host, channel, id, lun;
    char *buffer, *p;
    int err;

    if (!buf || length > PAGE_SIZE)
        return -EINVAL;

    buffer = (char *)__get_free_page(GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;

    err = -EFAULT;
    if (copy_from_user(buffer, buf, length))
        goto out;

    err = -EINVAL;
    if (length < PAGE_SIZE)
        buffer[length] = '\0';
    else if (buffer[PAGE_SIZE-1])
        goto out;

    /*
     * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
     * with  "0 1 2 3" replaced by your "Host Channel Id Lun".
     */
    if (!strncmp("scsi add-single-device", buffer, 22)) {
        p = buffer + 23;

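        /*
         * Each simple_strtoul() leaves p at the terminating space, so the
         * next field starts at p + 1.
         */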
        host = simple_strtoul(p, &p, 0);
        channel = simple_strtoul(p + 1, &p, 0);
        id = simple_strtoul(p + 1, &p, 0);
        lun = simple_strtoul(p + 1, &p, 0);

        err = scsi_add_single_device(host, channel, id, lun);

        /*
         * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
         * with  "0 1 2 3" replaced by your "Host Channel Id Lun".
         */
    } else if (!strncmp("scsi remove-single-device", buffer, 25)) {
        p = buffer + 26;

        host = simple_strtoul(p, &p, 0);
        channel = simple_strtoul(p + 1, &p, 0);
        id = simple_strtoul(p + 1, &p, 0);
        lun = simple_strtoul(p + 1, &p, 0);

        err = scsi_remove_single_device(host, channel, id, lun);
    }

    /*
     * convert success returns so that we return the
     * number of bytes consumed.
     */
    if (!err)
        err = length;

out:
    free_page((unsigned long)buffer);
    return err;
}
Example #23
0
static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
                     const struct firmware *fw1,
                     const struct firmware *fw2)
{
    unsigned char *buffer;
    struct usbatm_data *usbatm = instance->usbatm;
    struct usb_device *usb_dev = usbatm->usb_dev;
    int actual_length;
    int ret = 0;
    int offset;

    usb_dbg(usbatm, "%s entered\n", __func__);

    if (!(buffer = (unsigned char *)__get_free_page(GFP_KERNEL))) {
        ret = -ENOMEM;
        usb_dbg(usbatm, "%s: no memory for buffer!\n", __func__);
        goto out;
    }

    if (!usb_ifnum_to_if(usb_dev, 2)) {
        ret = -ENODEV;
        usb_dbg(usbatm, "%s: interface not found!\n", __func__);
        goto out_free;
    }

    /* URB 7 */
    if (dl_512_first) {    /* some modems need a read before writing the firmware */
        ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
                   buffer, 0x200, &actual_length, 2000);

        if (ret < 0 && ret != -ETIMEDOUT)
            usb_warn(usbatm, "%s: read BLOCK0 from modem failed (%d)!\n", __func__, ret);
        else
            usb_dbg(usbatm, "%s: BLOCK0 downloaded (%d bytes)\n", __func__, ret);
    }

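    /* stream the first firmware image through the page-sized buffer in PAGE_SIZE chunks */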
    /* URB 8 : both leds are static green */
    for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
        int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
        memcpy(buffer, fw1->data + offset, thislen);

        ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
                   buffer, thislen, &actual_length, DATA_TIMEOUT);

        if (ret < 0) {
            usb_err(usbatm, "%s: write BLOCK1 to modem failed (%d)!\n", __func__, ret);
            goto out_free;
        }
        usb_dbg(usbatm, "%s: BLOCK1 uploaded (%zu bytes)\n", __func__, fw1->size);
    }

    /* USB led blinking green, ADSL led off */

    /* URB 11 */
    ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
               buffer, 0x200, &actual_length, DATA_TIMEOUT);

    if (ret < 0) {
        usb_err(usbatm, "%s: read BLOCK2 from modem failed (%d)!\n", __func__, ret);
        goto out_free;
    }
    usb_dbg(usbatm, "%s: BLOCK2 downloaded (%d bytes)\n", __func__, actual_length);

    /* URBs 12 to 139 - USB led blinking green, ADSL led off */
    for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
        int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
        memcpy(buffer, fw2->data + offset, thislen);

        ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
                   buffer, thislen, &actual_length, DATA_TIMEOUT);

        if (ret < 0) {
            usb_err(usbatm, "%s: write BLOCK3 to modem failed (%d)!\n", __func__, ret);
            goto out_free;
        }
    }
    usb_dbg(usbatm, "%s: BLOCK3 uploaded (%zu bytes)\n", __func__, fw2->size);

    /* USB led static green, ADSL led static red */

    /* URB 142 */
    ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
               buffer, 0x200, &actual_length, DATA_TIMEOUT);

    if (ret < 0) {
        usb_err(usbatm, "%s: read BLOCK4 from modem failed (%d)!\n", __func__, ret);
        goto out_free;
    }

    /* success */
    usb_dbg(usbatm, "%s: BLOCK4 downloaded (%d bytes)\n", __func__, actual_length);

    /* Delay to allow firmware to start up. We can do this here
       because we're in our own kernel thread anyway. */
    msleep_interruptible(1000);

    if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
        usb_err(usbatm, "%s: setting interface to %d failed (%d)!\n", __func__, instance->params.altsetting, ret);
        goto out_free;
    }

    /* Enable software buffering, if requested */
    if (sw_buffering)
        speedtch_set_swbuff(instance, 1);

    /* Magic spell; don't ask us what this does */
    speedtch_test_sequence(instance);

    ret = 0;

out_free:
    free_page((unsigned long)buffer);
out:
    return ret;
}
Example #24
0
static noinline_for_stack
struct dentry *decode_by_path(struct super_block *sb, aufs_bindex_t bindex,
			      ino_t ino, __u32 *fh, int fh_len,
			      struct au_nfsd_si_lock *nsi_lock)
{
	struct dentry *dentry, *h_parent, *root;
	struct super_block *h_sb;
	char *pathname, *p;
	struct vfsmount *h_mnt;
	struct au_branch *br;
	int err;
	struct nameidata nd;
	struct path path;

	LKTRTrace("b%d\n", bindex);
	SiMustAnyLock(sb);

	br = au_sbr(sb, bindex);
	/* au_br_get(br); */
	h_mnt = br->br_mnt;
	h_sb = h_mnt->mnt_sb;
	LKTRTrace("%s, h_decode_fh\n", au_sbtype(h_sb));
	h_parent = au_call_decode_fh(h_mnt, fh + Fh_tail, fh_len - Fh_tail,
				     fh[Fh_h_type], h_acceptable,
				     /*context*/NULL);
	dentry = h_parent;
	if (unlikely(!h_parent || IS_ERR(h_parent))) {
		AuWarn1("%s decode_fh failed, %ld\n",
			au_sbtype(h_sb), PTR_ERR(h_parent));
		goto out;
	}
	dentry = NULL;
	if (unlikely(au_test_anon(h_parent))) {
		AuWarn1("%s decode_fh returned a disconnected dentry\n",
			au_sbtype(h_sb));
		goto out_h_parent;
	}

	dentry = ERR_PTR(-ENOMEM);
	pathname = (void *)__get_free_page(GFP_NOFS);
	if (unlikely(!pathname))
		goto out_h_parent;

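	/*
	 * Build h_parent's path relative to the branch root into the scratch
	 * page, then look it up to obtain a connected dentry.
	 */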
	root = sb->s_root;
	path.mnt = h_mnt;
	di_read_lock_parent(root, !AuLock_IR);
	path.dentry = au_h_dptr(root, bindex);
	di_read_unlock(root, !AuLock_IR);
	p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
	dentry = (void *)p;
	if (IS_ERR(p))
		goto out_pathname;

	LKTRTrace("%s\n", p);
	si_read_unlock(sb);
	err = vfsub_path_lookup(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
	dentry = ERR_PTR(err);
	if (unlikely(err))
		goto out_relock;

	dentry = ERR_PTR(-ENOENT);
	AuDebugOn(au_test_anon(nd.dentry));
	if (unlikely(!nd.dentry->d_inode))
		goto out_nd;

	if (ino != nd.dentry->d_inode->i_ino) {
		path.mnt = nd.mnt;
		path.dentry = nd.dentry;
		dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
	} else
		dentry = dget(nd.dentry);

 out_nd:
	path_release(&nd);
 out_relock:
	if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
		if (!IS_ERR(dentry)) {
			dput(dentry);
			dentry = ERR_PTR(-ESTALE);
		}
 out_pathname:
	free_page((unsigned long)pathname);
 out_h_parent:
	dput(h_parent);
 out:
	/* au_br_put(br); */
	AuTraceErrPtr(dentry);
	return dentry;
}
Example #25
0
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
	//return (pte_t *)alloc_pte_page();
}
Example #26
0
static int mixcom_write_proc(struct file *file, const char *buffer,
	u_long count, void *data)
{
	struct proc_dir_entry *entry = (struct proc_dir_entry *)data;
	struct net_device *dev = (struct net_device *)entry->parent->data;
	struct comx_channel *ch = dev->priv;
	struct mixcom_privdata *hw = ch->HW_privdata;
	char *page;
	int value;

	if (!(page = (char *)__get_free_page(GFP_KERNEL))) {
		return -ENOMEM;
	}

	count = min_t(unsigned long, count, PAGE_SIZE - 1);
	if (copy_from_user(page, buffer, count)) {
		free_page((unsigned long)page);
		return -EFAULT;
	}
	*(page + count) = 0;
	if (count && *(page + count - 1) == '\n') {
		*(page + count - 1) = 0;
	}
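	/* the name of the proc entry selects which hardware parameter is updated */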

	if (strcmp(entry->name, FILENAME_IO) == 0) {
		value = simple_strtoul(page, NULL, 0);
		if (value != 0x180 && value != 0x280 && value != 0x380) {
			printk(KERN_ERR "MIXCOM: incorrect io address!\n");
		} else {
			dev->base_addr = MIXCOM_DEV_BASE(value,hw->channel);
		}
	} else if (strcmp(entry->name, FILENAME_IRQ) == 0) {
		value = simple_strtoul(page, NULL, 0); 
		if (value < 0 || value > 15 || mixcom_set_irq[value]==0xFF) {
			printk(KERN_ERR "MIXCOM: incorrect irq value!\n");
		} else {
			dev->irq = value;	
		}
	} else if (strcmp(entry->name, FILENAME_CLOCK) == 0) {
		if (strncmp("ext", page, 3) == 0) {
			hw->clock = 0;
		} else {
			int kbps;

			kbps = simple_strtoul(page, NULL, 0);
			if (!kbps) {
				hw->clock = 0;
			} else {
				hw->clock = kbps;
			}
			if (hw->clock < 32 || hw->clock > 2000) {
				hw->clock = 0;
				printk(KERN_ERR "MIXCOM: invalid clock rate!\n");
			}
		}
		if (ch->init_status & HW_OPEN && ch->HW_set_clock) {
			ch->HW_set_clock(dev);
		}
	} else if (strcmp(entry->name, FILENAME_CHANNEL) == 0) {
		value = simple_strtoul(page, NULL, 0);
		if (value > 2) {
			printk(KERN_ERR "Invalid channel number\n");
		} else {
			dev->base_addr += (hw->channel - value) * MIXCOM_CHANNEL_OFFSET;
			hw->channel = value;
		}
	} else {
		printk(KERN_ERR "hw_read_proc: internal error, filename %s\n",
			entry->name);
		free_page((unsigned long)page);
		return -EBADF;
	}

	setup_twin(dev);

	free_page((unsigned long)page);
	return count;
}
Example #27
0
static int __devinit serial_m3110_probe(struct spi_device *spi)
{
	struct uart_max3110 *max;
	void *buffer;
	u16 res;
	int ret = 0;

	max = kzalloc(sizeof(*max), GFP_KERNEL);
	if (!max)
		return -ENOMEM;

	/* Set spi info */
	spi->bits_per_word = 16;
	max->clock = MAX3110_HIGH_CLK;

	spi_setup(spi);

	max->port.type = PORT_MAX3100;
	max->port.fifosize = 2;		/* Only have 16b buffer */
	max->port.ops = &serial_m3110_ops;
	max->port.line = 0;
	max->port.dev = &spi->dev;
	max->port.uartclk = 115200;

	max->spi = spi;
	strcpy(max->name, spi->modalias);
	max->irq = (u16)spi->irq;

	mutex_init(&max->thread_mutex);
	mutex_init(&max->io_mutex);

	max->word_7bits = 0;
	max->parity = 0;
	max->baud = 0;

	max->cur_conf = 0;
	max->uart_flags = 0;

	/* Check if reading configuration register returns something sane */

	res = RC_TAG;
	ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
	if (ret < 0 || res == 0 || res == 0xffff) {
		dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
									res);
		ret = -ENODEV;
		goto err_get_page;
	}

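	/* a single free page backs the console transmit circular buffer */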
	buffer = (void *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_get_page;
	}
	max->con_xmit.buf = buffer;
	max->con_xmit.head = 0;
	max->con_xmit.tail = 0;

	init_waitqueue_head(&max->wq);

	max->main_thread = kthread_run(max3110_main_thread,
					max, "max3110_main");
	if (IS_ERR(max->main_thread)) {
		ret = PTR_ERR(max->main_thread);
		goto err_kthread;
	}

	spi_set_drvdata(spi, max);
	pmax = max;

	/* Give membase a pseudo value to pass serial_core's check */
	max->port.membase = (void *)0xff110000;
	uart_add_one_port(&serial_m3110_reg, &max->port);

	return 0;

err_kthread:
	free_page((unsigned long)buffer);
err_get_page:
	kfree(max);
	return ret;
}
Example #28
0
/**
 *	goldfish_pipe_open - open a channel to the AVD
 *	@inode: inode of device
 *	@file: file struct of opener
 *
 *	Create a new pipe link between the emulator and the user application.
 *	Each new request produces a new pipe.
 *
 *	Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 *	right now so this is fine. A move to 64bit will need to rework this
 *	addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}
Example #29
0
int sunos_poll(struct poll * ufds, size_t nfds, int timeout)
{
        int i,j, count, fdcount, error, retflag;
	struct poll * fdpnt;
	struct poll * fds, *fds1;
	select_table wait_table, *wait;
	struct select_table_entry *entry;

	if ((error = verify_area(VERIFY_READ, ufds, nfds*sizeof(struct poll))))
		return error;

	if (nfds > NR_OPEN)
		return -EINVAL;

	entry = (struct select_table_entry *)__get_free_page(GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	fds = (struct poll *)kmalloc(nfds*sizeof(struct poll), GFP_KERNEL);
	if (!fds) {
		free_page((unsigned long) entry);
		return -ENOMEM;
	}

	memcpy_fromfs(fds, ufds, nfds*sizeof(struct poll));

	if (timeout < 0)
		current->timeout = 0x7fffffff;
	else {
		current->timeout = jiffies + POLL_ROUND_UP(timeout, (1000/HZ));
		if (current->timeout <= jiffies)
			current->timeout = 0;
	}

	count = 0;
	wait_table.nr = 0;
	wait_table.entry = entry;
	wait = &wait_table;

	for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) {
		i = fdpnt->fd;
		fdpnt->revents = 0;
		if (!current->files->fd[i] || !current->files->fd[i]->f_inode)
			fdpnt->revents = POLLNVAL;
	}
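	/*
	 * Main scan: poll every descriptor through the select tables and
	 * sleep between passes until an event is found, a signal is pending,
	 * or the timeout expires.
	 */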
repeat:
	current->state = TASK_INTERRUPTIBLE;
	for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) {
		i = fdpnt->fd;

		if(i < 0) continue;
		if (!current->files->fd[i] || !current->files->fd[i]->f_inode) continue;

		if ((fdpnt->events & LINUX_POLLIN)
		&& check(SEL_IN, wait, current->files->fd[i])) {
			if (fdpnt->events & POLLIN)
				retflag = POLLIN;
			if (fdpnt->events & POLLRDNORM)
				retflag = POLLRDNORM;
			fdpnt->revents |= retflag; 
			count++;
			wait = NULL;
		}

		if ((fdpnt->events & LINUX_POLLOUT) &&
		check(SEL_OUT, wait, current->files->fd[i])) {
			fdpnt->revents |= (LINUX_POLLOUT & fdpnt->events);
			count++;
			wait = NULL;
		}

		if (check(SEL_EX, wait, current->files->fd[i])) {
			fdpnt->revents |= POLLHUP;
			count++;
			wait = NULL;
		}
	}

	if ((current->signal & (~current->blocked))) {
		free_wait(&wait_table);
		free_page((unsigned long) entry);
		kfree(fds);
		current->timeout = 0;
		current->state = TASK_RUNNING;
		return -EINTR;
	}

	wait = NULL;
	if (!count && current->timeout > jiffies) {
		schedule();
		goto repeat;
	}

	free_wait(&wait_table);
	free_page((unsigned long) entry);

	/* OK, now copy the revents fields back to user space. */
	fds1 = fds;
	fdcount = 0;
	for(i=0; i < (int)nfds; i++, ufds++, fds++) {
		if (fds->revents) {
			fdcount++;
		}
		put_fs_word(fds->revents, &ufds->revents);
	}
	kfree(fds1);
	current->timeout = 0;
	current->state = TASK_RUNNING;
	return fdcount;
}
Example #30
0
pmd_t *get_pointer_table (void)
{
	pmd_t *pmdp = NULL;
	unsigned long flags;
	struct ptable_desc *dp = ptable_list.next;
	int i;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (PD_NONEFREE (dp)) {

		if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
			return 0;
		}

		if (!(dp->page = __get_free_page (GFP_KERNEL))) {
			kfree (dp);
			return 0;
		}

		nocache_page (dp->page);

		dp->alloced = 0;
		/* put at head of list */
		save_flags(flags);
		cli();
		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}

	for (i = 0; i < 8; i++)
		if (PD_TABLEFREE (dp, i)) {
			PD_MARKUSED (dp, i);
			pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
			break;
		}

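	/*
	 * If that was the page's last free table, move the descriptor to the
	 * tail so that pages with free slots stay at the head of the list.
	 */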
	if (PD_NONEFREE (dp)) {
		/* move to end of list */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		dp->next = ptable_list.next->prev;
		dp->prev = ptable_list.prev;
		ptable_list.prev->next = dp;
		ptable_list.prev = dp;
		restore_flags(flags);
	}

	memset (pmdp, 0, PTABLE_SIZE);

	return pmdp;
}