Code Example #1
static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
				   size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		p = NULL;
	else  {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (p == NULL)
		return -EEXIST;

	return count;
}
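
Every listing on this page shows the same write-side pattern: take the rwlock with write_lock(), perform the update, and release it with write_unlock() before doing anything that may block or return. A minimal self-contained sketch of that pattern follows; table_store(), table_lock, and table are made-up names for illustration, not taken from any of the projects listed here:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(table_lock);	/* guards 'table' below */
static void *table[16];

static int table_store(unsigned int slot, void *item)
{
	int err = 0;

	if (slot >= ARRAY_SIZE(table))
		return -ERANGE;

	write_lock(&table_lock);	/* exclusive vs. readers and writers */
	if (table[slot])
		err = -EEXIST;		/* decide under the lock ... */
	else
		table[slot] = item;
	write_unlock(&table_lock);	/* ... but report after dropping it */

	return err;
}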
Code Example #2
File: ledtrig-netdev.c Project: 4pao/openwrt
static ssize_t led_interval_store(struct device *dev,
				  struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
	int ret = -EINVAL;
	char *after;
	unsigned long value = simple_strtoul(buf, &after, 10);
	size_t count = after - buf;

	if (*after && isspace(*after))
		count++;

	/* impose some basic bounds on the timer interval */
	if (count == size && value >= 5 && value <= 10000) {
		write_lock(&trigger_data->lock);
		trigger_data->interval = msecs_to_jiffies(value);
		set_baseline_state(trigger_data); // resets timer
		write_unlock(&trigger_data->lock);
		ret = count;
	}

	return ret;
}
Code Example #3
/**
 * integrity_inode_get - find or allocate an iint associated with an inode
 * @inode: pointer to the inode
 * @return: allocated iint
 *
 * Caller must lock i_mutex
 */
struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
{
	struct rb_node **p;
	struct rb_node *node, *parent = NULL;
	struct integrity_iint_cache *iint, *test_iint;

	iint = integrity_iint_find(inode);
	if (iint)
		return iint;

	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
	if (!iint)
		return NULL;

	write_lock(&integrity_iint_lock);
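	/* The global iint rb-tree may only be modified while holding
	 * integrity_iint_lock for write. */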

	p = &integrity_iint_tree.rb_node;
	while (*p) {
		parent = *p;
		test_iint = rb_entry(parent, struct integrity_iint_cache,
				     rb_node);
		if (inode < test_iint->inode)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	iint->inode = inode;
	node = &iint->rb_node;
	inode->i_flags |= S_IMA;
	rb_link_node(node, parent, p);
	rb_insert_color(node, &integrity_iint_tree);

	write_unlock(&integrity_iint_lock);
	return iint;
}
Code Example #4
void chroot_to_physical_root(struct prev_root *prev_root)
{
	struct krg_namespace *krg_ns = find_get_krg_ns();
	struct fs_struct *fs = current->fs;
	struct path root, prev_pwd;

	BUG_ON(!krg_ns);
	put_krg_ns(krg_ns);
//	BUG_ON(fs->users != 1);

	get_physical_root(&root);
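	/* Swap root and pwd in one write-locked section so concurrent
	 * readers of current->fs never see a half-updated fs_struct. */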
	write_lock(&fs->lock);
	prev_root->path = fs->root;
	fs->root = root;
	path_get(&root);
	prev_pwd = fs->pwd;
	fs->pwd = root;
	write_unlock(&fs->lock);
	path_put(&prev_pwd);

	BUG_ON(prev_root->path.mnt->mnt_ns != current->nsproxy->mnt_ns);
	prev_root->nsproxy = current->nsproxy;
	rcu_assign_pointer(current->nsproxy, &krg_ns->root_nsproxy);
}
Code Example #5
void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap((void __force *)addr);
}
Code Example #6
File: ip6_flowlabel.c Project: jing-git/rt-n56u
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	write_lock(&ip6_fl_lock);
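	/* Walk every hash chain under the flow-label lock, freeing expired
	 * entries with no users and computing when the timer must next fire. */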

	for (i=0; i<=FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl=*flp) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	write_unlock(&ip6_fl_lock);
}
Code Example #7
File: search.c Project: mgross029/android_kernel
/* Remove references, if any, to @node from coord cache */
void cbk_cache_invalidate(const znode * node /* node to remove from cache */ ,
			  reiser4_tree * tree/* tree to remove node from */)
{
	cbk_cache_slot *slot;
	cbk_cache *cache;
	int i;

	assert("nikita-350", node != NULL);
	assert("nikita-1479", LOCK_CNT_GTZ(rw_locked_tree));

	cache = &tree->cbk_cache;
	assert("nikita-2470", cbk_cache_invariant(cache));

	write_lock(&(cache->guard));
	for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) {
		if (slot->node == node) {
			list_move_tail(&slot->lru, &cache->lru);
			slot->node = NULL;
			break;
		}
	}
	write_unlock(&(cache->guard));
	assert("nikita-2471", cbk_cache_invariant(cache));
}
Code Example #8
File: ami304_compass.c Project: Feeyo/LG_Optimus2X
static int ami304_ioctl(struct inode *inode, struct file *file, unsigned int cmd,unsigned long arg)
{
	char strbuf[AMI304_BUFSIZE];
	int controlbuf[10];
	void __user *data;
	int retval=0;
	int mode=0;
#if DEBUG_AMI304
	printk(KERN_ERR  "ami304 - %s \n",__FUNCTION__);
#endif

	//check the authority is root or not
	if (!capable(CAP_SYS_ADMIN)) {
		retval = -EPERM;
		goto err_out;
	}

	switch (cmd) {
		case AMI304_IOCTL_INIT:
			read_lock(&ami304_data.lock);
			mode = ami304_data.mode;
			read_unlock(&ami304_data.lock);
			AMI304_Init(mode);
			break;

		case AMI304_IOCTL_READ_CHIPINFO:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			AMI304_ReadChipInfo(strbuf, AMI304_BUFSIZE);
			if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304_IOCTL_READ_SENSORDATA:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			AMI304_ReadSensorData(strbuf, AMI304_BUFSIZE);
			if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304_IOCTL_READ_POSTUREDATA:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			AMI304_ReadPostureData(strbuf, AMI304_BUFSIZE);
			if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304_IOCTL_READ_CALIDATA:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			AMI304_ReadCaliData(strbuf, AMI304_BUFSIZE);
			if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304_IOCTL_READ_CONTROL:
			read_lock(&ami304mid_data.ctrllock);
			memcpy(controlbuf, &ami304mid_data.controldata[0], sizeof(controlbuf));
			read_unlock(&ami304mid_data.ctrllock);
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_to_user(data, controlbuf, sizeof(controlbuf))) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304_IOCTL_SET_CONTROL:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(controlbuf, data, sizeof(controlbuf))) {
				retval = -EFAULT;
				goto err_out;
			}
			write_lock(&ami304mid_data.ctrllock);
			memcpy(&ami304mid_data.controldata[0], controlbuf, sizeof(controlbuf));
			write_unlock(&ami304mid_data.ctrllock);
			break;

		case AMI304_IOCTL_SET_MODE:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(&mode, data, sizeof(mode))) {
				retval = -EFAULT;
				goto err_out;
			}
			AMI304_SetMode(mode);
			break;

		default:
			printk(KERN_ERR "%s not supported = 0x%04x", __FUNCTION__, cmd);
			retval = -ENOIOCTLCMD;
			break;
	}

err_out:
	return retval;
}
Code Example #9
File: fork.c Project: genua/anoubis_os
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			write_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			write_unlock(&fs->lock);
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
Code Example #10
File: zfcp_erp.c Project: Addision/LVS
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
	int retval;
	struct zfcp_adapter *adapter = erp_action->adapter;
	unsigned long flags;

	read_lock_irqsave(&zfcp_data.config_lock, flags);
	write_lock(&adapter->erp_lock);
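	/* Lock order: config_lock (read) outside, adapter->erp_lock (write)
	 * inside; both are dropped around the blocking do_action call below. */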

	zfcp_erp_strategy_check_fsfreq(erp_action);

	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
		zfcp_erp_action_dequeue(erp_action);
		retval = ZFCP_ERP_DISMISSED;
		goto unlock;
	}

	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
		retval = ZFCP_ERP_FAILED;
		goto check_target;
	}

	zfcp_erp_action_to_running(erp_action);

	/* no lock to allow for blocking operations */
	write_unlock(&adapter->erp_lock);
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
	retval = zfcp_erp_strategy_do_action(erp_action);
	read_lock_irqsave(&zfcp_data.config_lock, flags);
	write_lock(&adapter->erp_lock);

	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
		retval = ZFCP_ERP_CONTINUES;

	switch (retval) {
	case ZFCP_ERP_NOMEM:
		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
			++adapter->erp_low_mem_count;
			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
		}
		if (adapter->erp_total_count == adapter->erp_low_mem_count)
			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
		else {
			zfcp_erp_strategy_memwait(erp_action);
			retval = ZFCP_ERP_CONTINUES;
		}
		goto unlock;

	case ZFCP_ERP_CONTINUES:
		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
			--adapter->erp_low_mem_count;
			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
		}
		goto unlock;
	}

check_target:
	retval = zfcp_erp_strategy_check_target(erp_action, retval);
	zfcp_erp_action_dequeue(erp_action);
	retval = zfcp_erp_strategy_statechange(erp_action, retval);
	if (retval == ZFCP_ERP_EXIT)
		goto unlock;
	if (retval == ZFCP_ERP_SUCCEEDED)
		zfcp_erp_strategy_followup_success(erp_action);
	if (retval == ZFCP_ERP_FAILED)
		zfcp_erp_strategy_followup_failed(erp_action);

 unlock:
	write_unlock(&adapter->erp_lock);
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);

	if (retval != ZFCP_ERP_CONTINUES)
		zfcp_erp_action_cleanup(erp_action, retval);

	return retval;
}
Code Example #11
File: qsd_lib.c Project: hocks/lustre-release
/*
 * Release a qsd_instance. Companion of qsd_init(). This releases all data
 * structures associated with the quota slave (on-disk objects, lquota entry
 * tables, ...).
 * This function should be called when the OSD is shutting down.
 *
 * \param env - is the environment passed by the caller
 * \param qsd - is the qsd instance to shutdown
 */
void qsd_fini(const struct lu_env *env, struct qsd_instance *qsd)
{
	int	qtype;
	ENTRY;

	if (unlikely(qsd == NULL))
		RETURN_EXIT;

	CDEBUG(D_QUOTA, "%s: initiating QSD shutdown\n", qsd->qsd_svname);
	write_lock(&qsd->qsd_lock);
	qsd->qsd_stopping = true;
	write_unlock(&qsd->qsd_lock);

	/* remove qsd proc entry */
	if (qsd->qsd_proc != NULL) {
		lprocfs_remove(&qsd->qsd_proc);
		qsd->qsd_proc = NULL;
	}

	/* stop the writeback thread */
	qsd_stop_upd_thread(qsd);

	/* shutdown the reintegration threads */
	for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
		if (qsd->qsd_type_array[qtype] == NULL)
			continue;
		qsd_stop_reint_thread(qsd->qsd_type_array[qtype]);
	}

	if (qsd->qsd_ns != NULL) {
		qsd->qsd_ns = NULL;
	}

	/* free per-quota type data */
	for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++)
		qsd_qtype_fini(env, qsd, qtype);

	/* deregister connection to the quota master */
	qsd->qsd_exp_valid = false;
	lustre_deregister_lwp_item(&qsd->qsd_exp);

	/* release per-filesystem information */
	if (qsd->qsd_fsinfo != NULL) {
		mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
		/* remove from the list of fsinfo */
		cfs_list_del_init(&qsd->qsd_link);
		mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);
		qsd_put_fsinfo(qsd->qsd_fsinfo);
		qsd->qsd_fsinfo = NULL;
	}

	/* release quota root directory */
	if (qsd->qsd_root != NULL) {
		lu_object_put(env, &qsd->qsd_root->do_lu);
		qsd->qsd_root = NULL;
	}

	/* release reference on dt_device */
	if (qsd->qsd_dev != NULL) {
		lu_ref_del(&qsd->qsd_dev->dd_lu_dev.ld_reference, "qsd", qsd);
		lu_device_put(&qsd->qsd_dev->dd_lu_dev);
		qsd->qsd_dev = NULL;
	}

	CDEBUG(D_QUOTA, "%s: QSD shutdown completed\n", qsd->qsd_svname);
	OBD_FREE_PTR(qsd);
	EXIT;
}
Code Example #12
static int star_motion_ioctl(struct inode *inode, struct file *file, unsigned int cmd,unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	unsigned char data[MAX_MOTION_DATA_LENGTH] = {0,};
	int buf[5] = {0,};
	int flag = 0;
	int delay = 0;
	unsigned char tempbuf[200] = {0,};	/* MPU3050 i2c MAX data length */
	int ret = 0;
	unsigned char value;

	switch (cmd) 
	{
	case MOTION_IOCTL_ENABLE_DISABLE:
		 /*
			0 : disable sensor
			1:  orientation (tilt)
			2:  accelerometer
			3: tap
			4: shake
		*/
		 //printk(".............star_motion_ioctl................\n"); 
		flag = STAR_SENSOR_NONE;

		if(atomic_read(&accel_flag)){
		 //printk(".............if(atomic_read(&snap_flag)){................\n"); 			
			flag |= STAR_ACCELEROMETER;
		}

		if(atomic_read(&tilt_flag)){
		 //printk(".............if(atomic_read(&tilt_flag)){................\n"); 
			flag |= STAR_TILT;
		}
		
		if(atomic_read(&shake_flag)){
		 //printk(".............if(atomic_read(&shake_flag)){................\n"); 
			flag |= STAR_SHAKE;
		}

		if(atomic_read(&tap_flag)){
		 //printk(".............if(atomic_read(&tap_flag)){................\n"); 			
			flag |= STAR_TAP;
		}

		if(atomic_read(&flip_flag)){
		 //printk(".............if(atomic_read(&flip_flag)){................\n"); 			
			flag |= STAR_FLIP;
		}

		if(atomic_read(&snap_flag)){
		 //printk(".............if(atomic_read(&snap_flag)){................\n"); 			
			flag |= STAR_SNAP;
		}
		
		if(atomic_read(&gyro_flag)){
		 //printk(".............if(atomic_read(&snap_flag)){................\n"); 			
			flag |= STAR_GYRO;
		}
		
		 if (copy_to_user(argp,&flag, sizeof(flag)))
		 {
		 		 //printk(".............MOTION_IOCTL_SNAP................\n"); 
			return -EFAULT;
		 } 
		 break;
	case MOTION_IOCTL_ACCEL_RAW:
		  if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		   }		  
		 /* 	buf[0], [1], [2] = accel_x,  accel_y,  accel_z;	 */
		 
		   atomic_set(&accel_x,buf[0]);  
		   atomic_set(&accel_y,buf[1]);
		   atomic_set(&accel_z,buf[2]);
		   
		   //motion_send_tilt_detection(buf[0],buf[1],buf[2]);
   		 //printk(".............MOTION_IOCTL_TILT................\n"); 
		   break;  		 		
	case MOTION_IOCTL_GYRO_RAW:
		  if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		   }		  
		 /* 	buf[0], [1], [2] = gyro_x,  gyro_y,  gyro_z; */
		 
		   atomic_set(&gyro_x,buf[0]);  
		   atomic_set(&gyro_y,buf[1]);
		   atomic_set(&gyro_z,buf[2]);
		   
		   //motion_send_tilt_detection(buf[0],buf[1],buf[2]);
   		 //printk(".............MOTION_IOCTL_TILT................\n"); 
		   break;  		 		   			   
	case MOTION_IOCTL_SENSOR_DELAY:
		  delay = atomic_read(&tilt_delay);

		  //printk("MOTION_IOCTL_SENSOR_DELAY[%d]",delay);
		  
		 if (copy_to_user(argp, &delay, sizeof(delay)))
		 {
		 	 return -EFAULT;
		 }
		 break;
		 
	case MOTION_IOCTL_TILT:
		  if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		   }		  
		 /* 	buf[0], [1], [2] = roll,  pitch,  yaw;	 */
		 
		   atomic_set(&tilt_roll,buf[0]);  
		   atomic_set(&tilt_pitch,buf[1]);
		   atomic_set(&tilt_yaw,buf[2]);
		   //motion_send_tilt_detection(buf[0],buf[1],buf[2]);
   		 //printk(".............MOTION_IOCTL_TILT................\n"); 
		   break;  		 		   
       case MOTION_IOCTL_SHAKE:   
	   	  if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		   }		  
		 /* 			buf[0] = event;   		  */
		 //printk(".............MOTION_IOCTL_SHAKE................\n"); 		  
		   motion_send_shake_detection(buf[0]);
		 break;
	case MOTION_IOCTL_FLIP:
		 if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		   }		  
		 //printk(".............MOTION_IOCTL_FLIP................\n"); 		

		   motion_send_flip_detection(buf[0]);
		 break;
	case MOTION_IOCTL_TAP:         
		 if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		   }		  
		 /* 
			buf[0] = type;   
			buf[1] = direction;			 
		  */
  		 //printk(".............MOTION_IOCTL_TAP................\n"); 
		   motion_send_tap_detection(buf[0],buf[1]);
		 break;
	case MOTION_IOCTL_SNAP:    
		 if (copy_from_user(&buf, argp, sizeof(buf)))
		  {
			return -EFAULT;
		  }
		 /* 
			buf[0] = direction;
		  */
		 //printk(".............MOTION_IOCTL_SNAP................\n"); 
		 motion_send_snap_detection(buf[0]);
		 break;	 
	case MOTION_IOCTL_MPU3050_SLEEP_MODE:
		 //printk(".............MOTION_IOCTL_MPU3050_SLEEP_MODE................\n");
		 //motion_gyro_sensor_sleep_mode();
		 motion_sensor_power_off();
		 
		 //twl4030_i2c_write_u8(0x13, 0x00,0x1b );
		 //msleep(100);	
		 break;
	case MOTION_IOCTL_MPU3050_SLEEP_WAKE_UP:
		 //printk(".............MOTION_IOCTL_MPU3050_SLEEP_WAKE_UP................\n");
		 //motion_gyro_sensor_sleep_wake_up();
		 
		 motion_sensor_power_on();
		 break;	 
	case MOTION_IOCTL_MPU3050_I2C_READ:
		//printk(".............MOTION_IOCTL_MPU3050_I2C_READ................\n");
		if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
		{
			//printk("FAIL!!!!!!copy_from_user.................MOTION_IOCTL_MPU3050_I2C_READ");
			return -EFAULT;
		}

		write_lock(&getbuflock);
		memcpy(&accelrwbuf[0], rwbuf, sizeof(rwbuf));
		write_unlock(&getbuflock); 	

		if (rwbuf[1] < 1)
		{
			printk("EINVAL ERROR......I2C SLAVE MOTION_IOCTL_I2C_READ : rwbuf[1] < 1...\n");
			
		        return -EINVAL;
		}

		if(rwbuf[0] == GYRO_I2C_SLAVE_ADDR)
		{
			//printk("############ (_0_)############ rwbuf[2]: %d(%x) / rwbuf[3]: %d(%x)/ rwbuf[1] : %d(%x)\n",rwbuf[2],rwbuf[2], rwbuf[3],rwbuf[3], rwbuf[1], rwbuf[1]);
			NvGyroAccelI2CGetRegs(star_motion_dev->hOdmGyroAccel, rwbuf[2] ,&rwbuf[3] , rwbuf[1]);
			if (ret < 0){
				printk("MOTION_IOCTL_I2C_READ : GYRO_I2C_SLAVE_ADDR Address ERROR[%d]\n",rwbuf[0]);
				return -EINVAL;
			}

			if (copy_to_user(argp, &rwbuf, sizeof(rwbuf)))
			{
		       printk("EINVAL ERROR.### GYRO ### I2C SLAVE MOTION_IOCTL_I2C_READ : rwbuf[1] < 1...\n");
		 	 return -EFAULT;
			}
		}
		else if(accelrwbuf[0] == 0x0F)
		{
			//printk("#### (_0_) #### accelrwbuf[2]: %d(%x) / accelrwbuf[3]: %d(%x)/ accelrwbuf[1] : %d(%x)\n",accelrwbuf[2],accelrwbuf[2], accelrwbuf[3],accelrwbuf[3], accelrwbuf[1], accelrwbuf[1]);
			//printk("######################## accel get(read) ##########################\n");
			if ((!accelrwbuf)){
				printk("### EEROR #### accelrwbuf is NULL pointer \n");
				return -1;
			} else{
				NvAccelerometerI2CGetRegsPassThrough (accelrwbuf[2] ,&accelrwbuf[3] , accelrwbuf[1]);
			}
			
			if (ret < 0){
				printk("MOTION_IOCTL_I2C_READ : ACCEL_I2C_SLAVE_ADDR Address ERROR[%d]\n",accelrwbuf[0]);
				return -EINVAL;
			}

			if (copy_to_user(argp, &accelrwbuf, sizeof(accelrwbuf)))
			{
		       printk("EINVAL ERROR  ### ACCEL ## I2C SLAVE MOTION_IOCTL_I2C_READ : rwbuf[1] < 1...\n");
		 	 return -EFAULT;
			}
		}
		#if 0 /**/
		else if(accelrwbuf[0] == 0x0e)
		{
			//printk("#### (_0_) #### accelrwbuf[2]: %d(%x) / accelrwbuf[3]: %d(%x)/ accelrwbuf[1] : %d(%x)\n",accelrwbuf[2],accelrwbuf[2], accelrwbuf[3],accelrwbuf[3], accelrwbuf[1], accelrwbuf[1]);
			//printk("######################## accel get(read) ##########################\n");
			if ((!accelrwbuf)){
				printk("### EEROR #### accelrwbuf is NULL pointer \n");
				return -1;
			} else{
				NvGyroAccelI2CGetRegs (accelrwbuf[2] ,&accelrwbuf[3] , accelrwbuf[1]);
			}
			
			if (ret < 0){
				printk("MOTION_IOCTL_I2C_READ : ACCEL_I2C_SLAVE_ADDR Address ERROR[%d]\n",accelrwbuf[0]);
				return -EINVAL;
			}

			if (copy_to_user(argp, &accelrwbuf, sizeof(accelrwbuf)))
			{
		       printk("EINVAL ERROR  ### ACCEL ## I2C SLAVE MOTION_IOCTL_I2C_READ : rwbuf[1] < 1...\n");
		 	 return -EFAULT;
			}
		}
		#endif
		else
		{
			printk("......I2C SLAVE ADDRESS ERROR!!!...[0x%x]...\n",buf[0]);
			return -EINVAL;
		}
		
		
			
		break;
	case MOTION_IOCTL_MPU3050_I2C_WRITE:
		//printk(".............MOTION_IOCTL_MPU3050_I2C_WRITE................\n");
		if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
		{
		       printk("EINVAL ERROR.....copy_from_user.I2C SLAVE MOTION_IOCTL_I2C_WRITE \n");
			return -EFAULT;
		}
		/*	
   		rwbuf[0] = slave_addr;  // slave addr - GYRO(0x68-MPU) 
     		rwbuf[1] = 2;                   // number of bytes to write +1
     		rwbuf[2] = reg;               // register address
     		rwbuf[3] = value;          // register value		
		*/
		if (rwbuf[1] < 2)
		{
		       printk("MOTION_IOCTL_WRITE ..length ERROR!!![%d].....\n",rwbuf[1]);
			return -EINVAL;
		}

		if(rwbuf[0] == GYRO_I2C_SLAVE_ADDR)
		{
			//ret = mpu3050_i2c_write(&rwbuf[2],rwbuf[1]);
			NvGyroAccelI2CSetRegs(star_motion_dev->hOdmGyroAccel, rwbuf[2] ,&rwbuf[3] , rwbuf[1]-1);
			if (ret < 0){
				 printk("MOTION_IOCTL_WRITE  : GYRO_I2C_SLAVE_ADDR Address ERROR[%d]\n",rwbuf[0]);
				return -EINVAL;
			}
		}
		else if(rwbuf[0] == 0x0F)
		{
			//ret = kxtf9_i2c_write(&rwbuf[2],rwbuf[1]);
			//printk("(_6_)rwbuf[2]: %d(%x) /  rwbuf[1] : %d(%x)\n",rwbuf[2],rwbuf[2], rwbuf[1], rwbuf[1]);
//			printk("######################## accel set(write) ##########################\n");
			NvAccelerometerI2CSetRegsPassThrough(rwbuf[2] ,&rwbuf[3] , rwbuf[1]-1);
			//printk("(_7_) rwbuf[3]: %d(%x) \n", rwbuf[3],rwbuf[3]);
			if (ret < 0){
				  printk("[KXTF9] MOTION_IOCTL_WRITE  : ACCEL_I2C_SLAVE_ADDR ERROR[%d]\n",rwbuf[0]);
				return -EINVAL;
			}
		}
		#if 0
		else if(rwbuf[0] == 0x0e)
		{
			//ret = kxtf9_i2c_write(&rwbuf[2],rwbuf[1]);
			//printk("(_6_)rwbuf[2]: %d(%x) /  rwbuf[1] : %d(%x)\n",rwbuf[2],rwbuf[2], rwbuf[1], rwbuf[1]);
//			printk("######################## accel set(write) ##########################\n");
			NvGyroAccelI2CSetRegs(rwbuf[2] ,&rwbuf[3] , rwbuf[1]-1);
			//printk("(_7_) rwbuf[3]: %d(%x) \n", rwbuf[3],rwbuf[3]);
			if (ret < 0){
				  printk("[KXTF9] MOTION_IOCTL_WRITE  : ACCEL_I2C_SLAVE_ADDR ERROR[%d]\n",rwbuf[0]);
				return -EINVAL;
			}
		}
		#endif
		else
		{
			printk("......I2C SLAVE ADDRESS ERROR!!!...[0x%x]...\n",buf[0]);
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	return 0;
	
}
Code Example #13
int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval, 
		    int optlen)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if(level==SOL_IP && sk->type != SOCK_RAW)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if(level!=SOL_IPV6)
		goto out;

	if (optval == NULL)
		val=0;
	else if (get_user(val, (int *) optval))
		return -EFAULT;

	valbool = (val!=0);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->protocol != IPPROTO_UDP &&
			    sk->protocol != IPPROTO_TCP)
				break;

			if (sk->state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (!(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			if (sk->protocol == IPPROTO_TCP) {
				struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

				local_bh_disable();
				sock_prot_dec_use(sk->prot);
				sock_prot_inc_use(&tcp_prot);
				local_bh_enable();
				sk->prot = &tcp_prot;
				tp->af_specific = &ipv4_specific;
				sk->socket->ops = &inet_stream_ops;
				sk->family = PF_INET;
				tcp_sync_mss(sk, tp->pmtu_cookie);
			} else {
				local_bh_disable();
				sock_prot_dec_use(sk->prot);
				sock_prot_inc_use(&udp_prot);
				local_bh_enable();
				sk->prot = &udp_prot;
				sk->socket->ops = &inet_dgram_ops;
				sk->family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			if (pktopt)
				kfree_skb(pktopt);

			sk->destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
			atomic_dec(&inet6_sock_nr);
#endif
			MOD_DEC_USE_COUNT;
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_PKTINFO:
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_HOPLIMIT:
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_RTHDR:
		if (val < 0 || val > 2)
			goto e_inval;
		np->rxopt.bits.srcrt = val;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_AUTHHDR:
		np->rxopt.bits.authhdr = valbool;
		retv = 0;
		break;

	case IPV6_DSTOPTS:
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi fl;
		int junk;

		fl.fl6_flowlabel = 0;
		fl.oif = sk->bound_dev_if;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void*)(opt+1);

		retv = datagram_send_ctl(&msg, &fl, opt, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		if (sk->type == SOCK_STREAM) {
			if (opt) {
				struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
				if (!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
				    && sk->daddr != LOOPBACK4_IPV6) {
					tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
					tcp_sync_mss(sk, tp->pmtu_cookie);
				}
			}
			opt = xchg(&np->opt, opt);
			sk_dst_reset(sk);
		} else {
			write_lock(&sk->dst_lock);
			opt = xchg(&np->opt, opt);
			write_unlock(&sk->dst_lock);
			sk_dst_reset(sk);
		}

done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->type == SOCK_STREAM)
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_MULTICAST_IF:
		if (sk->type == SOCK_STREAM)
			goto e_inval;
		if (sk->bound_dev_if && sk->bound_dev_if != val)
			goto e_inval;

		if (__dev_get_by_index(val) == NULL) {
			retv = -ENODEV;
			break;
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_ROUTER_ALERT:
		retv = ip6_ra_control(sk, val, NULL);
		break;
	case IPV6_MTU_DISCOVER:
		if (val<0 || val>2)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;

#ifdef CONFIG_NETFILTER
	default:
		retv = nf_setsockopt(sk, PF_INET6, optname, optval, 
					    optlen);
		break;
#endif

	}
	release_sock(sk);

out:
	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
Code Example #14
File: cache.c Project: tonicbupt/nessDB
/*
 * REQUIRES:
 * a) cache list read(write) lock
 *
 * PROCESSES:
 * a) lock hash chain
 * b) find pair via key
 * c) if found, lock the value and return
 * d) if not found:
 *	d0) list lock
 *	d1) add to cache/evict/clean list
 *	d2) value lock
 *	d3) list unlock
 *	d4) disk-mtx lock
 *	d5) fetch from disk
 *	d6) disk-mtx unlock
 */
int cache_get_and_pin(struct cache_file *cf,
                      NID k,
                      struct node **n,
                      enum lock_type locktype)
{
	struct cpair *p;
	struct cache *c = cf->cache;

TRY_PIN:
	/* make room for me, please */
	_make_room(c);

	cpair_locked_by_key(c->table, k);
	p = cpair_htable_find(c->table, k);
	cpair_unlocked_by_key(c->table, k);
	if (p) {
		if (locktype != L_READ) {
			if (!try_write_lock(&p->value_lock))
				goto TRY_PIN;
		} else {
			if (!try_read_lock(&p->value_lock))
				goto TRY_PIN;
		}

		*n = p->v;
		return NESS_OK;
	}

	cpair_locked_by_key(c->table, k);
	p = cpair_htable_find(c->table, k);
	if (p) {
		/*
		 * if we go here, means that someone got before us
		 * try pin again
		 */
		cpair_unlocked_by_key(c->table, k);
		goto TRY_PIN;
	}

	struct tree_callback *tcb = cf->tcb;
	struct tree *tree = (struct tree*)cf->args;
	int r = tcb->fetch_node(tree, k, n);
	if (r != NESS_OK) {
		__PANIC("fetch node from disk error, nid [%" PRIu64 "], errno %d",
		        k,
		        r);
		goto ERR;
	}

	p = cpair_new();
	cpair_init(p, *n, cf);

	/* add to cache list */
	write_lock(&c->clock_lock);
	_cache_insert(c, p);
	write_unlock(&c->clock_lock);

	if (locktype != L_READ) {
		write_lock(&p->value_lock);
	} else {
		read_lock(&p->value_lock);
	}

	cpair_unlocked_by_key(c->table, k);

	return NESS_OK;

ERR:
	return NESS_ERR;
}
Code Example #15
File: reassembly.c Project: khenam/ardrone-kernel
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
Code Example #16
File: checkpoint.c Project: 020gzh/linux
/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*.  It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
{
	int nblocks, space_left;
	/* assert_spin_locked(&journal->j_state_lock); */

	nblocks = jbd2_space_needed(journal);
	while (jbd2_log_space_left(journal) < nblocks) {
		write_unlock(&journal->j_state_lock);
		mutex_lock(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete.  If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		write_lock(&journal->j_state_lock);
		if (journal->j_flags & JBD2_ABORT) {
			mutex_unlock(&journal->j_checkpoint_mutex);
			return;
		}
		spin_lock(&journal->j_list_lock);
		nblocks = jbd2_space_needed(journal);
		space_left = jbd2_log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;

			if (journal->j_committing_transaction)
				tid = journal->j_committing_transaction->t_tid;
			spin_unlock(&journal->j_list_lock);
			write_unlock(&journal->j_state_lock);
			if (chkpt) {
				jbd2_log_do_checkpoint(journal);
			} else if (jbd2_cleanup_journal_tail(journal) == 0) {
				/* We were able to recover space; yay! */
				;
			} else if (tid) {
				/*
				 * jbd2_journal_commit_transaction() may want
				 * to take the checkpoint_mutex if JBD2_FLUSHED
				 * is set.  So we need to temporarily drop it.
				 */
				mutex_unlock(&journal->j_checkpoint_mutex);
				jbd2_log_wait_commit(journal, tid);
				write_lock(&journal->j_state_lock);
				continue;
			} else {
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space in %s\n", __func__,
				       journal->j_devname);
				WARN_ON(1);
				jbd2_journal_abort(journal, 0);
			}
			write_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
Code Example #17
File: ioctl.c Project: wojtekzozlak/Studia.Archiwum
long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ext2_inode_info *ei = EXT2_I(inode);
	unsigned int flags;
	unsigned short rsv_window_size;
	int ret;

	ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT2_FAKE_B_ALLOC:
		/* Fake block allocation for the ext2 filesystem. */
	{
		struct ext2_fake_b_alloc_arg config;
		struct buffer_head bh_result;
		sector_t iblock, off;
		int ret = 0;

		ret = copy_from_user(&config,
				     (struct ext2_fake_b_alloc_arg __user *)arg,
				     sizeof(struct ext2_fake_b_alloc_arg));
		if (ret != 0) {
			printk(KERN_DEBUG "can't copy from user");
			return -EIO;
		} else
			ret = 0;

		/* Allocate blocks. */
		off = config.efba_off;
		iblock = config.efba_off >> inode->i_blkbits;
		while ((iblock << inode->i_blkbits) <
		       (config.efba_off + config.efba_size)) {
			memset(&bh_result, 0, sizeof(struct buffer_head));
			ret = ext2_get_block(inode, iblock, &bh_result, 1);
			if (ret < 0) {
				printk(KERN_DEBUG "get_block error %d, escaping", ret);
				break;
			}
			iblock++;
		}

		/* Set metadata. */
		write_lock(&EXT2_I(inode)->i_meta_lock);
		if (ret == 0) {
			printk(KERN_DEBUG "ok, set size");
			inode->i_size = max_t(loff_t, inode->i_size,
					      config.efba_off + config.efba_size);
		} else if (iblock != config.efba_off >> inode->i_blkbits) {
			/* Partially allocated; the size must be fixed, but
			 * `i_blocks` should contain actual information. */
			inode->i_size = inode->i_blocks << inode->i_blkbits;
		}
		inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
		inode->i_version++;
		write_unlock(&EXT2_I(inode)->i_meta_lock);

		printk(KERN_DEBUG "returning %d", ret);
		return ret;
	}

	case EXT2_IOC_GETFLAGS:
		ext2_get_inode_flags(ei);
		flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT2_IOC_SETFLAGS: {
		unsigned int oldflags;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;

		if (!is_owner_or_cap(inode)) {
			ret = -EACCES;
			goto setflags_out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto setflags_out;
		}

		flags = ext2_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto setflags_out;
		}
		oldflags = ei->i_flags;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto setflags_out;
			}
		}

		flags = flags & EXT2_FL_USER_MODIFIABLE;
		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
		ei->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		ext2_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
setflags_out:
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	}
	case EXT2_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT2_IOC_SETVERSION:
		if (!is_owner_or_cap(inode))
			return -EPERM;
		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;
		if (get_user(inode->i_generation, (int __user *) arg)) {
			ret = -EFAULT;
		} else {
			inode->i_ctime = CURRENT_TIME_SEC;
			mark_inode_dirty(inode);
		}
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	case EXT2_IOC_GETRSVSZ:
		if (test_opt(inode->i_sb, RESERVATION)
			&& S_ISREG(inode->i_mode)
			&& ei->i_block_alloc_info) {
			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
			return put_user(rsv_window_size, (int __user *)arg);
		}
		return -ENOTTY;
	case EXT2_IOC_SETRSVSZ: {

		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
			return -ENOTTY;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(rsv_window_size, (int __user *)arg))
			return -EFAULT;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;

		if (rsv_window_size > EXT2_MAX_RESERVE_BLOCKS)
			rsv_window_size = EXT2_MAX_RESERVE_BLOCKS;

		/*
		 * need to allocate reservation structure for this inode
		 * before set the window size
		 */
		/*
		 * XXX What lock should protect the rsv_goal_size?
		 * Accessed in ext2_get_block only.  ext3 uses i_truncate.
		 */
		mutex_lock(&ei->truncate_mutex);
		if (!ei->i_block_alloc_info)
			ext2_init_block_alloc_info(inode);

		if (ei->i_block_alloc_info){
			struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
			rsv->rsv_goal_size = rsv_window_size;
		}
		mutex_unlock(&ei->truncate_mutex);
		mnt_drop_write(filp->f_path.mnt);
		return 0;
	}
	default:
		return -ENOTTY;
	}
}
Code Example #18
/*
 * Set the attributes of an object
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle,
			struct lustre_capa *capa)
{
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	udmu_objset_t		*uos = &osd->od_objset;
	struct osd_thandle	*oh;
	struct osa_attr		*osa = &osd_oti_get(env)->oti_osa;
	sa_bulk_attr_t		*bulk;
	int			 cnt;
	int			 rc = 0;

	ENTRY;
	LASSERT(handle != NULL);
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of0(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	   transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	if (la->la_valid == 0)
		RETURN(0);

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 10);
	if (bulk == NULL)
		RETURN(-ENOMEM);

	/* do both accounting updates outside oo_attr_lock below */
	if ((la->la_valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
		/* Update user accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_objset.os, osd->od_iusr_oid,
					la->la_uid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_uid, rc);
		rc = -zap_increment_int(osd->od_objset.os, osd->od_iusr_oid,
					obj->oo_attr.la_uid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_uid, rc);
	}
	if ((la->la_valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
		/* Update group accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_objset.os, osd->od_igrp_oid,
					la->la_gid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_gid, rc);
		rc = -zap_increment_int(osd->od_objset.os, osd->od_igrp_oid,
					obj->oo_attr.la_gid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_gid, rc);
	}

	write_lock(&obj->oo_attr_lock);
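	/* Update the cached attributes and build the SA bulk array under
	 * oo_attr_lock so the two stay consistent with each other. */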
	cnt = 0;
	if (la->la_valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL,
				 osa->atime, 16);
	}
	if (la->la_valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL,
				 osa->mtime, 16);
	}
	if (la->la_valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL,
				 osa->ctime, 16);
	}
	if (la->la_valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
			(la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL,
				 &osa->mode, 8);
	}
	if (la->la_valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL,
				 &osa->size, 8);
	}
	if (la->la_valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL,
				 &osa->nlink, 8);
	}
	if (la->la_valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(uos), NULL,
				 &osa->rdev, 8);
	}
	if (la->la_valid & LA_FLAGS) {
		osa->flags = obj->oo_attr.la_flags = la->la_flags;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL,
				 &osa->flags, 8);
	}
	if (la->la_valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL,
				 &osa->uid, 8);
	}
	if (la->la_valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= la->la_valid;
	write_unlock(&obj->oo_attr_lock);

	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 10);
	RETURN(rc);
}
Code Example #19
/* We start counting in the buffer with entry 2 and increment for every
   entry (do not increment for . or .. entry) */
static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
	struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
	int rc = 0;
	int pos_in_buf = 0;
	loff_t first_entry_in_buffer;
	loff_t index_to_find = file->f_pos;
	struct cifsFileInfo *cifsFile = file->private_data;
	/* check if index in the buffer */

	if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
	   (num_to_ret == NULL))
		return -ENOENT;

	*ppCurrentEntry = NULL;
	first_entry_in_buffer =
		cifsFile->srch_inf.index_of_last_entry -
			cifsFile->srch_inf.entries_in_buffer;

	/* if first entry in buf is zero then is first buffer
	in search response data which means it is likely . and ..
	will be in this buffer, although some servers do not return
	. and .. for the root of a drive and for those we need
	to start two entries earlier */

	dump_cifs_file_struct(file, "In fce ");
	if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
	     is_dir_changed(file)) ||
	   (index_to_find < first_entry_in_buffer)) {
		/* close and restart search */
		cFYI(1, ("search backing up - close and restart search"));
		write_lock(&GlobalSMBSeslock);
		if (!cifsFile->srch_inf.endOfSearch &&
		    !cifsFile->invalidHandle) {
			cifsFile->invalidHandle = true;
			write_unlock(&GlobalSMBSeslock);
			CIFSFindClose(xid, pTcon, cifsFile->netfid);
		} else
			write_unlock(&GlobalSMBSeslock);
		if (cifsFile->srch_inf.ntwrk_buf_start) {
			cFYI(1, ("freeing SMB ff cache buf on search rewind"));
			if (cifsFile->srch_inf.smallBuf)
				cifs_small_buf_release(cifsFile->srch_inf.
						ntwrk_buf_start);
			else
				cifs_buf_release(cifsFile->srch_inf.
						ntwrk_buf_start);
			cifsFile->srch_inf.ntwrk_buf_start = NULL;
		}
		rc = initiate_cifs_search(xid, file);
		if (rc) {
			cFYI(1, ("error %d reinitiating a search on rewind",
				 rc));
			return rc;
		}
		cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
	}

	while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
	      (rc == 0) && !cifsFile->srch_inf.endOfSearch) {
		cFYI(1, ("calling findnext2"));
		rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
				  &cifsFile->srch_inf);
		cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
		if (rc)
			return -ENOENT;
	}
	if (index_to_find < cifsFile->srch_inf.index_of_last_entry) {
		/* we found the buffer that contains the entry */
		/* scan and find it */
		int i;
		char *current_entry;
		char *end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
			smbCalcSize((struct smb_hdr *)
				cifsFile->srch_inf.ntwrk_buf_start);

		current_entry = cifsFile->srch_inf.srch_entries_start;
		first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
					- cifsFile->srch_inf.entries_in_buffer;
		pos_in_buf = index_to_find - first_entry_in_buffer;
		cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));

		for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
			/* go entry by entry figuring out which is first */
			current_entry = nxt_dir_entry(current_entry, end_of_smb,
						cifsFile->srch_inf.info_level);
		}
		if ((current_entry == NULL) && (i < pos_in_buf)) {
			/* BB fixme - check if we should flag this error */
			cERROR(1, ("reached end of buf searching for pos in buf"
			  " %d index to find %lld rc %d",
			  pos_in_buf, index_to_find, rc));
		}
		rc = 0;
		*ppCurrentEntry = current_entry;
	} else {
		cFYI(1, ("index not in buffer - could not findnext into it"));
		return 0;
	}

	if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
		cFYI(1, ("can not return entries pos_in_buf beyond last"));
		*num_to_ret = 0;
	} else
		*num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf;

	return rc;
}
Code Example #20
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end & 0x7) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:

	/* Check for overlap with the preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Check for overlap with succeeding segments. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
Code Example #21
File: hp_sdc.c Project: AshishNamdev/linux
unsigned long hp_sdc_put(void)
{
	hp_sdc_transaction *curr;
	uint8_t act;
	int idx, curridx;

	int limit = 0;

	write_lock(&hp_sdc.lock);

	/* If i8042 buffers are full, we cannot do anything that
	   requires output, so we skip to the administrativa. */
	if (hp_sdc.ibf) {
		hp_sdc_status_in8();
		if (hp_sdc.ibf)
			goto finish;
	}

 anew:
	/* See if we are in the middle of a sequence. */
	if (hp_sdc.wcurr < 0)
		hp_sdc.wcurr = 0;
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr == hp_sdc.wcurr)
		hp_sdc.wcurr++;
	read_unlock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
		hp_sdc.wcurr = 0;
	curridx = hp_sdc.wcurr;

	if (hp_sdc.tq[curridx] != NULL)
		goto start;

	while (++curridx != hp_sdc.wcurr) {
		if (curridx >= HP_SDC_QUEUE_LEN) {
			curridx = -1; /* Wrap to top */
			continue;
		}
		read_lock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.rcurr == curridx) {
			read_unlock_irq(&hp_sdc.rtq_lock);
			continue;
		}
		read_unlock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.tq[curridx] != NULL)
			break; /* Found one. */
	}
	if (curridx == hp_sdc.wcurr) { /* There's nothing queued to do. */
		curridx = -1;
	}
	hp_sdc.wcurr = curridx;

 start:

	/* Check to see if the interrupt mask needs to be set. */
	if (hp_sdc.set_im) {
		hp_sdc_status_out8(hp_sdc.im | HP_SDC_CMD_SET_IM);
		hp_sdc.set_im = 0;
		goto finish;
	}

	if (hp_sdc.wcurr == -1)
		goto done;

	curr = hp_sdc.tq[curridx];
	idx = curr->actidx;

	if (curr->actidx >= curr->endidx) {
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
			hp_sdc.wcurr = 0;
		goto finish;
	}

	act = curr->seq[idx];
	idx++;

	if (curr->idx >= curr->endidx) {
		if (act & HP_SDC_ACT_DEALLOC)
			kfree(curr);
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
			hp_sdc.wcurr = 0;
		goto finish;
	}

	while (act & HP_SDC_ACT_PRECMD) {
		if (curr->idx != idx) {
			idx++;
			act &= ~HP_SDC_ACT_PRECMD;
			break;
		}
		hp_sdc_status_out8(curr->seq[idx]);
		curr->idx++;
		/* act finished? */
		if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_PRECMD)
			goto actdone;
		/* skip quantity field if data-out sequence follows. */
		if (act & HP_SDC_ACT_DATAOUT)
			curr->idx++;
		goto finish;
	}
	if (act & HP_SDC_ACT_DATAOUT) {
		int qty;

		qty = curr->seq[idx];
		idx++;
		if (curr->idx - idx < qty) {
			hp_sdc_data_out8(curr->seq[curr->idx]);
			curr->idx++;
			/* act finished? */
			if (curr->idx - idx >= qty &&
			    (act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAOUT)
				goto actdone;
			goto finish;
		}
		idx += qty;
		act &= ~HP_SDC_ACT_DATAOUT;
	} else
	    while (act & HP_SDC_ACT_DATAREG) {
		int mask;
		uint8_t w7[4];

		mask = curr->seq[idx];
		if (idx != curr->idx) {
			idx++;
			idx += !!(mask & 1);
			idx += !!(mask & 2);
			idx += !!(mask & 4);
			idx += !!(mask & 8);
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0];
		w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1];
		w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2];
		w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3];

		if (hp_sdc.wi > 0x73 || hp_sdc.wi < 0x70 ||
		    w7[hp_sdc.wi - 0x70] == hp_sdc.r7[hp_sdc.wi - 0x70]) {
			int i = 0;

			/* Need to point the write index register */
			while (i < 4 && w7[i] == hp_sdc.r7[i])
				i++;

			if (i < 4) {
				hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i);
				hp_sdc.wi = 0x70 + i;
				goto finish;
			}

			idx++;
			if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG)
				goto actdone;

			curr->idx = idx;
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]);
		hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70];
		hp_sdc.wi++; /* write index register autoincrements */
		{
			int i = 0;

			while ((i < 4) && w7[i] == hp_sdc.r7[i])
				i++;
			if (i >= 4) {
				curr->idx = idx + 1;
				if ((act & HP_SDC_ACT_DURING) ==
				    HP_SDC_ACT_DATAREG)
					goto actdone;
			}
		}
		goto finish;
	}
	/* We don't go any further in the command if there is a pending read,
	   because we don't want interleaved results. */
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr >= 0) {
		read_unlock_irq(&hp_sdc.rtq_lock);
		goto finish;
	}
	read_unlock_irq(&hp_sdc.rtq_lock);


	if (act & HP_SDC_ACT_POSTCMD) {
		uint8_t postcmd;

		/* curr->idx should == idx at this point. */
		postcmd = curr->seq[idx];
		curr->idx++;
		if (act & HP_SDC_ACT_DATAIN) {

			/* Start a new read */
			hp_sdc.rqty = curr->seq[curr->idx];
			do_gettimeofday(&hp_sdc.rtv);
			curr->idx++;
			/* Still need to lock here in case of spurious irq. */
			write_lock_irq(&hp_sdc.rtq_lock);
			hp_sdc.rcurr = curridx;
			write_unlock_irq(&hp_sdc.rtq_lock);
			hp_sdc_status_out8(postcmd);
			goto finish;
		}
		hp_sdc_status_out8(postcmd);
		goto actdone;
	}

 actdone:
	if (act & HP_SDC_ACT_SEMAPHORE)
		up(curr->act.semaphore);
	else if (act & HP_SDC_ACT_CALLBACK)
		curr->act.irqhook(0, NULL, 0, 0);

	if (curr->idx >= curr->endidx) { /* This transaction is over. */
		if (act & HP_SDC_ACT_DEALLOC)
			kfree(curr);
		hp_sdc.tq[curridx] = NULL;
	} else {
		curr->actidx = idx + 1;
		curr->idx = idx + 2;
	}
	/* Interleave outbound data between the transactions. */
	hp_sdc.wcurr++;
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
		hp_sdc.wcurr = 0;

 finish:
	/* If by some quirk IBF has cleared and our ISR has run to
	   see that that has happened, do it all again. */
	if (!hp_sdc.ibf && limit++ < 20)
		goto anew;

 done:
	if (hp_sdc.wcurr >= 0)
		tasklet_schedule(&hp_sdc.task);
	write_unlock(&hp_sdc.lock);

	return 0;
}
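hp_sdc_put() treats hp_sdc.tq[] as a circular queue: it resumes scanning at wcurr, skips the slot that a pending read owns (rcurr), and wraps once around looking for queued work. A condensed sketch of just that scan, with hypothetical names and the locking omitted:

#include <stddef.h>

/* Resume at 'start', skip the reserved slot, wrap once around the
 * queue looking for a non-empty slot; -1 means nothing is queued. */
int find_next_slot(void *tq[], int qlen, int start, int reserved)
{
	int i, idx;

	for (i = 0; i < qlen; i++) {
		idx = (start + i) % qlen;
		if (idx == reserved)
			continue;	/* a read transaction owns this slot */
		if (tq[idx] != NULL)
			return idx;	/* found queued work */
	}
	return -1;	/* nothing queued to do */
}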
Code example #22
File: linux_systemroot.c  Project: paperclip/talpa
/*
 * Object creation/destruction.
 */
LinuxSystemRoot* newLinuxSystemRoot(void)
{
    LinuxSystemRoot* object;
    unsigned m_seq = 0;


    object = talpa_alloc(sizeof(template_LinuxSystemRoot));
    if ( object )
    {
        struct task_struct* inittask;


        memcpy(object, &template_LinuxSystemRoot, sizeof(template_LinuxSystemRoot));
        object->i_ISystemRoot.object = object;

        talpa_tasklist_lock();
        inittask = talpa_find_task_by_pid(1);
        talpa_tasklist_unlock();

        if ( inittask )
        {
            struct fs_struct *init_fs;
            struct vfsmount *rootmnt;

            task_lock(inittask);
            init_fs = inittask->fs;
            if ( init_fs )
            {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
                spin_lock(&init_fs->lock);
  #else
                write_lock(&init_fs->lock);
  #endif
                init_fs->users++;
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
                spin_unlock(&init_fs->lock);
  #else
                write_unlock(&init_fs->lock);
  #endif
#else
                atomic_inc(&init_fs->count);
#endif
            }
            task_unlock(inittask);

            if ( init_fs )
            {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
                spin_lock(&init_fs->lock);
#else
                write_lock(&init_fs->lock);
#endif


restart_mnt:
                talpa_vfsmount_lock(&m_seq); // locks dcache_lock on 2.4
                for (rootmnt = talpa_fs_mnt(init_fs); rootmnt != getParent(rootmnt); rootmnt = getParent(rootmnt));
                object->mMnt = mntget(rootmnt);
                object->mDentry = dget(rootmnt->mnt_root);
                if (talpa_vfsmount_unlock(&m_seq)) // unlocks dcache_lock on 2.4
                {
                    goto restart_mnt;
                }
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
                init_fs->users--;
  #else
                atomic_dec(&init_fs->count);
  #endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
                spin_unlock(&init_fs->lock);
#else
                write_unlock(&init_fs->lock);
#endif
            }
        }

        if ( !object->mMnt || !object->mDentry )
        {
            if ( object->mMnt )
            {
                mntput(object->mMnt);
            }

            if ( object->mDentry )
            {
                dput(object->mDentry);
            }

            talpa_free(object);

            return NULL;
        }
    }

    return object;
}
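The #if ladders above exist because init_fs locking changed across kernel versions: fs_struct used an atomic count until 2.6.30, then a plain users counter, and its lock changed from an rwlock to a spinlock in 2.6.36. One way to keep call sites readable is to hide the difference behind compat helpers; a sketch (these wrappers are illustrative, not the project's actual ones):

#include <linux/version.h>
#include <linux/fs_struct.h>

/* Hide the rwlock_t -> spinlock_t change in struct fs_struct at 2.6.36. */
static inline void compat_lock_fs(struct fs_struct *fs)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
	spin_lock(&fs->lock);
#else
	write_lock(&fs->lock);
#endif
}

static inline void compat_unlock_fs(struct fs_struct *fs)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
	spin_unlock(&fs->lock);
#else
	write_unlock(&fs->lock);
#endif
}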
Code example #23
File: qsd_lib.c  Project: hocks/lustre-release
/*
 * Initialize on-disk structures in order to manage quota enforcement for
 * the target associated with the qsd instance \qsd and start the reintegration
 * procedure for each quota type as soon as possible.
 * The last step of the reintegration will be completed once qsd_start() is
 * called, at which point the space reconciliation with the master will be
 * executed.
 * This function must be called when the server stack is fully configured,
 * typically when ->ldo_prepare is called across the stack.
 *
 * \param env - the environment passed by the caller
 * \param qsd - the qsd_instance to prepare
 *
 * \retval - 0 on success, appropriate error on failure
 */
int qsd_prepare(const struct lu_env *env, struct qsd_instance *qsd)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	int			 qtype, rc = 0;
	ENTRY;

	if (unlikely(qsd == NULL))
		RETURN(0);

	read_lock(&qsd->qsd_lock);
	if (qsd->qsd_prepared) {
		CERROR("%s: qsd instance already prepared\n", qsd->qsd_svname);
		rc = -EALREADY;
	}
	read_unlock(&qsd->qsd_lock);
	if (rc)
		RETURN(rc);

	/* Record whether this qsd instance is managing quota enforcement for a
	 * MDT (i.e. inode quota) or OST (block quota) */
	if (lu_device_is_md(qsd->qsd_dev->dd_lu_dev.ld_site->ls_top_dev)) {
		qsd->qsd_is_md = true;
		qsd->qsd_sync_threshold = LQUOTA_LEAST_QUNIT(LQUOTA_RES_MD);
	} else {
		qsd->qsd_sync_threshold = LQUOTA_LEAST_QUNIT(LQUOTA_RES_DT);
	}

	/* look-up on-disk directory for the quota slave */
	qsd->qsd_root = lquota_disk_dir_find_create(env, qsd->qsd_dev, NULL,
						    QSD_DIR);
	if (IS_ERR(qsd->qsd_root)) {
		rc = PTR_ERR(qsd->qsd_root);
		qsd->qsd_root = NULL;
		CERROR("%s: failed to create quota slave root dir (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	/* initialize per-quota type data */
	for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
		rc = qsd_qtype_init(env, qsd, qtype);
		if (rc)
			RETURN(rc);
	}

	/* pools successfully setup, mark the qsd as prepared */
	write_lock(&qsd->qsd_lock);
	qsd->qsd_prepared = true;
	write_unlock(&qsd->qsd_lock);

	/* start reintegration thread for each type, if required */
	for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
		struct qsd_qtype_info	*qqi = qsd->qsd_type_array[qtype];

		if (qsd_type_enabled(qsd, qtype) && qsd->qsd_acct_failed) {
			LCONSOLE_ERROR("%s: can't enable quota enforcement "
				       "since space accounting isn't functional"
				       ". Please run tunefs.lustre --quota on "
				       "an unmounted filesystem if not done "
				       "already\n", qsd->qsd_svname);
			break;
		}

		rc = qsd_start_reint_thread(qqi);
		if (rc) {
			CERROR("%s: failed to start reint thread for type %s "
			       "(%d)\n", qsd->qsd_svname, QTYPE_NAME(qtype),
			       rc);
			RETURN(rc);
		}
	}

	/* start writeback thread */
	rc = qsd_start_upd_thread(qsd);
	if (rc) {
		CERROR("%s: failed to start writeback thread (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	/* generate osp name */
	rc = tgt_name2lwp_name(qsd->qsd_svname, qti->qti_buf,
			       MTI_NAME_MAXLEN, 0);
	if (rc) {
		CERROR("%s: failed to generate ospname (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	/* the connection callback will start the reintegration
	 * procedure if quota is enabled */
	rc = lustre_register_lwp_item(qti->qti_buf, &qsd->qsd_exp,
				      qsd_conn_callback, (void *)qsd);
	if (rc) {
		CERROR("%s: fail to get connection to master (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	RETURN(0);
}
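qsd_prepare() checks qsd_prepared under the read lock and later sets it under the write lock without re-checking in between; that is safe here only because prepare is invoked once, from a single configuration path (->ldo_prepare). A generic sketch of the idiom with that caveat carried over as a comment:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Check under the read lock, set under the write lock.  As in
 * qsd_prepare() above, the flag is not re-checked under the write
 * lock, so callers must be serialized by a single setup path. */
static int mark_prepared_once(rwlock_t *lock, bool *prepared)
{
	int rc = 0;

	read_lock(lock);
	if (*prepared)
		rc = -EALREADY;	/* already prepared, nothing to do */
	read_unlock(lock);
	if (rc)
		return rc;

	/* ... one-time setup (directories, per-type data) goes here ... */

	write_lock(lock);
	*prepared = true;
	write_unlock(lock);
	return 0;
}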
Code example #24
File: reassembly.c  Project: Adjustxx/Savaged-Zen
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4:
	 *                                  When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments, including those not yet received) MUST be silently
	 *   discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
Code example #25
File: ipv6_sockglue.c  Project: OpenHMR/Open-HMR600
int ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (level == SOL_IP && sk->sk_type != SOCK_RAW)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if (level != SOL_IPV6)
		goto out;

	if (optval == NULL)
		val = 0;
	else if (get_user(val, (int __user *) optval))
		return -EFAULT;

	valbool = (val != 0);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_protocol != IPPROTO_UDP &&
			    sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct tcp_sock *tp = tcp_sk(sk);

				local_bh_disable();
				sock_prot_dec_use(sk->sk_prot);
				sock_prot_inc_use(&tcp_prot);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				tp->af_specific = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, tp->pmtu_cookie);
			} else {
				local_bh_disable();
				sock_prot_dec_use(sk->sk_prot);
				sock_prot_inc_use(&udp_prot);
				local_bh_enable();
				sk->sk_prot = &udp_prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			if (pktopt)
				kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
			atomic_dec(&inet6_sock_nr);
#endif
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (inet_sk(sk)->num)
			goto e_inval;
		np->ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_PKTINFO:
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_HOPLIMIT:
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_RTHDR:
		if (val < 0 || val > 2)
			goto e_inval;
		np->rxopt.bits.srcrt = val;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_DSTOPTS:
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi fl;
		int junk;

		fl.fl6_flowlabel = 0;
		fl.oif = sk->sk_bound_dev_if;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void*)(opt+1);

		retv = datagram_send_ctl(&msg, &fl, opt, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		if (sk->sk_type == SOCK_STREAM) {
			if (opt) {
				struct tcp_sock *tp = tcp_sk(sk);
				if (!((1 << sk->sk_state) &
				      (TCPF_LISTEN | TCPF_CLOSE))
				    && inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
					tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
					tcp_sync_mss(sk, tp->pmtu_cookie);
				}
			}
			opt = xchg(&np->opt, opt);
			sk_dst_reset(sk);
		} else {
			write_lock(&sk->sk_dst_lock);
			opt = xchg(&np->opt, opt);
			write_unlock(&sk->sk_dst_lock);
			sk_dst_reset(sk);
		}

done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
			goto e_inval;

		if (__dev_get_by_index(val) == NULL) {
			retv = -ENODEV;
			break;
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen != sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
				&psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
				&psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
				&psin6->sin6_addr);
			if (retv)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /*IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		extern int sysctl_optmem_max;
		extern int sysctl_mld_max_msf;
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (gsf == NULL) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);

		break;
	}
	case IPV6_ROUTER_ALERT:
		retv = ip6_ra_control(sk, val, NULL);
		break;
	case IPV6_MTU_DISCOVER:
		if (val < 0 || val > 2)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

#ifdef CONFIG_NETFILTER
	default:
		retv = nf_setsockopt(sk, PF_INET6, optname, optval,
				     optlen);
		break;
#endif

	}
	release_sock(sk);

out:
	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
Code example #26
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find where this fragment fits in the chain so far. */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}
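Unlike the IPv6 queue above (examples #20 and #24), this IPv4 path resolves an overlap with the preceding fragment by advancing the new fragment's offset and pulling the duplicate bytes off its head. A sketch of just that arithmetic, with plain ints standing in for the skb fields:

/* If the previous fragment extends 'i' bytes past our start, skip
 * those duplicate bytes at the head of the new fragment. */
int trim_against_prev(int prev_offset, int prev_len, int *offset, int end)
{
	int i = (prev_offset + prev_len) - *offset;

	if (i <= 0)
		return 0;	/* no overlap, nothing to trim */
	*offset += i;		/* advance past bytes prev already covers */
	if (end <= *offset)
		return -1;	/* fully overlapped: reject this fragment */
	return i;		/* bytes the caller would pskb_pull() */
}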
Code example #27
File: ami304_compass.c  Project: Feeyo/LG_Optimus2X
int AMI304_Init(int mode)
{
	u8 databuf[10];
	NvU8 regaddr;
	u8 ctrl1, ctrl2, ctrl3;

	NvCompassI2CGetRegs(compass_dev->hOdmComp, AMI304_REG_CTRL1, &ctrl1, 1);

#if DEBUG_AMI304
	printk("[%s:%d] Compass Sensor \n", __FUNCTION__, __LINE__);
#endif

#if DEBUG_AMI304
	printk("[%s:%d] AMI304_REG_CTRL1 = 0x%x \n", __FUNCTION__, __LINE__, ctrl1);
#endif

	regaddr = AMI304_REG_CTRL2;
	NvCompassI2CGetRegs(compass_dev->hOdmComp, AMI304_REG_CTRL2, &ctrl2, 1);
#if DEBUG_AMI304
	printk("[%s:%d] AMI304_REG_CTRL2 = 0x%x \n", __FUNCTION__, __LINE__, ctrl2);
#endif

	regaddr = AMI304_REG_CTRL3;
	NvCompassI2CGetRegs(compass_dev->hOdmComp, AMI304_REG_CTRL3, &ctrl3, 1);
#if DEBUG_AMI304
	printk("[%s:%d] AMI304_REG_CTRL3 = 0x%x \n", __FUNCTION__, __LINE__, ctrl3);
	printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif
	databuf[0] = AMI304_REG_CTRL1;
	if (mode==AMI304_FORCE_MODE) {
#if DEBUG_AMI304
		printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif
		databuf[1] = ctrl1 | AMI304_CTRL1_PC1 | AMI304_CTRL1_FS1_FORCE;
		write_lock(&ami304_data.lock);
		ami304_data.mode = AMI304_FORCE_MODE;
		write_unlock(&ami304_data.lock);
	} else	{
#if DEBUG_AMI304
		printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif
		databuf[1] = ctrl1 | AMI304_CTRL1_PC1 | AMI304_CTRL1_FS1_NORMAL | AMI304_CTRL1_ODR1;
		write_lock(&ami304_data.lock);
		ami304_data.mode = AMI304_NORMAL_MODE;
		write_unlock(&ami304_data.lock);
	}
#if DEBUG_AMI304
	printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif

	NvCompassI2CSetRegs(compass_dev->hOdmComp, AMI304_REG_CTRL1, &databuf[1], 1);
#if DEBUG_AMI304
	printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif

	databuf[0] = AMI304_REG_CTRL2;
	databuf[1] = ctrl2 | AMI304_CTRL2_DREN;
	NvCompassI2CSetRegs(compass_dev->hOdmComp, AMI304_REG_CTRL2, &databuf[1], 1);
#if DEBUG_AMI304
	printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif

	databuf[0] = AMI304_REG_CTRL3;
	databuf[1] = ctrl3 | AMI304_CTRL3_B0_LO_CLR;
	NvCompassI2CSetRegs(compass_dev->hOdmComp, AMI304_REG_CTRL3, &databuf[1], 1);
#if DEBUG_AMI304
	printk("## [%s:%d] ##\n", __FUNCTION__, __LINE__);
#endif

	return 0;
}
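AMI304_Init() is three read-modify-write cycles over the CTRL registers: read the current value over I2C, OR in the mode bits, write the result back. A generic sketch of the pattern, with hypothetical bus accessors standing in for NvCompassI2CGetRegs()/NvCompassI2CSetRegs():

#include <stdint.h>

/* Hypothetical accessors; both return 0 on success. */
int read_reg(uint8_t reg, uint8_t *val);
int write_reg(uint8_t reg, uint8_t val);

/* Read-modify-write one control register, as AMI304_Init() does for
 * CTRL1, CTRL2 and CTRL3. */
int rmw_reg(uint8_t reg, uint8_t set_bits)
{
	uint8_t val;

	if (read_reg(reg, &val))	/* fetch current register contents */
		return -1;
	val |= set_bits;		/* OR in the requested mode bits */
	return write_reg(reg, val);	/* write the merged value back */
}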
Code example #28
static inline int l2cap_connect_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd, __u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	l2cap_conn_req *req = (l2cap_conn_req *) data;
	l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	__u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__u16 psm  = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (parent->ack_backlog > parent->max_ack_backlog) {
		BT_DBG("backlog full %d", parent->ack_backlog); 
		goto response;
	}

	sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock(&list->lock);
		sk->zapped = 1;
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bluez_pi(sk)->src, conn->src);
	bacpy(&bluez_pi(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sndtimeo);

	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;
	
	if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
		if (!hci_conn_encrypt(conn->hcon))
			goto done;
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
			goto done;
	}

	sk->state = BT_CONFIG;
	result = status = 0;

done:
	write_unlock(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = __cpu_to_le16(scid);
	rsp.dcid   = __cpu_to_le16(dcid);
	rsp.result = __cpu_to_le16(result);
	rsp.status = __cpu_to_le16(status);
	l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, L2CAP_CONN_RSP_SIZE, &rsp);
	return 0;
}
Code example #29
File: ami304_compass.c  Project: Feeyo/LG_Optimus2X
static int ami304daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		unsigned long arg)
{
	int valuebuf[4];
	int calidata[7];
	int controlbuf[10];
	char strbuf[AMI304_BUFSIZE];
	void __user *data;
	int retval = 0;
	int mode;
	int en_dis_Report;

	/* check that the caller has root authority */
	if (!capable(CAP_SYS_ADMIN)) {
		retval = -EPERM;
		goto err_out;
	}

	switch (cmd) {

		case AMI304MID_IOCTL_GET_SENSORDATA:
#if DEBUG_DAEMON_IOCTL
			printk("Release ami304daemon_ioctl, :AMI304MID_IOCTL_GET_SENSORDATA\n");
#endif
			data = (void __user *) arg;
			if (data == NULL)
				break;
			AMI304_ReadSensorData(strbuf, AMI304_BUFSIZE);
			if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304MID_IOCTL_SET_POSTURE:
#if DEBUG_DAEMON_IOCTL
			printk("Release ami304daemon_ioctl, :AMI304MID_IOCTL_SET_POSTURE\n");
#endif
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(&valuebuf, data, sizeof(valuebuf))) {
				retval = -EFAULT;
				goto err_out;
			}
			write_lock(&ami304mid_data.datalock);
			ami304mid_data.yaw   = valuebuf[0];
			ami304mid_data.pitch = valuebuf[1];
			ami304mid_data.roll  = valuebuf[2];
			ami304mid_data.mag_status = valuebuf[3];
			write_unlock(&ami304mid_data.datalock);
			break;

		case AMI304MID_IOCTL_SET_CALIDATA:
#if DEBUG_DAEMON_IOCTL
			printk("Release ami304daemon_ioctl, :AMI304MID_IOCTL_SET_CALIDATA\n");
#endif
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(&calidata, data, sizeof(calidata))) {
				retval = -EFAULT;
				goto err_out;
			}
			write_lock(&ami304mid_data.datalock);
			ami304mid_data.nmx = calidata[0];
			ami304mid_data.nmy = calidata[1];
			ami304mid_data.nmz = calidata[2];
			ami304mid_data.nax = calidata[3];
			ami304mid_data.nay = calidata[4];
			ami304mid_data.naz = calidata[5];
			ami304mid_data.mag_status = calidata[6];
			write_unlock(&ami304mid_data.datalock);
			//			AMI304_Report_Value(en_dis_Report);
			break;

		case AMI304MID_IOCTL_GET_CONTROL:
#if DEBUG_DAEMON_IOCTL
			printk("Release ami304daemon_ioctl, :AMI304MID_IOCTL_GET_CONTROL\n");
#endif
			read_lock(&ami304mid_data.ctrllock);
			memcpy(controlbuf, &ami304mid_data.controldata[0], sizeof(controlbuf));
			read_unlock(&ami304mid_data.ctrllock);
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_to_user(data, controlbuf, sizeof(controlbuf))) {
				retval = -EFAULT;
				goto err_out;
			}
			break;

		case AMI304MID_IOCTL_SET_CONTROL:
#if DEBUG_DAEMON_IOCTL
			printk("Release ami304daemon_ioctl, :AMI304MID_IOCTL_SET_CONTROL\n");
#endif

			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(controlbuf, data, sizeof(controlbuf))) {
				retval = -EFAULT;
				goto err_out;
			}
			write_lock(&ami304mid_data.ctrllock);
			memcpy(&ami304mid_data.controldata[0], controlbuf, sizeof(controlbuf));
			write_unlock(&ami304mid_data.ctrllock);
			break;

		case AMI304MID_IOCTL_SET_MODE:
#if DEBUG_DAEMON_IOCTL
			printk("Release ami304daemon_ioctl, :AMI304MID_IOCTL_SET_MODE\n");
#endif

			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(&mode, data, sizeof(mode))) {
				retval = -EFAULT;
				goto err_out;
			}
			AMI304_SetMode(mode);
			break;

			//Add for input_device sync
		case AMI304MID_IOCTL_SET_REPORT:
			data = (void __user *) arg;
			if (data == NULL)
				break;
			if (copy_from_user(&en_dis_Report, data, sizeof(en_dis_Report))) {
				retval = -EFAULT;
				goto err_out;
			}
			read_lock(&ami304mid_data.datalock);
			AMI304_Report_Value(en_dis_Report);
			read_unlock(&ami304mid_data.datalock);
			break;

		default:
			printk(KERN_ERR "%s not supported = 0x%04x", __FUNCTION__, cmd);
			retval = -ENOIOCTLCMD;
			break;
	}

err_out:
	return retval;
}
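A hedged sketch of how a userspace daemon would drive one of these requests. AMI304MID_IOCTL_SET_POSTURE and its request number come from the driver's header (assumed here, not shown above), and because of the capable(CAP_SYS_ADMIN) check the caller must run as root:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "ami304.h"	/* hypothetical header providing AMI304MID_IOCTL_* */

/* Push a yaw/pitch/roll/mag_status quadruple into the driver. */
int push_posture(const char *devnode, const int posture[4])
{
	int fd = open(devnode, O_WRONLY);
	int rc;

	if (fd < 0)
		return -1;
	/* lands in the AMI304MID_IOCTL_SET_POSTURE copy_from_user() path */
	rc = ioctl(fd, AMI304MID_IOCTL_SET_POSTURE, posture);
	close(fd);
	return rc;
}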
Code example #30
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
    struct sk_buff *prev, *next;
    struct net_device *dev;
    int flags, offset;
    int ihl, end;
    int err = -ENOENT;

    if (qp->q.last_in & INET_FRAG_COMPLETE)
        goto err;

    if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
            unlikely(ip_frag_too_far(qp)) &&
            unlikely(err = ip_frag_reinit(qp))) {
        ipq_kill(qp);
        goto err;
    }

    offset = ntohs(ip_hdr(skb)->frag_off);
    flags = offset & ~IP_OFFSET;
    offset &= IP_OFFSET;
    offset <<= 3;		/* offset is in 8-byte chunks */
    ihl = ip_hdrlen(skb);

    /* Determine the position of this fragment. */
    end = offset + skb->len - ihl;
    err = -EINVAL;

    /* Is this the final fragment? */
    if ((flags & IP_MF) == 0) {
        /* If we already have some bits beyond end
         * or have different end, the segment is corrupted.
         */
        if (end < qp->q.len ||
                ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
            goto err;
        qp->q.last_in |= INET_FRAG_LAST_IN;
        qp->q.len = end;
    } else {
        if (end&7) {
            end &= ~7;
            if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                skb->ip_summed = CHECKSUM_NONE;
        }
        if (end > qp->q.len) {
            /* Some bits beyond end -> corruption. */
            if (qp->q.last_in & INET_FRAG_LAST_IN)
                goto err;
            qp->q.len = end;
        }
    }
    if (end == offset)
        goto err;

    err = -ENOMEM;
    if (pskb_pull(skb, ihl) == NULL)
        goto err;

    err = pskb_trim_rcsum(skb, end - offset);
    if (err)
        goto err;

    /* Find out which fragments are in front and at the back of us
     * in the chain of fragments so far.  We must know where to put
     * this fragment, right?
     */
    prev = NULL;
    for (next = qp->q.fragments; next != NULL; next = next->next) {
        if (FRAG_CB(next)->offset >= offset)
            break;	/* bingo! */
        prev = next;
    }

    /* We found where to put this one.  Check for overlap with
     * preceding fragment, and, if needed, align things so that
     * any overlaps are eliminated.
     */
    if (prev) {
        int i = (FRAG_CB(prev)->offset + prev->len) - offset;

        if (i > 0) {
            offset += i;
            err = -EINVAL;
            if (end <= offset)
                goto err;
            err = -ENOMEM;
            if (!pskb_pull(skb, i))
                goto err;
            if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                skb->ip_summed = CHECKSUM_NONE;
        }
    }

    err = -ENOMEM;

    while (next && FRAG_CB(next)->offset < end) {
        int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

        if (i < next->len) {
            /* Eat head of the next overlapped fragment
             * and leave the loop. The next ones cannot overlap.
             */
            if (!pskb_pull(next, i))
                goto err;
            FRAG_CB(next)->offset += i;
            qp->q.meat -= i;
            if (next->ip_summed != CHECKSUM_UNNECESSARY)
                next->ip_summed = CHECKSUM_NONE;
            break;
        } else {
            struct sk_buff *free_it = next;

            /* Old fragment is completely overridden with
             * new one, drop it.
             */
            next = next->next;

            if (prev)
                prev->next = next;
            else
                qp->q.fragments = next;

            qp->q.meat -= free_it->len;
            frag_kfree_skb(qp->q.net, free_it, NULL);
        }
    }

    FRAG_CB(skb)->offset = offset;

    /* Insert this fragment in the chain of fragments. */
    skb->next = next;
    if (prev)
        prev->next = skb;
    else
        qp->q.fragments = skb;

    dev = skb->dev;
    if (dev) {
        qp->iif = dev->ifindex;
        skb->dev = NULL;
    }
    qp->q.stamp = skb->tstamp;
    qp->q.meat += skb->len;
    atomic_add(skb->truesize, &qp->q.net->mem);
    if (offset == 0)
        qp->q.last_in |= INET_FRAG_FIRST_IN;

    if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len)
        return ip_frag_reasm(qp, prev, dev);

    write_lock(&ip4_frags.lock);
    list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
    write_unlock(&ip4_frags.lock);
    return -EINPROGRESS;

err:
    kfree_skb(skb);
    return err;
}