Code example #1
static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		u_int32_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && master->suspend && master->resume) {
			slave->mtd.suspend = part_suspend;
			slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	slave->index = partno;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if ((cur_offset % master->erasesize) != 0) {
			/* Round up to next erasesize */
			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%08x -> 0x%08x\n", partno,
			       cur_offset, slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
		slave->offset + slave->mtd.size, slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
			part->name, master->name, slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u32 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase regions which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    (slave->offset % slave->mtd.erasesize)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    (slave->mtd.size % slave->mtd.erasesize)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
#ifndef CONFIG_MV88DE3010_MTD_SPEEDUP
	if (master->block_isbad) {
		uint32_t offs = 0;

		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
						offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}
#else
	/* for Berlin, the rootfs partition is #1, so check only that one.
	 *
	 * The block check below is commented out for an unknown reason:
	 * it broke the first read inside a partition.
	 * 	--zhengshi 2010/10/01
	 *
	if(master->block_isbad && (partno == 1)) {
		uint32_t offs = 0;

		while(offs < slave->mtd.size) {
			if (master->block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
		slave->mtd.checked = 1;
}*/
#endif

out_register:
	if (part->mtdp) {
		/* store the object pointer (caller may or may not register it) */
		*part->mtdp = &slave->mtd;
		slave->registered = 0;
	} else {
		/* register our partition */
		add_mtd_device(&slave->mtd);
		slave->registered = 1;
	}
	return slave;
}
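
For context, a minimal sketch of the caller, modeled on the add_mtd_partitions() loop in drivers/mtd/mtdpart.c of this era (details vary by kernel version): it walks the partition table and threads cur_offset through successive calls, which is what makes MTDPART_OFS_APPEND work.

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts, int nbparts)
{
	struct mtd_part *slave;
	u_int32_t cur_offset = 0;
	int i;

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		/* the next MTDPART_OFS_APPEND partition starts here */
		cur_offset = slave->offset + slave->mtd.size;
	}
	return 0;
}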
Code example #2
static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int len, err;
	char strbuf[HFSPLUS_MAX_STRLEN + 1];
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct hfsplus_readdir_data *rd;
	u16 type;

	if (filp->f_pos >= inode->i_size)
		return 0;

	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
	err = hfs_brec_find(&fd);
	if (err)
		goto out;

	switch ((u32)filp->f_pos) {
	case 0:
		/* This is completely artificial... */
		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
			goto out;
		filp->f_pos++;
		/* fall through */
	case 1:
		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			fd.entrylength);
		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
			printk(KERN_ERR "hfs: bad catalog folder thread\n");
			err = -EIO;
			goto out;
		}
		if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
			printk(KERN_ERR "hfs: truncated catalog thread\n");
			err = -EIO;
			goto out;
		}
		if (filldir(dirent, "..", 2, 1,
			    be32_to_cpu(entry.thread.parentID), DT_DIR))
			goto out;
		filp->f_pos++;
		/* fall through */
	default:
		if (filp->f_pos >= inode->i_size)
			goto out;
		err = hfs_brec_goto(&fd, filp->f_pos - 1);
		if (err)
			goto out;
	}

	for (;;) {
		if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
			printk(KERN_ERR "hfs: walked past end of dir\n");
			err = -EIO;
			goto out;
		}

		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			fd.entrylength);
		type = be16_to_cpu(entry.type);
		len = HFSPLUS_MAX_STRLEN;
		err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
		if (err)
			goto out;
		if (type == HFSPLUS_FOLDER) {
			if (fd.entrylength <
					sizeof(struct hfsplus_cat_folder)) {
				printk(KERN_ERR "hfs: small dir entry\n");
				err = -EIO;
				goto out;
			}
			if (HFSPLUS_SB(sb)->hidden_dir &&
			    HFSPLUS_SB(sb)->hidden_dir->i_ino ==
					be32_to_cpu(entry.folder.id))
				goto next;
			if (filldir(dirent, strbuf, len, filp->f_pos,
				    be32_to_cpu(entry.folder.id), DT_DIR))
				break;
		} else if (type == HFSPLUS_FILE) {
			if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
				printk(KERN_ERR "hfs: small file entry\n");
				err = -EIO;
				goto out;
			}
			if (filldir(dirent, strbuf, len, filp->f_pos,
				    be32_to_cpu(entry.file.id), DT_REG))
				break;
		} else {
			printk(KERN_ERR "hfs: bad catalog entry type\n");
			err = -EIO;
			goto out;
		}
next:
		filp->f_pos++;
		if (filp->f_pos >= inode->i_size)
			goto out;
		err = hfs_brec_goto(&fd, 1);
		if (err)
			goto out;
	}
	rd = filp->private_data;
	if (!rd) {
		rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
		if (!rd) {
			err = -ENOMEM;
			goto out;
		}
		filp->private_data = rd;
		rd->file = filp;
		list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
	}
	memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
out:
	hfs_find_exit(&fd);
	return err;
}
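
The filldir callback decides whether the walk continues: a non-zero return stops iteration, which is why the code above breaks out of its loop when filldir() fails. A hypothetical minimal callback, purely to illustrate the contract (count_filldir and its context are made up; the signature is the kernel's filldir_t of this era):

static int count_filldir(void *ctx, const char *name, int namelen,
			 loff_t offset, u64 ino, unsigned int d_type)
{
	int *nentries = ctx;

	(*nentries)++;	/* record the entry */
	return 0;	/* 0 = keep iterating; non-zero stops the readdir */
}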
Code example #3
asmlinkage long
sys_timer_create(const clockid_t which_clock,
		 struct sigevent __user *timer_event_spec,
		 timer_t __user * created_timer_id)
{
	int error = 0;
	struct k_itimer *new_timer = NULL;
	int new_timer_id;
	struct task_struct *process = NULL;
	unsigned long flags;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, (void *) new_timer,
			    &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error) {
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;
	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
	if (error)
		goto out;

	/*
	 * return the timer_id now.  The next step is hard to
	 * back out if there is an error.
	 */
	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		new_timer->it_sigev_notify = event.sigev_notify;
		new_timer->it_sigev_signo = event.sigev_signo;
		new_timer->it_sigev_value = event.sigev_value;

		read_lock(&tasklist_lock);
		if ((process = good_sigevent(&event))) {
			/*
			 * We may be setting up this process for another
			 * thread.  It may be exiting.  To catch this
			 * case we check the PF_EXITING flag.  If
			 * the flag is not set, the siglock will catch
			 * him before it is too late (in exit_itimers).
			 *
			 * The exec case is a bit more involved but easy
			 * to code.  If the process is in our thread
			 * group (and it must be or we would not allow
			 * it here) and is doing an exec, it will cause
			 * us to be killed.  In this case it will wait
			 * for us to die which means we can finish this
			 * linkage with our last gasp. I.e. no code :)
			 */
			spin_lock_irqsave(&process->sighand->siglock, flags);
			if (!(process->flags & PF_EXITING)) {
				new_timer->it_process = process;
				list_add(&new_timer->list,
					 &process->signal->posix_timers);
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
					get_task_struct(process);
			} else {
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				process = NULL;
			}
		}
		read_unlock(&tasklist_lock);
		if (!process) {
			error = -EINVAL;
			goto out;
		}
	} else {
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->it_sigev_signo = SIGALRM;
		new_timer->it_sigev_value.sival_int = new_timer->it_id;
		process = current->group_leader;
		spin_lock_irqsave(&process->sighand->siglock, flags);
		new_timer->it_process = process;
		list_add(&new_timer->list, &process->signal->posix_timers);
		spin_unlock_irqrestore(&process->sighand->siglock, flags);
	}

 	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */

out:
	if (error)
		release_posix_timer(new_timer, it_id_set);

	return error;
}
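
For reference, the user-space side of this contract (standard POSIX API, not code from this tree): passing a NULL sigevent triggers the SIGEV_SIGNAL/SIGALRM default seen in the else branch above, while an explicit sigevent follows the copy_from_user() path.

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,	/* same as the kernel's default path */
		.sigev_signo  = SIGALRM,
	};
	timer_t tid;

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == -1) {
		perror("timer_create");		/* e.g. EAGAIN, EINVAL */
		return 1;
	}
	return 0;
}

(Link with -lrt on older glibc.)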
Code example #4
int wlan_logging_sock_activate_svc(int log_fe_to_console, int num_buf)
{
	int i = 0;
	unsigned long irq_flag;

	pr_info("%s: Initalizing FEConsoleLog = %d NumBuff = %d\n",
			__func__, log_fe_to_console, num_buf);

	gapp_pid = INVALID_PID;

	gplog_msg = (struct log_msg *) vmalloc(
			num_buf * sizeof(struct log_msg));
	if (!gplog_msg) {
		pr_err("%s: Could not allocate memory\n", __func__);
		return -ENOMEM;
	}

	vos_mem_zero(gplog_msg, (num_buf * sizeof(struct log_msg)));

	gwlan_logging.log_fe_to_console = !!log_fe_to_console;
	gwlan_logging.num_buf = num_buf;

	spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
	INIT_LIST_HEAD(&gwlan_logging.free_list);
	INIT_LIST_HEAD(&gwlan_logging.filled_list);

	for (i = 0; i < num_buf; i++) {
		list_add(&gplog_msg[i].node, &gwlan_logging.free_list);
		gplog_msg[i].index = i;
	}
	gwlan_logging.pcur_node = (struct log_msg *)
		(gwlan_logging.free_list.next);
	list_del_init(gwlan_logging.free_list.next);
	spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);

	init_waitqueue_head(&gwlan_logging.wait_queue);
	gwlan_logging.exit = false;
	clear_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag);
	clear_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag);
	clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag);
	init_completion(&gwlan_logging.shutdown_comp);
	gwlan_logging.thread = kthread_create(wlan_logging_thread, NULL,
					"wlan_logging_thread");
	if (IS_ERR(gwlan_logging.thread)) {
		pr_err("%s: Could not Create LogMsg Thread Controller",
		       __func__);
		spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
		gwlan_logging.pcur_node = NULL;
		spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);
		vfree(gplog_msg);
		gplog_msg = NULL;
		return -ENOMEM;
	}
	wake_up_process(gwlan_logging.thread);
	gwlan_logging.is_active = true;
	gwlan_logging.is_flush_complete = false;

	nl_srv_register(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);

	pr_info("%s: Activated wlan_logging svc\n", __func__);
	return 0;
}
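
The two lists initialized above set up the usual free/filled buffer rotation. A hedged sketch of how a producer might take a buffer off the free list (the helper name is hypothetical; the real logging path in this driver does more):

static struct log_msg *get_free_log_buf(void)
{
	struct log_msg *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gwlan_logging.spin_lock, flags);
	if (!list_empty(&gwlan_logging.free_list)) {
		buf = list_first_entry(&gwlan_logging.free_list,
				       struct log_msg, node);
		list_del_init(&buf->node);	/* now owned by the producer */
	}
	spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
	return buf;
}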
Code example #5
File: ptrace.c Project: Adjustxx/Savaged-Zen
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}
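
The inverse operation is __ptrace_unlink(); simplified from the same file (the real function also clears ptrace state and interacts with signal handling), it restores the real parent and takes the child back off the ptraced list:

void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
}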
Code example #6
File: soc_common.c Project: 325116067/semc-qsd8x50
int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
				struct skt_dev_info *sinfo)
{
	struct soc_pcmcia_socket *skt;
	int ret, i;

	mutex_lock(&soc_pcmcia_sockets_lock);

	/*
	 * Initialise the per-socket structure.
	 */
	for (i = 0; i < sinfo->nskt; i++) {
		skt = &sinfo->skt[i];

		skt->socket.ops = &soc_common_pcmcia_operations;
		skt->socket.owner = ops->owner;
		skt->socket.dev.parent = dev;

		init_timer(&skt->poll_timer);
		skt->poll_timer.function = soc_common_pcmcia_poll_event;
		skt->poll_timer.data = (unsigned long)skt;
		skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;

		skt->dev	= dev;
		skt->ops	= ops;

		ret = request_resource(&iomem_resource, &skt->res_skt);
		if (ret)
			goto out_err_1;

		ret = request_resource(&skt->res_skt, &skt->res_io);
		if (ret)
			goto out_err_2;

		ret = request_resource(&skt->res_skt, &skt->res_mem);
		if (ret)
			goto out_err_3;

		ret = request_resource(&skt->res_skt, &skt->res_attr);
		if (ret)
			goto out_err_4;

		skt->virt_io = ioremap(skt->res_io.start, 0x10000);
		if (skt->virt_io == NULL) {
			ret = -ENOMEM;
			goto out_err_5;
		}

		if (list_empty(&soc_pcmcia_sockets))
			soc_pcmcia_cpufreq_register();

		list_add(&skt->node, &soc_pcmcia_sockets);

		/*
		 * We initialize default socket timing here, because
		 * we are not guaranteed to see a SetIOMap operation at
		 * runtime.
		 */
		ops->set_timing(skt);

		ret = ops->hw_init(skt);
		if (ret)
			goto out_err_6;

		skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
		skt->socket.resource_ops = &pccard_static_ops;
		skt->socket.irq_mask = 0;
		skt->socket.map_size = PAGE_SIZE;
		skt->socket.pci_irq = skt->irq;
		skt->socket.io_offset = (unsigned long)skt->virt_io;

		skt->status = soc_common_pcmcia_skt_state(skt);

		ret = pcmcia_register_socket(&skt->socket);
		if (ret)
			goto out_err_7;

		WARN_ON(skt->socket.sock != i);

		add_timer(&skt->poll_timer);

		ret = device_create_file(&skt->socket.dev, &dev_attr_status);
		if (ret)
			goto out_err_8;
	}

	dev_set_drvdata(dev, sinfo);
	ret = 0;
	goto out;

	do {
		skt = &sinfo->skt[i];

		device_remove_file(&skt->socket.dev, &dev_attr_status);
 out_err_8:
		del_timer_sync(&skt->poll_timer);
		pcmcia_unregister_socket(&skt->socket);

 out_err_7:
		flush_scheduled_work();

		ops->hw_shutdown(skt);
 out_err_6:
 		list_del(&skt->node);
		iounmap(skt->virt_io);
 out_err_5:
		release_resource(&skt->res_attr);
 out_err_4:
		release_resource(&skt->res_mem);
 out_err_3:
		release_resource(&skt->res_io);
 out_err_2:
		release_resource(&skt->res_skt);
 out_err_1:
		i--;
	} while (i > 0);

	kfree(sinfo);

 out:
	mutex_unlock(&soc_pcmcia_sockets_lock);
	return ret;
}
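
The unwind block above is the kernel's goto-ladder idiom applied to a loop: on failure for socket i, control falls into the matching out_err_N label, releases everything acquired so far for that socket, then i-- tears down the fully-initialized sockets before it. In miniature, for a single object (acquire_a/acquire_b/release_a are hypothetical helpers), the ladder looks like:

int acquire_a(void);	/* hypothetical */
int acquire_b(void);	/* hypothetical */
void release_a(void);	/* hypothetical */

int setup_two_resources(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();
	if (ret)
		goto out_release_a;
	return 0;		/* success: both resources held */

out_release_a:
	release_a();		/* undo in reverse order of acquisition */
out:
	return ret;
}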
Code example #7
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
Code example #8
File: htp_request.c Project: PhilSchroeder/suricata
/**
 * Parses request headers.
 *
 * @param connp
 * @returns HTP_OK on state change, HTP_ERROR on error, or HTP_DATA when more data is needed.
 */
int htp_connp_REQ_HEADERS(htp_connp_t *connp) {
    for (;;) {
        IN_COPY_BYTE_OR_RETURN(connp);

        if (connp->in_header_line == NULL) {
            connp->in_header_line = calloc(1, sizeof (htp_header_line_t));
            if (connp->in_header_line == NULL) return HTP_ERROR;
            connp->in_header_line->first_nul_offset = -1;
        }

        // Keep track of NUL bytes
        if (connp->in_next_byte == 0) {
            // Store the offset of the first NUL
            if (connp->in_header_line->has_nulls == 0) {
                connp->in_header_line->first_nul_offset = connp->in_line_len;
            }

            // Remember how many NULs there were
            connp->in_header_line->flags |= HTP_FIELD_NUL_BYTE;
            connp->in_header_line->has_nulls++;
        }

        // Have we reached the end of the line?
        if (connp->in_next_byte == LF) {
            #ifdef HTP_DEBUG
            fprint_raw_data(stderr, __FUNCTION__, connp->in_line, connp->in_line_len);
            #endif

            // Should we terminate headers?
            if (htp_connp_is_line_terminator(connp, connp->in_line, connp->in_line_len)) {
                // Terminator line

                // Parse previous header, if any
                if (connp->in_header_line_index != -1) {
                    if (connp->cfg->process_request_header(connp) != HTP_OK) {
                        // Note: downstream responsible for error logging
                        return HTP_ERROR;
                    }

                    // Reset index
                    connp->in_header_line_index = -1;
                }

                // Cleanup
                free(connp->in_header_line);
                connp->in_line_len = 0;
                connp->in_header_line = NULL;                

                // We've seen all request headers
                if (connp->in_chunk_count != connp->in_chunk_request_index) {
                    connp->in_tx->flags |= HTP_MULTI_PACKET_HEAD;
                }

                // Move onto the next processing phase
                if (connp->in_tx->progress[0] == TX_PROGRESS_REQ_HEADERS) {
                    // Determine if this request has a body
                    //connp->in_state = htp_connp_REQ_BODY_DETERMINE;
                    connp->in_state = htp_connp_REQ_CONNECT_CHECK;
                } else {
                    // Run hook REQUEST_TRAILER
                    int rc = hook_run_all(connp->cfg->hook_request_trailer, connp);
                    if (rc != HOOK_OK) {
                        htp_log(connp, HTP_LOG_MARK, HTP_LOG_ERROR, 0,
                            "Request trailer callback returned error (%d)", rc);
                        return HTP_ERROR;
                    }

                    // We've completed parsing this request
                    connp->in_state = htp_connp_REQ_IDLE;
                    connp->in_tx->progress[0] = TX_PROGRESS_WAIT;
                }

                return HTP_OK;
            }

            // Prepare line for consumption
            size_t raw_in_line_len = connp->in_line_len;
            htp_chomp(connp->in_line, &connp->in_line_len);

            // Check for header folding
            if (htp_connp_is_line_folded(connp->in_line, connp->in_line_len) == 0) {
                // New header line

                // Parse previous header, if any
                if (connp->in_header_line_index != -1) {
                    if (connp->cfg->process_request_header(connp) != HTP_OK) {
                        // Note: downstream responsible for error logging
                        return HTP_ERROR;
                    }

                    // Reset index
                    connp->in_header_line_index = -1;
                }

                // Remember the index of the first header line
                connp->in_header_line_index = connp->in_header_line_counter;
            } else {
                // Folding; check that there's a previous header line to add to
                if (connp->in_header_line_index == -1) {
                    if (!(connp->in_tx->flags & HTP_INVALID_FOLDING)) {
                        connp->in_tx->flags |= HTP_INVALID_FOLDING;
                        htp_log(connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0,
                            "Invalid request field folding");
                    }
                }
            }

            // Add the raw header line to the list
            if (raw_in_line_len > connp->in_line_len) {
                if (raw_in_line_len - connp->in_line_len == 2 &&
                        connp->in_line[connp->in_line_len] == 0x0d &&
                        connp->in_line[connp->in_line_len + 1] == 0x0a) {
                    connp->in_header_line->terminators = NULL;
                } else {
                    connp->in_header_line->terminators =
                        bstr_memdup((char *) connp->in_line + connp->in_line_len,
                                raw_in_line_len - connp->in_line_len);
                    if (connp->in_header_line->terminators == NULL) {
                        return HTP_ERROR;
                    }
                }
            } else {
                connp->in_header_line->terminators = NULL;
            }

            connp->in_header_line->line = bstr_memdup((char *) connp->in_line, connp->in_line_len);
            if (connp->in_header_line->line == NULL) {
                return HTP_ERROR;
            }
            list_add(connp->in_tx->request_header_lines, connp->in_header_line);
            connp->in_header_line = NULL;

            // Cleanup for the next line
            connp->in_line_len = 0;
            if (connp->in_header_line_index == -1) {
                connp->in_header_line_index = connp->in_header_line_counter;
            }

            connp->in_header_line_counter++;
        }
    }
    return HTP_ERROR;
}
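
Header folding, which the code above detects with htp_connp_is_line_folded(), is the obsolete HTTP continuation syntax: a line beginning with a space or tab extends the value of the previous header. For example:

    X-Custom-Header: first part of the value
     second part of the same value

A folded line arriving before any header line has been seen is what raises the HTP_INVALID_FOLDING flag above.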
Code example #9
File: bitmap.c Project: liexusong/Linux-2.4.16
/* 
** We pre-allocate 8 blocks.  Pre-allocation is used for files > 16 KB only.
** This lowers fragmentation on large files by grabbing a contiguous set of
** blocks at once.  It also limits the number of times the bitmap block is
** logged by making X number of allocation changes in a single transaction.
**
** We are using a border to divide the disk into two parts.  The first part
** is used for tree blocks, which have a very high turnover rate (they
** are constantly allocated then freed)
**
** The second part of the disk is for the unformatted nodes of larger files.
** Putting them away from the tree blocks lowers fragmentation, and makes
** it easier to group files together.  There are a number of different
** allocation schemes being tried right now, each is documented below.
**
** A great deal of the allocator's speed comes because reiserfs_get_block
** sends us the block number of the last unformatted node in the file.  Once
** a given block is allocated past the border, we don't collide with the
** blocks near the search_start again.
** 
*/
int reiserfs_new_unf_blocknrs2 (struct reiserfs_transaction_handle *th, 
				struct inode       * p_s_inode,
				unsigned long      * free_blocknrs,
				unsigned long        search_start)
{
  int ret=0, blks_gotten=0;
  unsigned long border = 0;
  unsigned long bstart = 0;
  unsigned long hash_in, hash_out;
  unsigned long saved_search_start=search_start;
  int allocated[PREALLOCATION_SIZE];
  int blks;

  if (!reiserfs_no_border(th->t_super)) {
    /* we default to having the border at the 10% mark of the disk.  This
    ** is an arbitrary decision and it needs tuning.  It also needs a limit
    ** to prevent it from taking too much space on huge drives.
    */
    bstart = (SB_BLOCK_COUNT(th->t_super) / 10); 
  }
  if (!reiserfs_no_unhashed_relocation(th->t_super)) {
    /* this is a very simple first attempt at preventing too much grouping
    ** around the border value.  Since k_dir_id is never larger than the
    ** highest allocated oid, it is far from perfect, and files will tend
    ** to be grouped towards the start of the border
    */
    border = le32_to_cpu(INODE_PKEY(p_s_inode)->k_dir_id) % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
  } else {
    /* why would we want to declare a local variable to this if statement
    ** name border????? -chris
    ** unsigned long border = 0;
    */
    if (!reiserfs_hashed_relocation(th->t_super)) {
      hash_in = le32_to_cpu((INODE_PKEY(p_s_inode))->k_dir_id);
				/* I wonder if the CPU cost of the
                                   hash will obscure the layout
                                   effect? Of course, whether that
                                   effect is good or bad we don't
                                   know.... :-) */
      
      hash_out = keyed_hash(((char *) (&hash_in)), 4);
      border = hash_out % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
    }
  }
  border += bstart ;
  allocated[0] = 0 ; /* important.  Allows a check later on to see if at
                      * least one block was allocated.  This prevents false
		      * no disk space returns
		      */

  if ( (p_s_inode->i_size < 4 * 4096) || 
       !(S_ISREG(p_s_inode->i_mode)) )
    {
      if ( search_start < border 
	   || (
				/* allow us to test whether it is a
                                   good idea to prevent files from
                                   getting too far away from their
                                   packing locality by some unexpected
                                   means.  This might be poor code for
                                   directories whose files total
                                   larger than 1/10th of the disk, and
                                   it might be good code for
                                   suffering from old insertions when the disk
                                   was almost full. */
               /* changed from !reiserfs_test3(th->t_super), which doesn't
               ** seem like a good idea.  Think about adding blocks to
               ** a large file.  If you've allocated 10% of the disk
               ** in contiguous blocks, you start over at the border value
               ** for every new allocation.  This throws away all the
               ** information sent in about the last block that was allocated
               ** in the file.  Not a good general case at all.
               ** -chris
               */
	       reiserfs_test4(th->t_super) && 
	       (search_start > border + (SB_BLOCK_COUNT(th->t_super) / 10))
	       )
	   )
	search_start=border;
  
      ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				     1/*amount_needed*/, 
				     0/*use reserved blocks for root */,
				     1/*for_formatted*/,
				     0/*for prealloc */) ;  
      return ret;
    }

  /* take a block off the prealloc list and return it -Hans */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count > 0) {
    p_s_inode->u.reiserfs_i.i_prealloc_count--;
    *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block++;

    /* if no more preallocated blocks, remove inode from list */
    if (! p_s_inode->u.reiserfs_i.i_prealloc_count) {
      list_del(&p_s_inode->u.reiserfs_i.i_prealloc_list);
    }
    
    return ret;
  }

				/* else get a new preallocation for the file */
  reiserfs_discard_prealloc (th, p_s_inode);
  /* this uses the last preallocated block as the search_start.  discard
  ** prealloc does not zero out this number.
  */
  if (search_start <= p_s_inode->u.reiserfs_i.i_prealloc_block) {
    search_start = p_s_inode->u.reiserfs_i.i_prealloc_block;
  }
  
  /* doing the compare again forces search_start to be >= the border,
  ** even if the file already had prealloction done.  This seems extra,
  ** and should probably be removed
  */
  if ( search_start < border ) search_start=border; 

  /* If the disk free space is already below 10% we should 
  ** start looking for the free blocks from the beginning 
  ** of the partition, before the border line.
  */
  if ( SB_FREE_BLOCKS(th->t_super) <= (SB_BLOCK_COUNT(th->t_super) / 10) ) {
    search_start=saved_search_start;
  }

  *free_blocknrs = 0;
  blks = PREALLOCATION_SIZE-1;
  for (blks_gotten=0; blks_gotten<PREALLOCATION_SIZE; blks_gotten++) {
    ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				   1/*amount_needed*/, 
				   0/*for root reserved*/,
				   1/*for_formatted*/,
				   (blks_gotten > 0)/*must_be_contiguous*/) ;
    /* if we didn't find a block this time, adjust blks to reflect
    ** the actual number of blocks allocated
    */ 
    if (ret != CARRY_ON) {
      blks = blks_gotten > 0 ? (blks_gotten - 1) : 0 ;
      break ;
    }
    allocated[blks_gotten]= *free_blocknrs;
#ifdef CONFIG_REISERFS_CHECK
    if ( (blks_gotten>0) && (allocated[blks_gotten] - allocated[blks_gotten-1]) != 1 ) {
      /* this should be caught by new_blocknrs now, checking code */
      reiserfs_warning("yura-1, reiserfs_new_unf_blocknrs2: pre-allocated not contiguous set of blocks!\n") ;
      reiserfs_free_block(th, allocated[blks_gotten]);
      blks = blks_gotten-1; 
      break;
    }
#endif
    if (blks_gotten==0) {
      p_s_inode->u.reiserfs_i.i_prealloc_block = *free_blocknrs;
    }
    search_start = *free_blocknrs; 
    *free_blocknrs = 0;
  }
  p_s_inode->u.reiserfs_i.i_prealloc_count = blks;
  *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block;
  p_s_inode->u.reiserfs_i.i_prealloc_block++;

  /* if inode has preallocated blocks, link him to list */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count) {
    list_add(&p_s_inode->u.reiserfs_i.i_prealloc_list,
	     &SB_JOURNAL(th->t_super)->j_prealloc_list);
  } 
  /* we did actually manage to get 1 block */
  if (ret != CARRY_ON && allocated[0] > 0) {
    return CARRY_ON ;
  }
  /* NO_MORE_UNUSED_CONTIGUOUS_BLOCKS should only mean something to
  ** the preallocation code.  The rest of the filesystem asks for a block
  ** and should either get it, or know the disk is full.  The code
  ** above should never allow ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCK,
  ** as it doesn't send for_prealloc = 1 to do_reiserfs_new_blocknrs
  ** unless it has already successfully allocated at least one block.
  ** Just in case, we translate into a return value the rest of the
  ** filesystem can understand.
  **
  ** It is an error to change this without making the
  ** rest of the filesystem understand NO_MORE_UNUSED_CONTIGUOUS_BLOCKS
  ** If you consider it a bug to return NO_DISK_SPACE here, fix the rest
  ** of the fs first.
  */
  if (ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning("reiser-2015: this shouldn't happen, may cause false out of disk space error");
#endif
     return NO_DISK_SPACE; 
  }
  return ret;
}
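
A worked example of the border arithmetic, with made-up numbers: on a filesystem of SB_BLOCK_COUNT = 1,000,000 blocks, bstart = 1,000,000 / 10 = 100,000. For an inode with k_dir_id = 12,345, the unhashed-relocation path computes border = 12,345 % (1,000,000 - 100,000 - 1) + 100,000 = 112,345, so this file's unformatted nodes are steered above block 112,345 while tree blocks stay below the border.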
Code example #10
File: coroipcs.c Project: emrehe/corosync
/*
 * returns 0 if should be called again, -1 if finished
 */
static inline int conn_info_destroy (struct conn_info *conn_info)
{
	unsigned int res;
	void *retval;

	list_del (&conn_info->list);
	list_init (&conn_info->list);
	list_add (&conn_info->list, &conn_info_exit_list_head);

	if (conn_info->state == CONN_STATE_THREAD_REQUEST_EXIT) {
		res = pthread_join (conn_info->thread, &retval);
		conn_info->state = CONN_STATE_THREAD_DESTROYED;
		return (0);
	}

	if (conn_info->state == CONN_STATE_THREAD_INACTIVE ||
		conn_info->state == CONN_STATE_DISCONNECT_INACTIVE) {
		list_del (&conn_info->list);
		close (conn_info->fd);
		api->free (conn_info);
		return (-1);
	}

	if (conn_info->state == CONN_STATE_THREAD_ACTIVE) {
		ipc_sem_post (conn_info->control_buffer, SEMAPHORE_REQUEST_OR_FLUSH_OR_EXIT);
		return (0);
	}

	/*
	 * Retry library exit function if busy
	 */
	if (conn_info->state == CONN_STATE_THREAD_DESTROYED) {
		api->serialize_lock ();
		res = api->exit_fn_get (conn_info->service) (conn_info);
		api->serialize_unlock ();
		api->stats_destroy_connection (conn_info->stats_handle);
		if (res == -1) {
			return (0);
		} else {
			conn_info->state = CONN_STATE_LIB_EXIT_CALLED;
		}
	}

	pthread_mutex_lock (&conn_info->mutex);
	if (conn_info->refcount > 0) {
		pthread_mutex_unlock (&conn_info->mutex);
		return (0);
	}
	list_del (&conn_info->list);
	pthread_mutex_unlock (&conn_info->mutex);

	/*
	 * Let library know, that connection is now closed
	 */
	conn_info->control_buffer->ipc_closed = 1;
	ipc_sem_post (conn_info->control_buffer, SEMAPHORE_RESPONSE);
	ipc_sem_post (conn_info->control_buffer, SEMAPHORE_DISPATCH);

#if _POSIX_THREAD_PROCESS_SHARED > 0
	sem_destroy (&conn_info->control_buffer->sem_request_or_flush_or_exit);
	sem_destroy (&conn_info->control_buffer->sem_request);
	sem_destroy (&conn_info->control_buffer->sem_response);
	sem_destroy (&conn_info->control_buffer->sem_dispatch);
#else
	semctl (conn_info->control_buffer->semid, 0, IPC_RMID);
#endif
	/*
	 * Destroy shared memory segment and semaphore
	 */
	res = munmap ((void *)conn_info->control_buffer, conn_info->control_size);
	res = munmap ((void *)conn_info->request_buffer, conn_info->request_size);
	res = munmap ((void *)conn_info->response_buffer, conn_info->response_size);

	/*
	 * Free allocated data needed to retry exiting library IPC connection
	 */
	if (conn_info->private_data) {
		api->free (conn_info->private_data);
	}
	close (conn_info->fd);
	res = circular_memory_unmap (conn_info->dispatch_buffer, conn_info->dispatch_size);
	zcb_all_free (conn_info);
	api->free (conn_info);
	return (-1);
}
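
Per the comment at the top, a caller drives this as a polled state machine, e.g. (hypothetical call site):

	/* keep stepping the teardown state machine until it reports done */
	while (conn_info_destroy (conn_info) == 0)
		;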
Code example #11
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = 0;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = 1;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
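
Unlike the earlier rx_complete variant (code example #7), this one only queues completed skbs and defers delivery to a workqueue. A hedged sketch of what the queued rx_work handler might look like (names assumed from the fields used above; the real worker in this driver also resubmits USB requests):

static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff	*skb;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (skb->len < ETH_HLEN || skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;
		netif_rx(skb);	/* hand the frame to the network stack */
	}
}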
Code example #12
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			dev_err(ksb->fs_dev.this_device,
					"copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}
		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	dev_dbg(ksb->fs_dev.this_device, "count:%zu space:%zu copied:%zu", count,
			space, copied);

	return copied;
}
Code example #13
File: proc.c Project: AbnerZheng/mooc_os_lab
/* do_fork -     parent process for a new child process
 * @clone_flags: used to guide how to clone the child process
 * @stack:       the parent's user stack pointer. if stack==0, It means to fork a kernel thread.
 * @tf:          the trapframe info, which will be copied to child process's proc->tf
 */
int
do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) {
    int ret = -E_NO_FREE_PROC;
    struct proc_struct *proc;
    if (nr_process >= MAX_PROCESS) {
        goto fork_out;
    }
    ret = -E_NO_MEM;
    //LAB4:EXERCISE2 YOUR CODE
    /*
     * Some Useful MACROs, Functions and DEFINEs, you can use them in below implementation.
     * MACROs or Functions:
     *   alloc_proc:   create a proc struct and init fields (lab4:exercise1)
     *   setup_kstack: alloc pages with size KSTACKPAGE as process kernel stack
     *   copy_mm:      process "proc" duplicates OR shares process "current"'s mm according to clone_flags
     *                 if clone_flags & CLONE_VM, then "share" ; else "duplicate"
     *   copy_thread:  setup the trapframe on the  process's kernel stack top and
     *                 setup the kernel entry point and stack of process
     *   hash_proc:    add proc into proc hash_list
     *   get_pid:      alloc a unique pid for process
     *   wakeup_proc:  set proc->state = PROC_RUNNABLE
     * VARIABLES:
     *   proc_list:    the process set's list
     *   nr_process:   the number of process set
     */

    //    1. call alloc_proc to allocate a proc_struct
    //    2. call setup_kstack to allocate a kernel stack for child process
    //    3. call copy_mm to dup OR share mm according clone_flag
    //    4. call copy_thread to setup tf & context in proc_struct
    //    5. insert proc_struct into hash_list && proc_list
    //    6. call wakeup_proc to make the new child process RUNNABLE
    //    7. set ret value using child proc's pid
    if ((proc = alloc_proc()) == NULL) {
        goto fork_out;
    }

    proc->parent = current;

    if (setup_kstack(proc) != 0) {
        goto bad_fork_cleanup_proc;
    }
    if (copy_mm(clone_flags, proc) != 0) {
        goto bad_fork_cleanup_kstack;
    }
    copy_thread(proc, stack, tf);

    bool intr_flag;
    local_intr_save(intr_flag);
    {
        proc->pid = get_pid();
        hash_proc(proc);
        list_add(&proc_list, &(proc->list_link));
        nr_process ++;
    }
    local_intr_restore(intr_flag);

    wakeup_proc(proc);

    ret = proc->pid;
fork_out:
    return ret;

bad_fork_cleanup_kstack:
    put_kstack(proc);
bad_fork_cleanup_proc:
    kfree(proc);
    goto fork_out;
}
Code example #14
File: l2tp_eth.c Project: ParrotSec/linux-psec
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_eth *priv;
	struct l2tp_eth_sess *spriv;
	int rc;
	struct l2tp_eth_net *pn;

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		rc = -ENODEV;
		goto out;
	}

	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		rc = -EEXIST;
		goto out;
	}

	if (cfg->ifname) {
		dev = dev_get_by_name(net, cfg->ifname);
		if (dev) {
			dev_put(dev);
			rc = -EEXIST;
			goto out;
		}
		strlcpy(name, cfg->ifname, IFNAMSIZ);
	} else
		strcpy(name, L2TP_ETH_DEV_NAME);

	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
				      peer_session_id, cfg);
	if (!session) {
		rc = -ENOMEM;
		goto out;
	}

	dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
			   l2tp_eth_dev_setup);
	if (!dev) {
		rc = -ENOMEM;
		goto out_del_session;
	}

	dev_net_set(dev, net);
	if (session->mtu == 0)
		session->mtu = dev->mtu - session->hdr_len;
	dev->mtu = session->mtu;
	dev->needed_headroom += session->hdr_len;

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->session = session;
	INIT_LIST_HEAD(&priv->list);

	priv->tunnel_sock = tunnel->sock;
	session->recv_skb = l2tp_eth_dev_recv;
	session->session_close = l2tp_eth_delete;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = l2tp_eth_show;
#endif

	spriv = l2tp_session_priv(session);
	spriv->dev = dev;

	rc = register_netdev(dev);
	if (rc < 0)
		goto out_del_dev;

	__module_get(THIS_MODULE);
	/* Must be done after register_netdev() */
	strlcpy(session->ifname, dev->name, IFNAMSIZ);

	dev_hold(dev);
	pn = l2tp_eth_pernet(dev_net(dev));
	spin_lock(&pn->l2tp_eth_lock);
	list_add(&priv->list, &pn->l2tp_eth_dev_list);
	spin_unlock(&pn->l2tp_eth_lock);

	return 0;

out_del_dev:
	free_netdev(dev);
	spriv->dev = NULL;
out_del_session:
	l2tp_session_delete(session);
out:
	return rc;
}
Code example #15
File: file.c Project: Haryaalcar/vcmi-build
/*
====================================================================
Return a list with all accessible files and directories in path
with the extension ext (if != 0). Don't show hidden files.
Root is the name of the topmost directory that can't be left. If moving
up from path would leave root, '..' is not added.
====================================================================
*/
Text* get_file_list( char *path, char *ext, char *root )
{
    Text *text = 0;
    int i, j;
    DIR *dir;
    DIR *test_dir;
    struct dirent *dirent = 0;
    List *list = 0;
    struct stat fstat;
    char file_name[512];
    FILE *file;
    int len;

    /* open this directory */
    if ( ( dir = opendir( path ) ) == 0 ) {
        fprintf( stderr, "get_file_list: can't open parent directory '%s'\n", path );
        return 0;
    }

    text = calloc( 1, sizeof( Text ) );

    /* use dynamic list to gather all valid entries */
    list = list_create( LIST_AUTO_DELETE, LIST_NO_CALLBACK );
    /* read each entry and check if its a valid entry, then add it to the dynamic list */
    while ( ( dirent = readdir( dir ) ) != 0 ) {
        /* hidden files are not displayed */
        if ( dirent->d_name[0] == '.' && dirent->d_name[1] != '.' ) continue;
        /* check if it's the root directory */
        if ( root )
            if ( dirent->d_name[0] == '.' )
                if ( strlen( path ) > strlen( root ) )
                    if ( !strncmp( path + strlen( path ) - strlen( root ), root, strlen( root ) ) )
                        continue;
        /* get stats */
        sprintf( file_name, "%s/%s", path, dirent->d_name );
        if ( stat( file_name, &fstat ) == -1 ) continue;
        /* check directory */
        if ( S_ISDIR( fstat.st_mode ) ) {
            if ( ( test_dir = opendir( file_name ) ) == 0  ) continue;
            closedir( test_dir );
            sprintf( file_name, "*%s", dirent->d_name );
            list_add( list, strdup( file_name ) );
        }
        else
        /* check regular file */
        if ( S_ISREG( fstat.st_mode ) ) {
            /* test it */
            if ( ( file = fopen( file_name, "r" ) ) == 0 ) continue;
            fclose( file );
            /* check if this file has the proper extension */
            if ( ext )
                if ( !strequal( dirent->d_name + ( strlen( dirent->d_name ) - strlen( ext ) ), ext ) )
                    continue;
            list_add( list, strdup( dirent->d_name ) );
        }
    }
    /* close dir */
    closedir( dir );

    /* convert to static list */
    text->count = list->count;
    text->lines = calloc( list->count, sizeof( char* ));
    for ( i = 0; i < text->count; i++ )
        text->lines[i] = strdup( (char*)list_get( list, i ) );
    list_delete( list );

    /* sort this list: directories at top and everything in alphabetical order */
    if ( text->count > 0 )
        for ( i = 0; i < text->count - 1; i++ )
            for ( j = i + 1; j < text->count; j++ ) {
                /* directory comes first */
                if ( text->lines[j][0] == '*' ) {
                    if ( text->lines[i][0] != '*' )
                        swap( &text->lines[i], &text->lines[j] );
                    else {
                        /* do not exceed buffer size of smaller buffer */
                        len = strlen( text->lines[i] );
                        if ( strlen( text->lines[j] ) < len ) len = strlen( text->lines[j] );
                        if ( strncmp( text->lines[j], text->lines[i], len ) < 0 )
                            swap( &text->lines[i], &text->lines[j] );
                    }
                }
                else {
                    /* do not exceed buffer size of smaller buffer */
                    len = strlen( text->lines[i] );
                    if ( strlen( text->lines[j] ) < len ) len = strlen( text->lines[j] );
                    if ( strncmp( text->lines[j], text->lines[i], len ) < 0 )
                        swap( &text->lines[i], &text->lines[j] );
                }
            }

    return text;
}
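
Hypothetical usage, following the conventions above (directory entries come back prefixed with '*'):

void list_level_files( void )
{
    int i;
    Text *files = get_file_list( "maps", ".lvl", "maps" );
    if ( files == 0 ) return;
    for ( i = 0; i < files->count; i++ )
        printf( "%s\n", files->lines[i] ); /* '*name' entries are directories */
}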
Code example #16
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
Code example #17
File: buddy_allocator.c Project: agustingianni/ArgOS
page_t *
buddy_page_alloc(uint32_t order, pgflags_t flags)
{
        /*
         * TODO: We should acquire a lock here to protect the list
         * handling.  Linux uses an interrupt-safe spinlock for this.
         * Ideally the access would be locked only when absolutely
         * necessary, which would allow multiple allocations to be
         * satisfied at the same time.
         */

        if (order >= MAX_ORDER)
		return NULL;

	uint32_t current_order = order;

	buddy_list_t *free_list = &buddy.free_lists[order];

	struct list_head *head;
	struct list_head *curr;

	page_t *page;

	do
	{
		/* List of free page blocks of the current order */
		head = &free_list->head;

		/* the first free page block */
		curr = head->next;

		/* If the list is not empty, there are pages available to take */
		if (!list_empty(head))
		{
			/* Index of the page within mem_map */
			unsigned int index;

			/* Get the page structure (head here is the struct member, not the variable) */
			page = list_entry(curr, page_t, head);

			/* Remove the current block of pages from the free list */
			list_del(curr);

			/* Compute the mem_map index of the first page of the block */
			index = page - mem_map;

			#define MARK_USED(index, order, free_list) \
				bit_complement((index) >> (1+(order)), (free_list)->page_bitmap)

			/* At the top order (MAX_ORDER-1) there is no buddy */
			if (current_order != MAX_ORDER-1)
				MARK_USED(index, current_order, free_list);

			/* Size of the current block */
			uint32_t size = 1 << current_order;

			/*
			 * If current_order > order, there was no block of exactly
			 * 2**order pages, so a larger order was chosen to be
			 * split.
			 */
			while (current_order > order)
			{
				/* Move to the free list one order below */
				free_list--;

				/* Decrement the current order */
				current_order--;

				/* Size of a 2**current_order block */
				size >>= 1;

				/* Add buddy1 to the list of free blocks */
				list_add(&(page)->head, &free_list->head);

				/*
				 * Mark that one of the two buddies is in use,
				 * without recording which of the two it is.
				 */
				MARK_USED(index, current_order, free_list);

				/* Continue with the next block */
				index += size;

				/*
				 * Next page: size is really the number of pages
				 * we skip, i.e. the size of buddy1.
				 */
				page += size;
			}

			/* current_order == order -> we have our block */
			//set_page_count(page, 1);

			return page;
		}

		current_order++;
		free_list++;
	} while (current_order < MAX_ORDER);

	/* No free block of sufficient order was found */
	return NULL;
}
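
A hypothetical call site: requesting order 2 returns 2**2 = 4 physically contiguous pages, splitting a larger free block if no order-2 block is available (page_to_virt is an assumed helper, not from this file):

void *alloc_four_pages(void)
{
	page_t *pages = buddy_page_alloc(2, 0 /* pgflags */);

	if (pages == NULL)
		return NULL;		/* no free block of order >= 2 */
	return page_to_virt(pages);	/* assumed page -> kernel VA helper */
}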
Code example #18
File: sw_modinit.c Project: billzbh/ov-secure-kernel
/**
 * @brief 
 *      This function is called by each module for registration
 *
 * @param sw_dev
 *      File operations structure of the device
 */
void sw_device_register(struct sw_file_operations* sw_dev)
{
    list_add(&sw_dev_head.dev_list, &sw_dev->head);    
}
Code example #19
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
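
The three size lines above are the standard round-up-to-maxpacket idiom: adding maxpacket-1 and trimming the remainder grows the buffer to the next full-packet boundary, so the "terminate on short read" convention still works. A standalone check with illustrative numbers:

#include <stdio.h>

static size_t round_up(size_t size, size_t maxpacket)
{
	size += maxpacket - 1;
	size -= size % maxpacket;
	return size;
}

int main(void)
{
	/* e.g. 14-byte ethhdr + 1500-byte MTU against a 512-byte bulk endpoint */
	printf("%zu\n", round_up(1514, 512));	/* -> 1536, i.e. 3 full packets */
	return 0;
}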
コード例 #20
0
ファイル: proc.c プロジェクト: liangchao1992/ucore
// hash_proc - add proc into proc hash_list
static void hash_proc(struct proc_struct *proc)
{
	list_add(hash_list + pid_hashfn(proc->pid), &(proc->hash_link));
}
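
The complementary lookup walks the same bucket that hash_proc() fills. A sketch in ucore's style; pid_hashfn, le2proc, list_next and MAX_PID are assumed from the surrounding project, so this is illustrative rather than quoted:

/* Sketch only: find a proc by pid in the same hash bucket. */
static struct proc_struct *find_proc(int pid)
{
	if (0 < pid && pid < MAX_PID) {
		list_entry_t *list = hash_list + pid_hashfn(pid), *le = list;
		while ((le = list_next(le)) != list) {
			struct proc_struct *proc = le2proc(le, hash_link);
			if (proc->pid == pid)
				return proc;
		}
	}
	return NULL;
}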
コード例 #21
0
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	req->zero = 1;
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
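
The zlp branch above encodes a USB framing rule: a transfer that is an exact multiple of maxpacket does not end the host's read, so the gadget must either send a zero-length packet or pad one extra byte. A standalone sketch of that decision:

#include <stdio.h>

/* Returns the wire length: pad one byte when the hardware can't send
 * a zero-length packet and the payload fills its last packet exactly. */
static int wire_length(int length, int maxpacket, int hw_zlp_ok)
{
	if (!hw_zlp_ok && (length % maxpacket) == 0)
		length++;
	return length;
}

int main(void)
{
	printf("%d\n", wire_length(1024, 512, 0));	/* -> 1025, padded */
	printf("%d\n", wire_length(1024, 512, 1));	/* -> 1024, zlp used */
	return 0;
}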
コード例 #22
0
ファイル: proc.c プロジェクト: liangchao1992/ucore
// copy_mm - process "proc" duplicate OR share process "current"'s mm according clone_flags
//         - if clone_flags & CLONE_VM, then "share" ; else "duplicate"
static int copy_mm(uint32_t clone_flags, struct proc_struct *proc)
{
	struct mm_struct *mm, *oldmm = current->mm;

	/* current is a kernel thread */
	if (oldmm == NULL) {
		return 0;
	}
	if (clone_flags & CLONE_VM) {
		mm = oldmm;
		goto good_mm;
	}

	int ret = -E_NO_MEM;
	if ((mm = mm_create()) == NULL) {
		goto bad_mm;
	}
	if (setup_pgdir(mm) != 0) {
		goto bad_pgdir_cleanup_mm;
	}

	lock_mm(oldmm);
	{
		ret = dup_mmap(mm, oldmm);
	}
	unlock_mm(oldmm);

#ifdef UCONFIG_BIONIC_LIBC
	lock_mm(mm);
	{
		ret = remapfile(mm, proc);
	}
	unlock_mm(mm);
#endif //UCONFIG_BIONIC_LIBC

	if (ret != 0) {
		goto bad_dup_cleanup_mmap;
	}

good_mm:
	if (mm != oldmm) {
		mm->brk_start = oldmm->brk_start;
		mm->brk = oldmm->brk;
		bool intr_flag;
		local_intr_save(intr_flag);
		{
			list_add(&(proc_mm_list), &(mm->proc_mm_link));
		}
		local_intr_restore(intr_flag);
	}
	mm_count_inc(mm);
	proc->mm = mm;
	set_pgdir(proc, mm->pgdir);
	return 0;
bad_dup_cleanup_mmap:
	exit_mmap(mm);
	put_pgdir(mm);
bad_pgdir_cleanup_mm:
	mm_destroy(mm);
bad_mm:
	return ret;
}
コード例 #23
0
ファイル: mod.c プロジェクト: chyyuu/ucore-driver
static noinline struct module *load_module(void __user * umod,
					   unsigned long len,
					   const char __user * uargs)
{
	struct elfhdr *hdr;
	struct secthdr *sechdrs;
	char *secstrings, *args, *modmagic, *strtab = NULL;
	//char *staging;

	unsigned int i;
	unsigned int symindex = 0;
	unsigned int strindex = 0;
	unsigned int modindex, versindex, infoindex, pcpuindex;
	struct module *mod;
	long err = 0;
	void *ptr = NULL;

	kprintf("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs);

	if (len < sizeof(*hdr))
		return NULL;
	if (len > 64 * 1024 * 1024 || (hdr = kmalloc(len)) == NULL)
		return NULL;

	kprintf("load_module: copy_from_user\n");

	struct mm_struct *mm = current->mm;
	lock_mm(mm);
	if (!copy_from_user(mm, hdr, umod, len, 1)) {
		unlock_mm(mm);
		goto free_hdr;
	}
	unlock_mm(mm);

	kprintf("load_module: hdr:%p\n", hdr);
	// sanity check
	if (memcmp(&(hdr->e_magic), ELFMAG, SELFMAG) != 0
	    || hdr->e_type != ET_REL || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(*sechdrs)) {
		kprintf("load_module: sanity check failed.\n");
		goto free_hdr;
	}

	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(*sechdrs))
		goto truncated;

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	sechdrs[0].sh_addr = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_NOBITS
		    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size)
			goto truncated;

		// mark sh_addr
		sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;

		if (sechdrs[i].sh_type == SHT_SYMTAB) {
			symindex = i;
			strindex = sechdrs[i].sh_link;
			strtab = (char *)hdr + sechdrs[strindex].sh_offset;
		}

	}

	modindex =
	    find_sec(hdr, sechdrs, secstrings, ".gnu.linkonce.this_module");

	if (!modindex) {
		kprintf("load_module: No module found in object.\n");
		goto free_hdr;
	}
	// temp: point mod into copy of data
	mod = (void *)sechdrs[modindex].sh_addr;

	if (symindex == 0) {
		kprintf("load_module: %s module has no symbols (stripped?).\n",
			mod->name);
		goto free_hdr;
	}
	versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
	infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
	pcpuindex = 0;//find_pcpusec(hdr, sechdrs, secstrings);

	// don't keep modinfo and version
	sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
	sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;

	// keep symbol and string tables
	sechdrs[symindex].sh_flags |= SHF_ALLOC;
	sechdrs[strindex].sh_flags |= SHF_ALLOC;

	/*if (!check_modstruct_version(sechdrs, versindex, mod)) {
		goto free_hdr;
	}*/

	/*
	   modmagic = get_modinfo(sechdrs, infoindex, "vermagic");

	   if (!modmagic) {
	   kprintf("load_module: bad vermagic\n");
	   goto free_hdr;
	   } else if (!same_magic(modmagic, vermagic, versindex)) {
	   ; 
	   // TODO: module magic is left for future use.
	   }
	 */

	//staging = get_modinfo(sechdrs, infoindex, "staging");
	// TODO: staging is left for future use.

	if (find_module(mod->name)) {
		kprintf("load_module: module %s exists\n", mod->name);
		goto free_mod;
	}

	mod->state = MODULE_STATE_COMING;

	// err = module_frob_arch_sections(hdr, sechdrs, secstrings, mod);
	// TODO: we do not need it for x86 or arm

	// TODO: percpu is no longer needed.

	layout_sections(mod, hdr, sechdrs, secstrings);

	ptr = module_alloc_update_bounds(mod->core_size);

	if (!ptr) {
		goto free_percpu;
	}
	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	ptr = module_alloc_update_bounds(mod->init_size);

	if (!ptr && mod->init_size) {
		goto free_core;
	}
	memset(ptr, 0, mod->init_size);
	mod->module_init = ptr;

	kprintf("load_module: final section addresses:\n");
	for (i = 0; i < hdr->e_shnum; i++) {
		void *dest;
		if (!(sechdrs[i].sh_flags & SHF_ALLOC)) {
			kprintf("\tSkipped %s\n",
				secstrings + sechdrs[i].sh_name);
			continue;
		}
		if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
			dest =
			    mod->module_init +
			    (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + sechdrs[i].sh_entsize;
		if (sechdrs[i].sh_type != SHT_NOBITS)
			memcpy(dest, (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size);
		sechdrs[i].sh_addr = (unsigned long)dest;
		kprintf("\t0x%lx %s\n", sechdrs[i].sh_addr,
			secstrings + sechdrs[i].sh_name);
	}
	/* Module has been moved. */
	mod = (void *)sechdrs[modindex].sh_addr;

	/* Now we've moved module, initialize linked lists, etc. */
	module_unload_init(mod);

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));

	err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
			       mod);

	if (err < 0)
		goto cleanup;

	mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab");

	// relocations
	for (i = 1; i < hdr->e_shnum; i++) {
		const char *strtab = (char *)sechdrs[strindex].sh_addr;
		unsigned int info = sechdrs[i].sh_info;

		/* Not a valid relocation section */
		if (info >= hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(sechdrs[info].sh_flags & SHF_ALLOC))
			continue;

		if (sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(sechdrs, strtab, symindex, i, mod);
		else if (sechdrs[i].sh_type == SHT_RELA)
			err =
			    apply_relocate_add(sechdrs, strtab, symindex, i,
					       mod);

		if (err < 0)
			goto cleanup;
	}

	err = verify_export_symbols(mod);
	if (err < 0)
		goto cleanup;

	// TODO: kallsyms is left for future use.
	//add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);

	err = module_finalize(hdr, sechdrs, mod);
	if (err < 0)
		goto cleanup;

	list_add(&modules, &mod->list);

	kfree(hdr);
	return mod;

cleanup:
	module_unload_free(mod);

free_init:
	module_free(mod, mod->module_init);

free_core:
	module_free(mod, mod->module_core);

free_percpu:

free_mod:

free_hdr:
	kfree(hdr);
	return NULL;

truncated:
	kprintf("load_module: module len %lu truncated.\n");
	goto free_hdr;
}
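
The copy loop above relies on a trick inherited from the Linux module loader: layout_sections() stashes each section's destination offset in sh_entsize and sets the top bit (INIT_OFFSET_MASK) for sections bound for the throwaway init region. A standalone sketch of that encoding; the mask definition is the usual Linux top-bit one and the offsets are illustrative:

#include <stdio.h>

#define INIT_OFFSET_MASK (1UL << (sizeof(unsigned long) * 8 - 1))

int main(void)
{
	unsigned long core_ent = 0x40;				/* module_core + 0x40 */
	unsigned long init_ent = INIT_OFFSET_MASK | 0x10;	/* module_init + 0x10 */

	printf("core? %d offset 0x%lx\n",
	       !(core_ent & INIT_OFFSET_MASK), core_ent & ~INIT_OFFSET_MASK);
	printf("init? %d offset 0x%lx\n",
	       !!(init_ent & INIT_OFFSET_MASK), init_ent & ~INIT_OFFSET_MASK);
	return 0;
}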
コード例 #24
0
ファイル: proc.c プロジェクト: liangchao1992/ucore
static int load_icode(int fd, int argc, char **kargv, int envc, char **kenvp)
{
	assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
	assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
	if (current->mm != NULL) {
		panic("load_icode: current->mm must be empty.\n");
	}

	int ret = -E_NO_MEM;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t real_entry;
//#endif //UCONFIG_BIONIC_LIBC

	struct mm_struct *mm;
	if ((mm = mm_create()) == NULL) {
		goto bad_mm;
	}

	if (setup_pgdir(mm) != 0) {
		goto bad_pgdir_cleanup_mm;
	}

	mm->brk_start = 0;

	struct Page *page;

	struct elfhdr __elf, *elf = &__elf;
	if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
		goto bad_elf_cleanup_pgdir;
	}

	if (elf->e_magic != ELF_MAGIC) {
		ret = -E_INVAL_ELF;
		goto bad_elf_cleanup_pgdir;
	}
//#ifdef UCONFIG_BIONIC_LIBC
	real_entry = elf->e_entry;

	uint32_t load_address, load_address_flag = 0;
//#endif //UCONFIG_BIONIC_LIBC

	struct proghdr __ph, *ph = &__ph;
	uint32_t vm_flags, phnum;
	pte_perm_t perm = 0;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t is_dynamic = 0, interp_idx;
	uint32_t bias = 0;
//#endif //UCONFIG_BIONIC_LIBC
	for (phnum = 0; phnum < elf->e_phnum; phnum++) {
		off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		if (ph->p_type == ELF_PT_INTERP) {
			is_dynamic = 1;
			interp_idx = phnum;
			continue;
		}

		if (ph->p_type != ELF_PT_LOAD) {
			continue;
		}
		if (ph->p_filesz > ph->p_memsz) {
			ret = -E_INVAL_ELF;
			goto bad_cleanup_mmap;
		}

		if (ph->p_va == 0 && !bias) {
			bias = 0x00008000;
		}

		if ((ret = map_ph(fd, ph, mm, &bias, 0)) != 0) {
			kprintf("load address: 0x%08x size: %d\n", ph->p_va,
				ph->p_memsz);
			goto bad_cleanup_mmap;
		}

		if (load_address_flag == 0)
			load_address = ph->p_va + bias;
		++load_address_flag;

	  /*********************************************/
		/*
		   vm_flags = 0;
		   ptep_set_u_read(&perm);
		   if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
		   if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
		   if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
		   if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

		   if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
		   goto bad_cleanup_mmap;
		   }

		   if (mm->brk_start < ph->p_va + ph->p_memsz) {
		   mm->brk_start = ph->p_va + ph->p_memsz;
		   }

		   off_t offset = ph->p_offset;
		   size_t off, size;
		   uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

		   end = ph->p_va + ph->p_filesz;
		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
		   goto bad_cleanup_mmap;
		   }
		   start += size, offset += size;
		   }

		   end = ph->p_va + ph->p_memsz;

		   if (start < la) {
		   // ph->p_memsz == ph->p_filesz 
		   if (start == end) {
		   continue ;
		   }
		   off = start + PGSIZE - la, size = PGSIZE - off;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   assert((end < la && start == end) || (end >= la && start == la));
		   }

		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   }
		 */
	  /**************************************/
	}

	mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

	/* setup user stack */
	vm_flags = VM_READ | VM_WRITE | VM_STACK;
	if ((ret =
	     mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags,
		    NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

	if (is_dynamic) {
		elf->e_entry += bias;

		bias = 0;

		off_t phoff =
		    elf->e_phoff + sizeof(struct proghdr) * interp_idx;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		char *interp_path = (char *)kmalloc(ph->p_filesz);
		load_icode_read(fd, interp_path, ph->p_filesz, ph->p_offset);

		int interp_fd = sysfile_open(interp_path, O_RDONLY);
		assert(interp_fd >= 0);
		struct elfhdr interp___elf, *interp_elf = &interp___elf;
		assert((ret =
			load_icode_read(interp_fd, interp_elf,
					sizeof(struct elfhdr), 0)) == 0);
		assert(interp_elf->e_magic == ELF_MAGIC);

		struct proghdr interp___ph, *interp_ph = &interp___ph;
		uint32_t interp_phnum;
		uint32_t va_min = 0xffffffff, va_max = 0;
		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			if (va_min > interp_ph->p_va)
				va_min = interp_ph->p_va;
			if (va_max < interp_ph->p_va + interp_ph->p_memsz)
				va_max = interp_ph->p_va + interp_ph->p_memsz;
		}

		bias = get_unmapped_area(mm, va_max - va_min + 1 + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);

		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			assert((ret =
				map_ph(interp_fd, interp_ph, mm, &bias,
				       1)) == 0);
		}

		real_entry = interp_elf->e_entry + bias;

		sysfile_close(interp_fd);
		kfree(interp_path);
	}

	sysfile_close(fd);

	bool intr_flag;
	local_intr_save(intr_flag);
	{
		list_add(&(proc_mm_list), &(mm->proc_mm_link));
	}
	local_intr_restore(intr_flag);
	mm_count_inc(mm);
	current->mm = mm;
	set_pgdir(current, mm->pgdir);
	mp_set_mm_pagetable(mm);

	if (!is_dynamic) {
		real_entry += bias;
	}
#ifdef UCONFIG_BIONIC_LIBC
	if (init_new_context_dynamic(current, elf, argc, kargv, envc, kenvp,
				     is_dynamic, real_entry, load_address,
				     bias) < 0)
		goto bad_cleanup_mmap;
#else
	if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0)
		goto bad_cleanup_mmap;
#endif //UCONFIG_BIONIC_LIBC
	ret = 0;
out:
	return ret;
bad_cleanup_mmap:
	exit_mmap(mm);
bad_elf_cleanup_pgdir:
	put_pgdir(mm);
bad_pgdir_cleanup_mm:
	mm_destroy(mm);
bad_mm:
	goto out;
}
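
load_icode() leans on page-alignment arithmetic throughout (mm->brk_start, the interpreter bias). A standalone sketch of what ROUNDUP/ROUNDDOWN do, written out in simplified form under the assumption that PGSIZE is a power-of-two page size:

#include <stdio.h>

#define PGSIZE 4096
#define ROUNDDOWN(a, n)	((a) / (n) * (n))
#define ROUNDUP(a, n)	(ROUNDDOWN((a) + (n) - 1, (n)))

int main(void)
{
	printf("%d\n", ROUNDUP(5000, PGSIZE));		/* -> 8192 */
	printf("%d\n", ROUNDDOWN(5000, PGSIZE));	/* -> 4096 */
	return 0;
}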
コード例 #25
0
ファイル: slob.c プロジェクト: fusion2004/cop4610
static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}
コード例 #26
0
ファイル: nfssvc.c プロジェクト: Dronevery/JetsonTK1-kernel
/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct fs_struct *fsp;
	int		err;
	struct nfsd_list me;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	lockd_up();				/* start lockd */

	me.task = current;
	list_add(&me.list, &nfsd_list);

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(serv, rqstp,
				       60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked.  */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}

	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		err = signo;
	}

	lock_kernel();

	/* Release lockd */
	lockd_down();

	/* Check if this is last thread */
	if (serv->sv_nrthreads==1) {
		
		printk(KERN_WARNING "nfsd: last server has exited\n");
		if (err != SIG_NOCLEAN) {
			printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
			nfsd_export_flush();
		}
		nfsd_serv = NULL;
	        nfsd_racache_shutdown();	/* release read-ahead cache */
		nfs4_state_shutdown();
	}
	list_del(&me.list);
	nfsdstats.th_cnt --;

out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	module_put_and_exit(0);
}
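
The two sigprocmask() calls above switch nfsd between "block everything except shutdown signals" while idle and the wider allowed_mask while processing a request. A userspace approximation of siginitsetinv's inverted-set construction, with SIGINT standing in for SHUTDOWN_SIGS:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t shutdown_mask;

	sigfillset(&shutdown_mask);		/* start from "all signals"...  */
	sigdelset(&shutdown_mask, SIGINT);	/* ...minus the shutdown signal */
	sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);
	printf("everything but SIGINT is now blocked\n");
	return 0;
}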
コード例 #27
0
ファイル: intro.c プロジェクト: liwcezar/atrinik
/**
 * Show the main GUI after starting the client -- servers list, chat box,
 * connecting to server, etc.
 */
void intro_show(void)
{
    SDL_Surface *texture;
    int x, y;
    size_t server_count;
    server_struct *node;
    char buf[MAX_BUF];
    SDL_Rect box;

    sound_start_bg_music("intro.ogg", setting_get_int(OPT_CAT_SOUND, OPT_VOLUME_MUSIC), -1);

    texture = TEXTURE_CLIENT("intro");

    /* Background */
    surface_show(ScreenSurface, 0, 0, NULL, texture);
    textwin_show(ScreenSurface, texture->w, 1, ScreenSurface->w - texture->w - 2, ScreenSurface->h - 3);

    /* Calculate whether to show the eyes or not. Blinks every
     * EYES_BLINK_TIME ticks, then waits EYES_BLINK_DELAY ticks until
     * showing the eyes again. */
    if (SDL_GetTicks() - eyes_blink_ticks >= (eyes_draw ? EYES_BLINK_TIME : EYES_BLINK_DELAY)) {
        eyes_blink_ticks = SDL_GetTicks();
        eyes_draw++;
    }

    if (eyes_draw) {
        SDL_Rect src_box;

        src_box.x = 0;
        src_box.y = eyes_draw - 1;
        src_box.w = TEXTURE_CLIENT("eyes")->w;
        src_box.h = TEXTURE_CLIENT("eyes")->h;
        surface_show(ScreenSurface, texture->w - 90, 310 + src_box.y, &src_box, TEXTURE_CLIENT("eyes"));

        if (eyes_draw > 1) {
            eyes_draw++;

            if (eyes_draw > src_box.h) {
                eyes_draw = 1;
            }
        }
    }

    texture = TEXTURE_CLIENT("servers_bg");
    x = 15;
    y = ScreenSurface->h - texture->h - 5;
    surface_show(ScreenSurface, x, y, NULL, texture);

    server_count = server_get_count();

    /* Create the buttons. */
    if (!list_servers) {
        button_create(&button_play);
        button_create(&button_refresh);
        button_create(&button_server);
        button_create(&button_settings);
        button_create(&button_update);
        button_create(&button_help);
        button_create(&button_credits);
        button_create(&button_quit);
    }

    /* List doesn't exist or the count changed? Create new list. */
    if (!list_servers || last_server_count != server_count) {
        size_t i;

        /* Remove it if it exists already. */
        if (list_servers) {
            list_remove(list_servers);
        }

        /* Create the servers list. */
        list_servers = list_create(11, 3, 8);
        list_servers->handle_enter_func = list_handle_enter;
        list_servers->handle_esc_func = list_handle_esc;
        list_servers->text_color_hook = list_text_color;
        list_scrollbar_enable(list_servers);
        list_set_column(list_servers, 0, 295, 7, "Server", -1);
        list_set_column(list_servers, 1, 50, 9, "Port", 1);
        list_set_column(list_servers, 2, 46, 7, "Players", 1);

        /* Add the servers to the list. */
        for (i = 0; i < server_count; i++) {
            node = server_get_id(i);

            list_add(list_servers, i, 0, node->name);
            snprintf(VS(buf),
                     "%d",
                     node->port_crypto == -1 ? node->port : node->port_crypto);
            list_add(list_servers, i, 1, buf);

            if (node->player >= 0) {
                snprintf(buf, sizeof(buf), "%d", node->player);
            } else {
                strcpy(buf, "-");
            }

            list_add(list_servers, i, 2, buf);
        }

        /* Store the new count. */
        last_server_count = server_count;
    }

    /* Actually draw the list. */
    list_show(list_servers, x + 12, y + 8);
    node = server_get_id(list_servers->row_selected - 1);

    /* Do we have any selected server? If so, show its version and
     * description. */
    if (node) {
        snprintf(buf, sizeof(buf), "Version: %s", node->version);
        text_show_shadow(ScreenSurface, FONT_ARIAL10, buf, x + 13, y + 185, COLOR_HGOLD, COLOR_BLACK, 0, NULL);

        box.w = 410;
        box.h = 48;
        text_show(ScreenSurface, FONT_ARIAL10, node->desc, x + 13, y + 197, COLOR_WHITE, TEXT_WORD_WRAP | TEXT_MARKUP, &box);
    }

    /* Show whether we are connecting to the metaserver or not. */
    if (ms_connecting(-1)) {
        text_show_shadow(ScreenSurface, FONT_ARIAL10, "Connecting to metaserver, please wait...", x + 105, y + 8, COLOR_HGOLD, COLOR_BLACK, 0, NULL);
    } else {
        text_show_shadow(ScreenSurface, FONT_ARIAL10, "Select a secure server.", x + 196, y + 8, COLOR_GREEN, COLOR_BLACK, 0, NULL);
    }

    texture = TEXTURE_CLIENT("servers_bg_over");
    surface_show(ScreenSurface, x, y, NULL, texture);

    x += texture->w + 20;
    texture = TEXTURE_CLIENT("news_bg");
    surface_show(ScreenSurface, x, y, NULL, texture);

    box.w = texture->w;
    box.h = 0;
    text_show_shadow(ScreenSurface, FONT_SERIF12, "Game News", x, y + 10, COLOR_HGOLD, COLOR_BLACK, TEXT_ALIGN_CENTER, &box);

    /* No list yet, make one and start downloading the data. */
    if (!list_news) {
        /* Start downloading. */
        news_request = curl_request_create(clioption_settings.game_news_url,
                                           CURL_PKEY_TRUST_ULTIMATE);
        curl_request_start_get(news_request);

        list_news = list_create(18, 1, 8);
        list_news->focus = 0;
        list_news->handle_enter_func = list_handle_enter;
        list_news->handle_esc_func = list_handle_esc;
        list_set_column(list_news, 0, 150, 7, NULL, -1);
        list_set_font(list_news, FONT_ARIAL10);
    }

    /* Download in progress? */
    if (news_request != NULL) {
        curl_state_t state = curl_request_get_state(news_request);
        /* Finished downloading, parse the data. */
        if (state == CURL_STATE_OK) {
            char *body = curl_request_get_body(news_request, NULL);
            if (body != NULL) {
                uint32_t i = 0;
                char *cp = strtok(body, "\n");
                while (cp != NULL) {
                    list_add(list_news, i++, 0, cp);
                    cp = strtok(NULL, "\n");
                }
            }
        }

        /* Finished downloading or there was an error: clean up in either
         * case. */
        if (state != CURL_STATE_INPROGRESS) {
            curl_request_free(news_request);
            news_request = NULL;
        }
    }

    /* Show the news list. */
    list_show(list_news, x + 13, y + 10);

    button_play.x = button_refresh.x = button_server.x = button_settings.x = button_update.x = button_help.x = button_credits.x = button_quit.x = 489;
    y += 2;

    button_play.y = y + 10;
    button_show(&button_play, "Play");

    button_refresh.y = y + 35;
    button_show(&button_refresh, "Refresh");

    button_server.y = y + 60;
    button_show(&button_server, "Server");

    button_settings.y = y + 86;
    button_show(&button_settings, "Settings");

    button_update.y = y + 110;
    button_show(&button_update, "Update");

    button_help.y = y + 135;
    button_show(&button_help, "Help");

    button_credits.y = y + 160;
    button_show(&button_credits, "Credits");

    button_quit.y = y + 224;
    button_show(&button_quit, "Quit");

    if (clioption_settings.connect[0] && cpl.state < ST_STARTCONNECT) {
        size_t i;

        for (i = 0; i < server_count; i++) {
            node = server_get_id(i);

            if (strcasecmp(clioption_settings.connect[0], node->name) == 0) {
                list_servers->row_selected = i + 1;

                if (!clioption_settings.reconnect) {
                    efree(clioption_settings.connect[0]);
                    clioption_settings.connect[0] = NULL;
                }

                event_push_key_once(SDLK_RETURN, 0);
                break;
            }
        }
    }
}
コード例 #28
0
ファイル: itens_cgi.c プロジェクト: elyezer/controle-estoque
int main(int argc, char const *argv[])
{
    list_t * list = NULL;
    node_t * node = NULL;
    item_t * item = NULL;
    item_t * novo_item = NULL;
    char buffer[500];
    request_t * request = NULL;
    response_t * response = response_empty(NULL);
    var_t * var = NULL;
    char * query = NULL;
    error_t error = ERROR_NULL;
    unsigned int user_id;
    unsigned char user_level;

    request_process(&request);

    login_info(request, &user_id, &user_level);

    if (user_level > ANONYMOUS)
    {
        node = request->GET->first;
        while (node != NULL)
        {
            var = (var_t *) node->data;

            if (strcmp(var->name, "q") == 0)
            {
                query = var->value;
            }

            node = node->next;
        }

        itens_load(&list, query);

        if (list->last != NULL)
        {
            item = (item_t *) list->last->data;
            last_id = item->id;
        }

        if (request->method == POST)
        {
            novo_item = form_process(request);
            if (novo_item != NULL)
            {
                if (novo_item->id == 0)
                {
                    novo_item->id = ++last_id;
                    list_add(list, novo_item);
                }
                else
                {
                    node = list->first;
                    while (node != NULL)
                    {
                        item = (item_t *) node->data;

                        if (item->id == novo_item->id)
                        {
                            item->quantidade = novo_item->quantidade;
                            item->fim_estoque = novo_item->fim_estoque;
                            break;
                        }

                        node = node->next;
                    }
                }

                itens_save(list);
            }
            else
            {
                error = ERROR_ITEM;
            }
        }

        if (error != ERROR_NULL)
        {
            error_page(&response, error, "/cgi-bin/itens");
        }
        else
        {
            login_refresh_session(&response, user_id, user_level);
            response_write_template(&response, "templates/header.html");
            response_write_template(&response, "templates/itens.html");

            if (list->first == NULL)
            {
                response_write(&response, "<p>Nenhum item encontrado</p>");
            }
            else
            {
                response_write(&response, "<table class=\"table table-bordered table-striped\">"
                    "<tr><th>Id</th><th>Nome</th><th>Tipo</th><th>Descrição</th>"
                    "<th>Quantidade</th><th>Previsão de fim de estoque</th>"
                    "<th class=\"edit\">Ações</th></tr>");
                node = list->first;
                while (node != NULL)
                {
                    item = (item_t *) node->data;
                    sprintf(buffer,
                        "<tr><td>%u</td><td class=\"nome\">%s</td><td class=\"tipo\">%s</td>"
                        "<td class=\"descricao\">%s</td><td class=\"quantidade\">%u</td>"
                        "<td class=\"fimestoque\">%s</td><td class=\"edit\">"
                        "<div class=\"btn-group\">"
                        "<button class=\"btn edit-btn\" data-id=\"%u\"><i class=\"icon-pencil\"></i></button>"
                        "</div>"
                        "</td></tr>",
                        item->id,
                        item->nome,
                        item->tipo,
                        item->descricao,
                        item->quantidade,
                        item->fim_estoque,
                        item->id);
                    response_write(&response, buffer);
                    item_free(&item);
                    node = node->next;
                }
                response_write(&response, "</table>");
            }

            response_write(&response, "<script src=\"/js/itens.js\"></script>");
            response_write_template(&response, "templates/footer.html");
        }
    }
    else
    {
        error_page(&response, ERROR_LOGIN_REQUIRED, "/");
    }

    response_send(response);

    request_free(request);

    return 0;
}
コード例 #29
0
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain who's device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
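
The bail_* ladder above is the canonical kernel unwind pattern: each resource gets a label, and a failure jumps to the label that frees everything acquired so far, in reverse order. A standalone miniature of the same shape:

#include <stdlib.h>

struct ctx { void *swq, *qp; };

/* Returns 0 and hands both allocations to the caller, or -1 after
 * unwinding whatever had been acquired when the failure happened. */
static int setup(struct ctx *c)
{
	c->swq = malloc(64);
	if (!c->swq)
		goto bail;
	c->qp = malloc(64);
	if (!c->qp)
		goto bail_swq;
	return 0;

bail_swq:
	free(c->swq);
bail:
	return -1;
}

int main(void)
{
	struct ctx c;

	if (setup(&c) == 0) {
		free(c.qp);
		free(c.swq);
	}
	return 0;
}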
コード例 #30
0
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/* 
			 * Empty block.   Since we can't be sure it 
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->free_size -= c->nextblock->free_size;
					c->wasted_size -= c->nextblock->wasted_size;
					c->nextblock->free_size = c->nextblock->wasted_size = 0;
					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
						list_add(&c->nextblock->list, &c->very_dirty_list);
					} else {
						list_add(&c->nextblock->list, &c->dirty_list);
					}
				}
				c->nextblock = jeb;
			} else {
				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
				c->dirty_size += jeb->free_size + jeb->wasted_size;
				c->free_size -= jeb->free_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->free_size = jeb->wasted_size = 0;
				if (VERYDIRTY(c, jeb->dirty_size)) {
					list_add(&jeb->list, &c->very_dirty_list);
				} else {
					list_add(&jeb->list, &c->dirty_list);
				}
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}
	
	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
		/* If we're going to start writing into a block which already 
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		c->nextblock->wasted_size += skip;
		c->wasted_size += skip;

		c->nextblock->free_size -= skip;
		c->free_size -= skip;
	}
#endif
	if (c->nr_erasing_blocks) {
		if (!c->used_size && ((c->nr_free_blocks + empty_blocks + bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n", empty_blocks, bad_blocks, c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else 
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	return ret;
}
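
The CONFIG_JFFS2_FS_WRITEBUFFER block near the end computes its skip with a power-of-two mask: since each eraseblock is a multiple of wbuf_pagesize, free_size & (wbuf_pagesize-1) is exactly the misalignment of the free region's start, which gets skipped and counted as wasted so new writes begin on a wbuf page boundary. A standalone check with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int wbuf_pagesize = 512;	/* must be a power of two */
	unsigned int free_size = 1234;
	unsigned int skip = free_size & (wbuf_pagesize - 1);	/* == 1234 % 512 */

	printf("skip %u bytes, %u aligned bytes remain\n", skip, free_size - skip);
	return 0;
}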