bool MultiTower::init()
{
	if (!Sprite::init()) // if this call is omitted, runApplication reports an error
	{
		return false;
	}

	instance = GameManager::getInstance();

	setScope(300);
	setAttack(1);
	setRate(5); // fires once every 2.5 seconds
	setTowerType(4);
	setIsSelected(false);
	setGrade(0); // no upgrade level at the start
	setPower(40); // power drawn by the tower
	setMove(false); // not in the moving state at the start
	setIsPowerEnough(true); // power is sufficient at the start
	setIsPowerConsumption(true); // this tower consumes power

	towerSprite = Sprite::create("towerItem/Item5.png");
	towerSprite->setScale(1);
	addChild(towerSprite, 5);

	gradeSprite = Sprite::create("level1.png");
	gradeSprite->setAnchorPoint(Point(0, 0));
	gradeSprite->setPosition(this->getPosition().x + 10, -towerSprite->getBoundingBox().size.height / 2);
	gradeSprite->setOpacity(0); // invisible at first
	addChild(gradeSprite, 6);

	noPowerSprite = Sprite::create("noPower.png");
	noPowerSprite->setAnchorPoint(Point(0.5, 0));
	noPowerSprite->setScale(1);
	noPowerSprite->setPosition(towerSprite->getBoundingBox().size.width / 2 - 50, towerSprite->getBoundingBox().size.height - 30);
	noPowerSprite->setVisible(false);
	addChild(noPowerSprite, 7);

	// "silenced" icon
	noAttackSprite = Sprite::create("noSpeak.png");
	noAttackSprite->setScale(0.7);
	noAttackSprite->setAnchorPoint(Point(0.5, 0));
	noAttackSprite->setPosition(towerSprite->getBoundingBox().size.width / 2 - 58, towerSprite->getBoundingBox().size.height - 60);
	addChild(noAttackSprite, 8);
	noAttackSprite->setVisible(false);

	// HP-bar background image
	towerHpSprite = Sprite::create("manaBarBg.png");
	towerHpSprite->setPosition(noPowerSprite->getPosition()-Point(10,20));
	towerHpSprite->setScale(0.6);
	addChild(towerHpSprite, 10);
	// tower hit points
	hp = 4;
	// tower HP progress bar
	towerHp = ProgressTimer::create(Sprite::create("soldierProduceTimeBar.png")); // the argument is an image sprite
	towerHp->setScaleX(2);
	towerHp->setScaleY(5.2);
	towerHp->setType(ProgressTimer::Type::BAR);
	towerHp->setMidpoint(Point(0, 0.5f));
	towerHp->setBarChangeRate(Point(1, 0));
	towerHp->setPercentage(100);
	towerHp->setPosition(Point(towerHpSprite->getContentSize().width / 2, towerHpSprite->getContentSize().height / 3 * 2 - 10));
	towerHpSprite->addChild(towerHp, 5);
	// initially invisible
	towerHp->setVisible(false);
	towerHpSprite->setVisible(false);


	schedule(schedule_selector(MultiTower::checkNearestEnemy), 0.2f);
	schedule(schedule_selector(MultiTower::shoot), rate);

	addTouch(); // register the touch listener

	return true;
}
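
The two schedule() calls at the end of init() register timed callbacks. In cocos2d-x, schedule_selector takes exactly one argument (the member function), and the target must match the SEL_SCHEDULE signature, i.e. accept a single float delta time. A hypothetical sketch of the two callbacks wired up above; the placeholder bodies are assumptions, not this example's real logic:

#include "cocos2d.h"

// Hypothetical placeholder bodies; only the SEL_SCHEDULE-compatible
// signature (a void member function taking a float) is required.
void MultiTower::checkNearestEnemy(float dt)
{
	// runs every 0.2 s: scan for enemies within `scope`, cache the closest
}

void MultiTower::shoot(float dt)
{
	// runs every `rate` seconds: fire at the cached target, if any
}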
Example #2
void schedule_exists (Ob ob) { schedule(ExistsTask(ob)); }
Example #3
void schedule_nless (Ob lhs, Ob rhs) { schedule(NegativeOrderTask(lhs, rhs)); }
Example #4
static s32 amvdec_loadmc(const u32 *p)
{
	ulong timeout;
	s32 ret = 0;

#ifdef AMVDEC_USE_STATIC_MEMORY
	if (mc_addr == NULL) {
#else
	{
#endif
		mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
	}

	if (!mc_addr)
		return -ENOMEM;

	memcpy(mc_addr, p, MC_SIZE);

	mc_addr_map = dma_map_single(amports_get_dma_device(),
		mc_addr, MC_SIZE, DMA_TO_DEVICE);

	WRITE_VREG(MPSR, 0);
	WRITE_VREG(CPSR, 0);

	/* Read CBUS register for timing */
	timeout = READ_VREG(MPSR);
	timeout = READ_VREG(MPSR);

	timeout = jiffies + HZ;

	WRITE_VREG(IMEM_DMA_ADR, mc_addr_map);
	WRITE_VREG(IMEM_DMA_COUNT, 0x1000);
	WRITE_VREG(IMEM_DMA_CTRL, (0x8000 | (7 << 16)));

	while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
		if (time_before(jiffies, timeout))
			schedule();
		else {
			pr_err("vdec load mc error\n");
			ret = -EBUSY;
			break;
		}
	}

	dma_unmap_single(amports_get_dma_device(),
		mc_addr_map, MC_SIZE, DMA_TO_DEVICE);

#ifndef AMVDEC_USE_STATIC_MEMORY
	kfree(mc_addr);
	mc_addr = NULL;
#endif

	return ret;
}

s32 amvdec_loadmc_ex(enum vformat_e type, const char *name, char *def)
{
	return am_loadmc_ex(type, name, def, &amvdec_loadmc);
}
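
The loader above follows a common kernel polling idiom: kick off the DMA transfer, then spin on a hardware busy bit, calling schedule() on each pass so other tasks can run, and give up once a jiffies deadline passes. A minimal userspace C++ analog of that timeout-and-yield loop, assuming an atomic flag stands in for the IMEM_DMA_CTRL busy bit (all names here are illustrative):

#include <atomic>
#include <chrono>
#include <thread>

// Stand-in for the hardware busy bit (IMEM_DMA_CTRL & 0x8000). In the
// kernel the device clears it when the transfer completes; here another
// thread would have to clear it.
std::atomic<bool> dma_busy{true};

// Poll until the busy flag clears, yielding between checks, with a
// one-second deadline: the shape of the kernel loop above.
int wait_for_dma()
{
	auto deadline = std::chrono::steady_clock::now() + std::chrono::seconds(1);
	while (dma_busy.load()) {
		if (std::chrono::steady_clock::now() >= deadline)
			return -1;              // corresponds to the -EBUSY path
		std::this_thread::yield();      // corresponds to schedule()
	}
	return 0;
}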

static s32 amvdec2_loadmc(const u32 *p)
{
	if (has_vdec2()) {
		ulong timeout;
		s32 ret = 0;

#ifdef AMVDEC_USE_STATIC_MEMORY
		if (mc_addr == NULL) {
#else
		{
#endif
			mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
		}

		if (!mc_addr)
			return -ENOMEM;

		memcpy(mc_addr, p, MC_SIZE);

		mc_addr_map = dma_map_single(amports_get_dma_device(),
			mc_addr, MC_SIZE, DMA_TO_DEVICE);

		WRITE_VREG(VDEC2_MPSR, 0);
		WRITE_VREG(VDEC2_CPSR, 0);

		/* Read CBUS register for timing */
		timeout = READ_VREG(VDEC2_MPSR);
		timeout = READ_VREG(VDEC2_MPSR);

		timeout = jiffies + HZ;

		WRITE_VREG(VDEC2_IMEM_DMA_ADR, mc_addr_map);
		WRITE_VREG(VDEC2_IMEM_DMA_COUNT, 0x1000);
		WRITE_VREG(VDEC2_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));

		while (READ_VREG(VDEC2_IMEM_DMA_CTRL) & 0x8000) {
			if (time_before(jiffies, timeout))
				schedule();
			else {
				pr_err("vdec2 load mc error\n");
				ret = -EBUSY;
				break;
			}
		}

		dma_unmap_single(amports_get_dma_device(),
			mc_addr_map, MC_SIZE, DMA_TO_DEVICE);

#ifndef AMVDEC_USE_STATIC_MEMORY
		kfree(mc_addr);
		mc_addr = NULL;
#endif

		return ret;
	} else
		return 0;
}

s32 amvdec2_loadmc_ex(enum vformat_e type, const char *name, char *def)
{
	if (has_vdec2())
		return am_loadmc_ex(type, name, def, &amvdec2_loadmc);
	else
		return 0;
}

s32 amhcodec_loadmc(const u32 *p)
{
#ifdef AMVDEC_USE_STATIC_MEMORY
	if (mc_addr == NULL) {
#else
	{
#endif
		mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
	}

	if (!mc_addr)
		return -ENOMEM;

	memcpy(mc_addr, p, MC_SIZE);

	mc_addr_map = dma_map_single(amports_get_dma_device(),
			mc_addr, MC_SIZE, DMA_TO_DEVICE);

	WRITE_VREG(HCODEC_IMEM_DMA_ADR, mc_addr_map);
	WRITE_VREG(HCODEC_IMEM_DMA_COUNT, 0x100);
	WRITE_VREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));

	while (READ_VREG(HCODEC_IMEM_DMA_CTRL) & 0x8000)
		udelay(1000);

	dma_unmap_single(amports_get_dma_device(),
			mc_addr_map, MC_SIZE, DMA_TO_DEVICE);

#ifndef AMVDEC_USE_STATIC_MEMORY
	kfree(mc_addr);
#endif

	return 0;
}

s32 amhcodec_loadmc_ex(enum vformat_e type, const char *name, char *def)
{
	return am_loadmc_ex(type, name, def, &amhcodec_loadmc);
}

static s32 amhevc_loadmc(const u32 *p)
{
	ulong timeout;
	s32 ret = 0;

	if (has_hevc_vdec()) {
#ifdef AMVDEC_USE_STATIC_MEMORY
		if (mc_addr == NULL) {
#else
		{
#endif
			mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
		}

		if (!mc_addr)
			return -ENOMEM;

		memcpy(mc_addr, p, MC_SIZE);

		mc_addr_map =
			dma_map_single(amports_get_dma_device(),
			mc_addr, MC_SIZE, DMA_TO_DEVICE);

		WRITE_VREG(HEVC_MPSR, 0);
		WRITE_VREG(HEVC_CPSR, 0);

		/* Read CBUS register for timing */
		timeout = READ_VREG(HEVC_MPSR);
		timeout = READ_VREG(HEVC_MPSR);

		timeout = jiffies + HZ;

		WRITE_VREG(HEVC_IMEM_DMA_ADR, mc_addr_map);
		WRITE_VREG(HEVC_IMEM_DMA_COUNT, 0x1000);
		WRITE_VREG(HEVC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));

		while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) {
			if (time_before(jiffies, timeout))
				schedule();
			else {
				pr_err("vdec2 load mc error\n");
				ret = -EBUSY;
				break;
			}
		}

		dma_unmap_single(amports_get_dma_device(),
				mc_addr_map, MC_SIZE, DMA_TO_DEVICE);

#ifndef AMVDEC_USE_STATIC_MEMORY
		kfree(mc_addr);
		mc_addr = NULL;
#endif
	}

	return ret;
}

s32 amhevc_loadmc_ex(enum vformat_e type, const char *name, char *def)
{
	if (has_hevc_vdec())
		return am_loadmc_ex(type, name, def, &amhevc_loadmc);
	else
		return 0;
}

void amvdec_start(void)
{
#ifdef CONFIG_WAKELOCK
	amvdec_wake_lock();
#endif

	/* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */
	if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);

		WRITE_VREG(DOS_SW_RESET0, (1 << 12) | (1 << 11));
		WRITE_VREG(DOS_SW_RESET0, 0);

		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);
	} else {
		/* #else */
		/* additional cbus dummy register reading for timing control */
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);

		WRITE_MPEG_REG(RESET0_REGISTER, RESET_VCPU | RESET_CCPU);

		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
	}
	/* #endif */

	WRITE_VREG(MPSR, 0x0001);
}

void amvdec2_start(void)
{
	if (has_vdec2()) {
#ifdef CONFIG_WAKELOCK
		amvdec_wake_lock();
#endif

		READ_VREG(DOS_SW_RESET2);
		READ_VREG(DOS_SW_RESET2);
		READ_VREG(DOS_SW_RESET2);

		WRITE_VREG(DOS_SW_RESET2, (1 << 12) | (1 << 11));
		WRITE_VREG(DOS_SW_RESET2, 0);

		READ_VREG(DOS_SW_RESET2);
		READ_VREG(DOS_SW_RESET2);
		READ_VREG(DOS_SW_RESET2);

		WRITE_VREG(VDEC2_MPSR, 0x0001);
	}
}

void amhcodec_start(void)
{
	WRITE_VREG(HCODEC_MPSR, 0x0001);
}

void amhevc_start(void)
{
	if (has_hevc_vdec()) {
#ifdef CONFIG_WAKELOCK
		amvdec_wake_lock();
#endif

		READ_VREG(DOS_SW_RESET3);
		READ_VREG(DOS_SW_RESET3);
		READ_VREG(DOS_SW_RESET3);

		WRITE_VREG(DOS_SW_RESET3, (1 << 12) | (1 << 11));
		WRITE_VREG(DOS_SW_RESET3, 0);

		READ_VREG(DOS_SW_RESET3);
		READ_VREG(DOS_SW_RESET3);
		READ_VREG(DOS_SW_RESET3);

		WRITE_VREG(HEVC_MPSR, 0x0001);
	}
}

void amvdec_stop(void)
{
	ulong timeout = jiffies + HZ;

	WRITE_VREG(MPSR, 0);
	WRITE_VREG(CPSR, 0);

	while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
		if (time_after(jiffies, timeout))
			break;
	}

	/* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */
	if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);

		WRITE_VREG(DOS_SW_RESET0, (1 << 12) | (1 << 11));
		WRITE_VREG(DOS_SW_RESET0, 0);

		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);
		READ_VREG(DOS_SW_RESET0);
	} else {
		/* #else */
		WRITE_MPEG_REG(RESET0_REGISTER, RESET_VCPU | RESET_CCPU);

		/* additional cbus dummy register reading for timing control */
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
		READ_MPEG_REG(RESET0_REGISTER);
	}
	/* #endif */

#ifdef CONFIG_WAKELOCK
	amvdec_wake_unlock();
#endif
}
Example #5
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int
__mutex_lock_common(struct mutex *lock, long state)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
#if 0
		if (unlikely((state == TASK_INTERRUPTIBLE &&
					signal_pending(task)) ||
			      (state == TASK_KILLABLE &&
					fatal_signal_pending(task)))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			spin_unlock_mutex(&lock->wait_lock, flags);

			return -EINTR;
		}
#endif
		set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);


	return 0;
}
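
The heart of the slowpath is atomic_xchg(&lock->count, -1): the exchange both attempts the acquisition (an old value of 1 means the lock was free) and leaves -1 behind so the unlocker knows waiters may exist. A stripped-down userspace sketch of that exchange-then-sleep shape, with no wait list and a plain yield instead of a real sleep (illustrative only):

#include <atomic>
#include <thread>

// 1 = unlocked, -1 = locked. The kernel additionally uses 0 for
// "locked, no waiters"; this sketch drops that refinement.
std::atomic<int> count{1};

void lock()
{
	// Exchange to -1: succeeds only if we observed 1 (unlocked).
	// Otherwise retry after yielding, like schedule() in the original.
	while (count.exchange(-1) != 1)
		std::this_thread::yield();
}

void unlock()
{
	count.store(1);
}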
Example #6
/*
 * wait_on_bit() sleep function for interruptible waiting
 */
int fscache_wait_bit_interruptible(void *flags)
{
	schedule();
	return signal_pending(current);
}
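
Helpers like this exist only to satisfy the action-callback contract of the old wait_on_bit() API: the kernel invokes the action while the bit is still set, and a non-zero return value aborts the wait (here, when a signal is pending). A userspace analog of that contract, with illustrative names:

#include <atomic>
#include <thread>

// The action-callback contract: return 0 to keep waiting, non-zero to
// abort (as signal_pending() does in the kernel version).
using wait_action = int (*)(void *);

int yield_action(void * /*flags*/)
{
	std::this_thread::yield();  // stands in for schedule()
	return 0;                   // keep waiting
}

// Wait until *flag clears, invoking the action between checks.
int wait_on_flag(std::atomic<bool> *flag, wait_action action)
{
	while (flag->load())
		if (int err = action(flag))
			return err;
	return 0;
}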
Example #7
int
ocfbench_init(void)
{
	int i;
	unsigned long mbps;
	unsigned long flags;

	printk("Crypto Speed tests\n");

	requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
	if (!requests) {
		printk("malloc failed\n");
		return -EINVAL;
	}

	for (i = 0; i < request_q_len; i++) {
		/* +64 for return data */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		INIT_WORK(&requests[i].work, ocf_request_wq);
#else
		INIT_WORK(&requests[i].work, ocf_request, &requests[i]);
#endif
		requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
		if (!requests[i].buffer) {
			printk("malloc failed\n");
			return -EINVAL;
		}
		memset(requests[i].buffer, '0' + i, request_size + 128);
	}

	/*
	 * OCF benchmark
	 */
	printk("OCF: testing ...\n");
	if (ocf_init() == -1)
		return -EINVAL;

	spin_lock_init(&ocfbench_counter_lock);
	total = outstanding = 0;
	jstart = jiffies;
	for (i = 0; i < request_q_len; i++) {
		spin_lock_irqsave(&ocfbench_counter_lock, flags);
		outstanding++;
		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
		ocf_request(&requests[i]);
	}
	while (outstanding > 0)
		schedule();
	jstop = jiffies;

	mbps = 0;
	if (jstop > jstart) {
		mbps = (unsigned long) total * (unsigned long) request_size * 8;
		mbps /= ((jstop - jstart) * 1000) / HZ;
	}
	printk("OCF: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
			total, request_size, (int)(jstop - jstart),
			((int)mbps) / 1000, ((int)mbps) % 1000);
	ocf_done();

#ifdef BENCH_IXP_ACCESS_LIB
	/*
	 * IXP benchmark
	 */
	printk("IXP: testing ...\n");
	ixp_init();
	total = outstanding = 0;
	jstart = jiffies;
	for (i = 0; i < request_q_len; i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		INIT_WORK(&requests[i].work, ixp_request_wq);
#else
		INIT_WORK(&requests[i].work, ixp_request, &requests[i]);
#endif
		spin_lock_irqsave(&ocfbench_counter_lock, flags);
		outstanding++;
		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
		ixp_request(&requests[i]);
	}
	while (outstanding > 0)
		schedule();
	jstop = jiffies;

	mbps = 0;
	if (jstop > jstart) {
		mbps = (unsigned long) total * (unsigned long) request_size * 8;
		mbps /= ((jstop - jstart) * 1000) / HZ;
	}
	printk("IXP: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
			total, request_size, (int)(jstop - jstart),
			((int)mbps) / 1000, ((int)mbps) % 1000);
	ixp_done();
#endif /* BENCH_IXP_ACCESS_LIB */

	for (i = 0; i < request_q_len; i++)
		kfree(requests[i].buffer);
	kfree(requests);
	return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
}
Example #8
static ssize_t pp_write (struct file * file, const char * buf, size_t count,
			 loff_t * ppos)
{
	unsigned int minor = MINOR (file->f_dentry->d_inode->i_rdev);
	struct pp_struct *pp = file->private_data;
	char * kbuffer;
	ssize_t bytes_written = 0;
	ssize_t wrote;
	int mode;
	struct parport *pport;

	if (!(pp->flags & PP_CLAIMED)) {
		/* Don't have the port claimed */
		printk (KERN_DEBUG CHRDEV "%x: claim the port first\n",
			minor);
		return -EINVAL;
	}

	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
	if (!kbuffer) {
		return -ENOMEM;
	}
	pport = pp->pdev->port;
	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	parport_set_timeout (pp->pdev,
			     (file->f_flags & O_NONBLOCK) ?
			     PARPORT_INACTIVITY_O_NONBLOCK :
			     pp->default_inactivity);

	while (bytes_written < count) {
		ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);

		if (copy_from_user (kbuffer, buf + bytes_written, n)) {
			bytes_written = -EFAULT;
			break;
		}

		if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
			/* do a fast EPP write */
			if (pport->ieee1284.mode & IEEE1284_ADDR) {
				wrote = pport->ops->epp_write_addr (pport,
					kbuffer, n, PARPORT_EPP_FAST);
			} else {
				wrote = pport->ops->epp_write_data (pport,
					kbuffer, n, PARPORT_EPP_FAST);
			}
		} else {
			wrote = parport_write (pp->pdev->port, kbuffer, n);
		}

		if (wrote <= 0) {
			if (!bytes_written) {
				bytes_written = wrote;
			}
			break;
		}

		bytes_written += wrote;

		if (file->f_flags & O_NONBLOCK) {
			if (!bytes_written)
				bytes_written = -EAGAIN;
			break;
		}

		if (signal_pending (current)) {
			if (!bytes_written) {
				bytes_written = -EINTR;
			}
			break;
		}

		if (current->need_resched) {
			schedule ();
		}
	}

	parport_set_timeout (pp->pdev, pp->default_inactivity);

	kfree (kbuffer);
	pp_enable_irq (pp);
	return bytes_written;
}
Example #9
static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
	unsigned int NumCard;	/* Board number 1 -> 8           */
	unsigned int IndexCard;	/* Index board number 0 -> 7     */
	unsigned char TicCard;	/* Board TIC to send             */
	unsigned long flags;	/* Current priority              */
	struct st_ram_io st_loc;
	struct mailbox tmpmailbox;
#ifdef DEBUG
	int c;
#endif
	DECLARE_WAITQUEUE(wait, current);

	if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
		static int warncount = 5;
		if (warncount) {
			printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
			       count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
			warncount--;
		}
		return -EINVAL;
	}

	if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io))) 
		return -EFAULT;
	
	if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
			  sizeof(struct mailbox))) 
		return -EFAULT;

	NumCard = st_loc.num_card;	/* board number to send          */
	TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
	IndexCard = NumCard - 1;

	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
		return -EINVAL;

#ifdef DEBUG
	printk("Write to applicom card #%d. struct st_ram_io follows:",
	       IndexCard+1);

		for (c = 0; c < sizeof(struct st_ram_io);) {
		
			printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);

			for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
				printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
			}
		}

		printk("\nstruct mailbox follows:");

		for (c = 0; c < sizeof(struct mailbox);) {
			printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);

			for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
				printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
			}
		}

		printk("\n");
#endif

	spin_lock_irqsave(&apbs[IndexCard].mutex, flags);

	/* Test octet ready correct */
	if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) { 
		Dummy = readb(apbs[IndexCard].RamIO + VERS);
		spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
		printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
		       IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
		DeviceErrorCount++;
		return -EIO;
	}
	
	/* Place ourselves on the wait queue */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);

	/* Check whether the card is ready for us */
	while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
		Dummy = readb(apbs[IndexCard].RamIO + VERS);
		/* It's busy. Sleep. */

		spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
					  &wait);
			return -EINTR;
		}
		spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	/* We may not have actually slept */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);

	writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);

	/* Which is best - lock down the pages with rawio and then
	   copy directly, or use bounce buffers? For now we do the latter 
	   because it works with 2.2 still */
	{
		unsigned char *from = (unsigned char *) &tmpmailbox;
		void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
		int c;

		for (c = 0; c < sizeof(struct mailbox); c++)
			writeb(*(from++), to++);
	}

	writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
	writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
	writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
	writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
	writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
	writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
	Dummy = readb(apbs[IndexCard].RamIO + VERS);
	spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
	return 0;
}
Example #10
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port address */
	
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){ 
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	} 

	/* give up in despair */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

 got_port:
	/* probe for the IRQ line */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* reset the interrupt line */
		inb_p(io+7);
		inb_p(io+7);
		/* trigger an interrupt (I hope) */
		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate a DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */

	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7); /* clear reset */
	inb_p(io+4); 
	inb_p(io+5);
	inb_p(io+5); /* enable dma */
	inb_p(io+6); /* tri-state interrupt line */

	ssleep(1);
	
	/* now, figure out which dma channel we're using, unless it's
	   already been specified */
	/* well, 0 is a legal DMA channel, but the LTPC card doesn't
	   use it... */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {  /* no dma channel */
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d.  Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* the card will want to send a result at this point */
	/* (I think... leaving out this part makes the kernel crash,
           so I put it back in...) */

	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* grab it and don't let go :-) */
	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);  /* enable interrupts from board */
		(void) inb_p(io+7);  /* and reset irq line */
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;
		/* polled mode -- 20 times per second */
		/* this is really, really slow... should it poll more often? */
		init_timer(&ltpc_timer);
		ltpc_timer.function=ltpc_poll;
		ltpc_timer.data = (unsigned long) dev;

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
Example #11
static ssize_t pp_read (struct file * file, char * buf, size_t count,
			loff_t * ppos)
{
	unsigned int minor = MINOR (file->f_dentry->d_inode->i_rdev);
	struct pp_struct *pp = file->private_data;
	char * kbuffer;
	ssize_t bytes_read = 0;
	struct parport *pport;
	int mode;

	if (!(pp->flags & PP_CLAIMED)) {
		/* Don't have the port claimed */
		printk (KERN_DEBUG CHRDEV "%x: claim the port first\n",
			minor);
		return -EINVAL;
	}

	/* Trivial case. */
	if (count == 0)
		return 0;

	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
	if (!kbuffer) {
		return -ENOMEM;
	}
	pport = pp->pdev->port;
	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	parport_set_timeout (pp->pdev,
			     (file->f_flags & O_NONBLOCK) ?
			     PARPORT_INACTIVITY_O_NONBLOCK :
			     pp->default_inactivity);

	while (bytes_read == 0) {
		ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);

		if (mode == IEEE1284_MODE_EPP) {
			/* various specials for EPP mode */
			int flags = 0;
			size_t (*fn)(struct parport *, void *, size_t, int);

			if (pp->flags & PP_W91284PIC) {
				flags |= PARPORT_W91284PIC;
			}
			if (pp->flags & PP_FASTREAD) {
				flags |= PARPORT_EPP_FAST;
			}
			if (pport->ieee1284.mode & IEEE1284_ADDR) {
				fn = pport->ops->epp_read_addr;
			} else {
				fn = pport->ops->epp_read_data;
			}
			bytes_read = (*fn)(pport, kbuffer, need, flags);
		} else {
			bytes_read = parport_read (pport, kbuffer, need);
		}

		if (bytes_read != 0)
			break;

		if (file->f_flags & O_NONBLOCK) {
			bytes_read = -EAGAIN;
			break;
		}

		if (signal_pending (current)) {
			bytes_read = -ERESTARTSYS;
			break;
		}

		if (current->need_resched)
			schedule ();
	}

	parport_set_timeout (pp->pdev, pp->default_inactivity);

	if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
		bytes_read = -EFAULT;

	kfree (kbuffer);
	pp_enable_irq (pp);
	return bytes_read;
}
Example #12
int xbuf_wait(void *word)
{
	schedule();
	return 0;
}
Example #13
asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz,
			    long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_receiver msr_d;
	struct list_head* tmp;
	struct msg_msg* msg, *found_msg;
	int err;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp,msgflg);

	msq = msg_lock(msqid);
	if(msq==NULL)
		return -EINVAL;
retry:
	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock;

	err=-EACCES;
	if (ipcperms (&msq->q_perm, S_IRUGO))
		goto out_unlock;

	tmp = msq->q_messages.next;
	found_msg=NULL;
	while (tmp != &msq->q_messages) {
		msg = list_entry(tmp,struct msg_msg,m_list);
		if(testmsg(msg,msgtyp,mode) &&
		   !security_msg_queue_msgrcv(msq, msg, current, msgtyp, mode)) {
			found_msg = msg;
			if(mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				found_msg=msg;
				msgtyp=msg->m_type-1;
			} else {
				found_msg=msg;
				break;
			}
		}
		tmp = tmp->next;
	}
	if(found_msg) {
		msg=found_msg;
		if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
			err=-E2BIG;
			goto out_unlock;
		}
		list_del(&msg->m_list);
		msq->q_qnum--;
		msq->q_rtime = get_seconds();
		msq->q_lrpid = current->tgid;
		msq->q_cbytes -= msg->m_ts;
		atomic_sub(msg->m_ts,&msg_bytes);
		atomic_dec(&msg_hdrs);
		ss_wakeup(&msq->q_senders,0);
		msg_unlock(msq);
out_success:
		msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
		if (put_user (msg->m_type, &msgp->mtype) ||
		    store_msg(msgp->mtext, msg, msgsz)) {
			    msgsz = -EFAULT;
		}
		free_msg(msg);
		return msgsz;
	} else
	{
		/* no message waiting. Prepare for pipelined
		 * receive.
		 */
		if (msgflg & IPC_NOWAIT) {
			err=-ENOMSG;
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list,&msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if(msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		 else
		 	msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/*
		 * The below optimisation is buggy.  A sleeping thread that is
		 * woken up checks if it got a message and if so, copies it to
		 * userspace and just returns without taking any locks.
		 * But this return to user space can be faster than the message
		 * send, and if the receiver immediately exits the
		 * wake_up_process performed by the sender will oops.
		 */
#if 0
		msg = (struct msg_msg*) msr_d.r_msg;
		if(!IS_ERR(msg)) 
			goto out_success;
#endif

		msq = msg_lock(msqid);
		msg = (struct msg_msg*)msr_d.r_msg;
		if(!IS_ERR(msg)) {
			/* our message arrived while we waited for
			 * the spinlock. Process it.
			 */
			if(msq)
				msg_unlock(msq);
			goto out_success;
		}
		err = PTR_ERR(msg);
		if(err == -EAGAIN) {
			if(!msq)
				BUG();
			list_del(&msr_d.r_list);
			if (signal_pending(current))
				err=-EINTR;
			 else
				goto retry;
		}
	}
out_unlock:
	if(msq)
		msg_unlock(msq);
	return err;
}
Example #14
asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;
	
	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT; 
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if(IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err=-EINVAL;
	if(msq==NULL)
		goto out_free;
retry:
	err= -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_free;

	err=-EACCES;
	if (ipcperms(&msq->q_perm, S_IWUGO)) 
		goto out_unlock_free;

	err = security_msg_queue_msgsnd(msq, msg, msgflg);
	if (err)
		goto out_unlock_free;

	if(msgsz + msq->q_cbytes > msq->q_qbytes ||
		1 + msq->q_qnum > msq->q_qbytes) {
		struct msg_sender s;

		if(msgflg&IPC_NOWAIT) {
			err=-EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		msg_unlock(msq);
		schedule();
		current->state= TASK_RUNNING;

		msq = msg_lock(msqid);
		err = -EIDRM;
		if(msq==NULL)
			goto out_free;
		ss_del(&s);
		
		if (signal_pending(current)) {
			err=-EINTR;
			goto out_unlock_free;
		}
		goto retry;
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if(!pipelined_send(msq,msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list,&msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz,&msg_bytes);
		atomic_inc(&msg_hdrs);
	}
	
	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if(msg!=NULL)
		free_msg(msg);
	return err;
}
Example #15
// __do_exit - cause a thread exit (use do_exit, do_exit_thread instead)
//   1. call exit_mmap & put_pgdir & mm_destroy to free the almost all memory space of process
//   2. set process' state as PROC_ZOMBIE, then call wakeup_proc(parent) to ask parent reclaim itself.
//   3. call scheduler to switch to other process
static int __do_exit(void)
{
	if (current == idleproc) {
		panic("idleproc exit.\n");
	}
	if (current == initproc) {
		panic("initproc exit.\n");
	}

	struct mm_struct *mm = current->mm;
	if (mm != NULL) {
		mp_set_mm_pagetable(NULL);
		if (mm_count_dec(mm) == 0) {
			exit_mmap(mm);
			put_pgdir(mm);
			bool intr_flag;
			local_intr_save(intr_flag);
			{
				list_del(&(mm->proc_mm_link));
			}
			local_intr_restore(intr_flag);
			mm_destroy(mm);
		}
		current->mm = NULL;
	}
	put_sighand(current);
	put_signal(current);
	put_fs(current);
	put_sem_queue(current);
	current->state = PROC_ZOMBIE;

	bool intr_flag;
	struct proc_struct *proc, *parent;
	local_intr_save(intr_flag);
	{
		proc = parent = current->parent;
		do {
			if (proc->wait_state == WT_CHILD) {
				wakeup_proc(proc);
			}
			proc = next_thread(proc);
		} while (proc != parent);

		if ((parent = next_thread(current)) == current) {
			parent = initproc;
		}
		de_thread(current);
		while (current->cptr != NULL) {
			proc = current->cptr;
			current->cptr = proc->optr;

			proc->yptr = NULL;
			if ((proc->optr = parent->cptr) != NULL) {
				parent->cptr->yptr = proc;
			}
			proc->parent = parent;
			parent->cptr = proc;
			if (proc->state == PROC_ZOMBIE) {
				if (parent->wait_state == WT_CHILD) {
					wakeup_proc(parent);
				}
			}
		}
	}

	wakeup_queue(&(current->event_box.wait_queue), WT_INTERRUPTED, 1);

	local_intr_restore(intr_flag);

	schedule();
	panic("__do_exit will not return!! %d %d.\n", current->pid,
	      current->exit_code);
}
Example #16
static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr)
{
	unsigned long flags;
	unsigned int i;
	unsigned char tmp;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);
#ifdef DEBUG
	int loopcount=0;
#endif
	/* No need to ratelimit this. Only root can trigger it anyway */
	if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
		printk( KERN_WARNING "Hmmm. read() of Applicom card, length %zd != expected %zd\n",
			count,sizeof(struct st_ram_io) + sizeof(struct mailbox));
		return -EINVAL;
	}
	
	while(1) {
		/* Stick ourself on the wait queue */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&FlagSleepRec, &wait);
		
		/* Scan each board, looking for one which has a packet for us */
		for (i=0; i < MAX_BOARD; i++) {
			if (!apbs[i].RamIO)
				continue;
			spin_lock_irqsave(&apbs[i].mutex, flags);
			
			tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY);
			
			if (tmp == 2) {
				struct st_ram_io st_loc;
				struct mailbox mailbox;

				/* Got a packet for us */
				ret = do_ac_read(i, buf, &st_loc, &mailbox);
				spin_unlock_irqrestore(&apbs[i].mutex, flags);
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&FlagSleepRec, &wait);

				if (copy_to_user(buf, &st_loc, sizeof(st_loc)))
					return -EFAULT;
				if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox)))
					return -EFAULT;
				return tmp;
			}
			
			if (tmp > 2) {
				/* Got an error */
				Dummy = readb(apbs[i].RamIO + VERS);
				
				spin_unlock_irqrestore(&apbs[i].mutex, flags);
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&FlagSleepRec, &wait);
				
				printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n",
				       i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
				DeviceErrorCount++;
				return -EIO;
			}
			
			/* Nothing for us. Try the next board */
			Dummy = readb(apbs[i].RamIO + VERS);
			spin_unlock_irqrestore(&apbs[i].mutex, flags);
			
		} /* per board */

		/* OK - No boards had data for us. Sleep now */

		schedule();
		remove_wait_queue(&FlagSleepRec, &wait);

		if (signal_pending(current))
			return -EINTR;

#ifdef DEBUG
		if (loopcount++ > 2) {
			printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount);
		}
#endif
	} 
}
Example #17
/*
 * wait_on_bit() sleep function for uninterruptible waiting
 */
int fscache_wait_bit(void *flags)
{
	schedule();
	return 0;
}
Example #18
void
IOManagerEPoll::idle()
{
    epoll_event events[64];
    while (true) {
        unsigned long long nextTimeout;
        if (stopping(nextTimeout))
            return;
        int rc = -1;
        errno = EINTR;
        while (rc < 0 && errno == EINTR) {
            int timeout = -1;
            if (nextTimeout != ~0ull)
                timeout = (int)(nextTimeout / 1000) + 1;
            rc = epoll_wait(m_epfd, events, 64, timeout);
            if (rc < 0 && errno == EINTR)
                nextTimeout = nextTimer();
        }
        MORDOR_LOG_LEVEL(g_log, rc < 0 ? Log::ERROR : Log::VERBOSE) << this
            << " epoll_wait(" << m_epfd << "): " << rc << " (" << errno << ")";
        if (rc < 0)
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("epoll_wait");
        std::vector<boost::function<void ()> > expired = processTimers();
        schedule(expired.begin(), expired.end());

        for(int i = 0; i < rc; ++i) {
            epoll_event &event = events[i];
            if (event.data.fd == m_tickleFds[0]) {
                unsigned char dummy;
                int rc2 = read(m_tickleFds[0], &dummy, 1);
                MORDOR_VERIFY(rc2 == 1);
                MORDOR_LOG_VERBOSE(g_log) << this << " received tickle";
                continue;
            }
            bool err = event.events & (EPOLLERR | EPOLLHUP);
            boost::mutex::scoped_lock lock(m_mutex);
            std::map<int, AsyncEvent>::iterator it =
                m_pendingEvents.find(event.data.fd);
            if (it == m_pendingEvents.end())
                continue;
            AsyncEvent &e = it->second;
            MORDOR_LOG_TRACE(g_log) << " epoll_event {"
                << (epoll_events_t)event.events << ", " << event.data.fd
                << "}, registered for " << (epoll_events_t)e.event.events;
            if ((event.events & EPOLLERR) && (e.event.events & EPOLLERR)) {
                if (e.m_dgError)
                    e.m_schedulerError->schedule(e.m_dgError);
                else
                    e.m_schedulerError->schedule(e.m_fiberError);
                // Block other events from firing
                e.m_dgError = NULL;
                e.m_fiberError.reset();
                e.m_dgIn = NULL;
                e.m_fiberIn.reset();
                e.m_dgOut = NULL;
                e.m_fiberOut.reset();
                event.events = 0;
                e.event.events = 0;
            }
            if ((event.events & EPOLLHUP) && (e.event.events & EPOLLHUP)) {
                if (e.m_dgClose)
                    e.m_schedulerError->schedule(e.m_dgClose);
                else
                    e.m_schedulerError->schedule(e.m_fiberClose);
                // Block write event from firing
                e.m_dgOut = NULL;
                e.m_fiberOut.reset();
                e.m_dgClose = NULL;
                e.m_fiberClose.reset();
                event.events &= EPOLLOUT;
                e.event.events &= EPOLLOUT;
                err = false;
            }

            if (((event.events & EPOLLIN) ||
                err) && (e.event.events & EPOLLIN)) {
                if (e.m_dgIn)
                    e.m_schedulerIn->schedule(e.m_dgIn);
                else
                    e.m_schedulerIn->schedule(e.m_fiberIn);
                e.m_dgIn = NULL;
                e.m_fiberIn.reset();
                event.events |= EPOLLIN;
            }
            if (((event.events & EPOLLOUT) ||
                err) && (e.event.events & EPOLLOUT)) {
                if (e.m_dgOut)
                    e.m_schedulerOut->schedule(e.m_dgOut);
                else
                    e.m_schedulerOut->schedule(e.m_fiberOut);
                e.m_dgOut = NULL;
                e.m_fiberOut.reset();
                event.events |= EPOLLOUT;
            }
            e.event.events &= ~event.events;

            int op = e.event.events == 0 ? EPOLL_CTL_DEL : EPOLL_CTL_MOD;
            int rc2 = epoll_ctl(m_epfd, op, event.data.fd,
                &e.event);
            MORDOR_LOG_LEVEL(g_log, rc2 ? Log::ERROR : Log::VERBOSE) << this
                << " epoll_ctl(" << m_epfd << ", " << (epoll_ctl_op_t)op << ", "
                << event.data.fd << ", " << (epoll_events_t)e.event.events << "): " << rc2
                << " (" << errno << ")";
            if (rc2)
                MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("epoll_ctl");
            if (op == EPOLL_CTL_DEL)
                m_pendingEvents.erase(it);
        }
        Fiber::yield();
    }
}
Example #19
int main(

  int   argc,
  char *argv[])

  {
  char  *id = "main";

  struct hostent *hp;
  int  go, c, errflg = 0;
  int  lockfds;
  int  t = 1;
  pid_t  pid;
  char  host[100];
  char  *homedir = PBS_SERVER_HOME;
  unsigned int port;
  char  *dbfile = "sched_out";

  struct sigaction act;
  sigset_t oldsigs;
  caddr_t curr_brk = 0;
  caddr_t next_brk;
  extern char *optarg;
  extern int optind, opterr;
  extern int rpp_fd;
  fd_set fdset;

  int  schedinit(int argc, char **argv);
  int  schedule(int com, int connector);

  glob_argv = argv;
  alarm_time = 180;

  /* The following is code to reduce security risks                */
  /* move this to a place where nss_ldap doesn't hold a socket yet */

  c = sysconf(_SC_OPEN_MAX);

  while (--c > 2)
    (void)close(c); /* close any file desc left open by parent */

  port = get_svrport(PBS_SCHEDULER_SERVICE_NAME, "tcp",
                     PBS_SCHEDULER_SERVICE_PORT);

  pbs_rm_port = get_svrport(PBS_MANAGER_SERVICE_NAME, "tcp",
                            PBS_MANAGER_SERVICE_PORT);

  strcpy(pbs_current_user, "Scheduler");

  msg_daemonname = strdup("pbs_sched");

  opterr = 0;

  while ((c = getopt(argc, argv, "L:S:R:d:p:c:a:-:")) != EOF)
    {
    switch (c)
      {

      case '-':

        if ((optarg == NULL) || (optarg[0] == '\0'))
          {
          errflg = 1;
          }

        if (!strcmp(optarg, "version"))
          {
          fprintf(stderr, "version: %s\n", PACKAGE_VERSION);
          exit(0);
          }
        else
          {
          errflg = 1;
          }

        break;

      case 'L':
        logfile = optarg;
        break;

      case 'S':
        port = atoi(optarg);

        if (port == 0)
          {
          fprintf(stderr,
                  "%s: illegal port\n", optarg);
          errflg = 1;
          }

        break;

      case 'R':

        if ((pbs_rm_port = atoi(optarg)) == 0)
          {
          (void)fprintf(stderr, "%s: bad -R %s\n",
                        argv[0], optarg);
          return 1;
          }

        break;

      case 'd':
        homedir = optarg;
        break;

      case 'p':
        dbfile = optarg;
        break;

      case 'c':
        configfile = optarg;
        break;

      case 'a':
        alarm_time = atoi(optarg);

        if (alarm_time == 0)
          {
          fprintf(stderr,
                  "%s: bad alarm time\n", optarg);
          errflg = 1;
          }

        break;

      case '?':
        errflg = 1;
        break;
      }
    }

  if (errflg)
    {
    fprintf(stderr, "usage: %s %s\n", argv[0], usage);
    exit(1);
    }

#ifndef DEBUG
  if (IamRoot() == 0)
    {
        return (1);
    }
#endif        /* DEBUG */

  /* Save the original working directory for "restart" */
  if ((oldpath = getcwd((char *)NULL, MAXPATHLEN)) == NULL)
    {
    fprintf(stderr, "cannot get current working directory\n");
    exit(1);
    }

  (void)sprintf(log_buffer, "%s/sched_priv", homedir);
#if !defined(DEBUG) && !defined(NO_SECURITY_CHECK)
  c  = chk_file_sec(log_buffer, 1, 0, S_IWGRP | S_IWOTH, 1, NULL);
  c |= chk_file_sec(PBS_ENVIRON, 0, 0, S_IWGRP | S_IWOTH, 0, NULL);

  if (c != 0) exit(1);

#endif  /* not DEBUG and not NO_SECURITY_CHECK */
  if (chdir(log_buffer) == -1)
    {
    perror("chdir");
    exit(1);
    }

  (void)sprintf(path_log,   "%s/sched_logs", homedir);
  (void)sprintf(path_acct,   "%s/%s", log_buffer, PBS_ACCT);


  /* The following is code to reduce security risks                */
  /* start out with standard umask, system resource limit infinite */

  umask(022);

  if (setup_env(PBS_ENVIRON) == -1)
    exit(1);

  c = getgid();

  (void)setgroups(1, (gid_t *)&c); /* secure suppl. groups */

#ifndef DEBUG
#ifdef _CRAY
  (void)limit(C_JOB,      0, L_CPROC, 0);

  (void)limit(C_JOB,      0, L_CPU,   0);

  (void)limit(C_JOBPROCS, 0, L_CPU,   0);

  (void)limit(C_PROC,     0, L_FD,  255);

  (void)limit(C_JOB,      0, L_FSBLK, 0);

  (void)limit(C_JOBPROCS, 0, L_FSBLK, 0);

  (void)limit(C_JOB,      0, L_MEM  , 0);

  (void)limit(C_JOBPROCS, 0, L_MEM  , 0);

#else /* not  _CRAY */
    {

    struct rlimit rlimit;

    rlimit.rlim_cur = RLIM_INFINITY;
    rlimit.rlim_max = RLIM_INFINITY;
    (void)setrlimit(RLIMIT_CPU,   &rlimit);
    (void)setrlimit(RLIMIT_FSIZE, &rlimit);
    (void)setrlimit(RLIMIT_DATA,  &rlimit);
    (void)setrlimit(RLIMIT_STACK, &rlimit);
#ifdef  RLIMIT_RSS
    (void)setrlimit(RLIMIT_RSS  , &rlimit);
#endif  /* RLIMIT_RSS */
#ifdef  RLIMIT_VMEM
    (void)setrlimit(RLIMIT_VMEM  , &rlimit);
#endif  /* RLIMIT_VMEM */
    }
#endif /* not _CRAY */
#endif /* DEBUG */

  if (log_open(logfile, path_log) == -1)
    {
    fprintf(stderr, "%s: logfile could not be opened\n", argv[0]);
    exit(1);
    }

  if (gethostname(host, sizeof(host)) == -1)
    {
    log_err(errno, id, "gethostname");
    die(0);
    }

  if ((hp = gethostbyname(host)) == NULL)
    {
    log_err(errno, id, "gethostbyname");
    die(0);
    }

  if ((server_sock = socket(AF_INET, SOCK_STREAM, 0)) < 0)
    {
    log_err(errno, id, "socket");
    die(0);
    }

  if (setsockopt(server_sock, SOL_SOCKET, SO_REUSEADDR,
                 (char *)&t, sizeof(t)) == -1)
    {
    log_err(errno, id, "setsockopt");
    die(0);
    }

  saddr.sin_family = AF_INET;

  saddr.sin_port = htons(port);
  memcpy(&saddr.sin_addr, hp->h_addr, hp->h_length);

  if (bind(server_sock, (struct sockaddr *)&saddr, sizeof(saddr)) < 0)
    {
    log_err(errno, id, "bind");
    die(0);
    }

  if (listen(server_sock, 5) < 0)
    {
    log_err(errno, id, "listen");
    die(0);
    }

  okclients = (pbs_net_t *)calloc(START_CLIENTS, sizeof(pbs_net_t));

  addclient("localhost");   /* who has permission to call MOM */
  addclient(host);

  if (configfile)
    {
    if (read_config(configfile) != 0)
      die(0);
    }

  lockfds = open("sched.lock", O_CREAT | O_TRUNC | O_WRONLY, 0644);

  if (lockfds < 0)
    {
    log_err(errno, id, "open lock file");
    exit(1);
    }

  lock_out(lockfds, F_WRLCK);

  fullresp(0);

  if (sigemptyset(&allsigs) == -1)
    {
    perror("sigemptyset");
    exit(1);
    }

  if (sigprocmask(SIG_SETMASK, &allsigs, NULL) == -1)   /* unblock */
    {
    perror("sigprocmask");
    exit(1);
    }

  act.sa_flags = 0;

  sigaddset(&allsigs, SIGHUP);    /* remember to block these */
  sigaddset(&allsigs, SIGINT);    /* during critical sections */
  sigaddset(&allsigs, SIGTERM);   /* so we don't get confused */
  act.sa_mask = allsigs;

  act.sa_handler = restart;       /* do a restart on SIGHUP */
  sigaction(SIGHUP, &act, NULL);

  act.sa_handler = toolong; /* handle an alarm call */
  sigaction(SIGALRM, &act, NULL);

  act.sa_handler = die;           /* bite the biscuit for all following */
  sigaction(SIGINT, &act, NULL);
  sigaction(SIGTERM, &act, NULL);

  /*
   * Catch these signals to ensure we core dump even if
   * our rlimit for core dumps is set to 0 initially.
   *
   * Chris Samuel - VPAC
   * [email protected] - 29th July 2003
   *
   * Now conditional on the PBSCOREDUMP environment variable
   */

  if (getenv("PBSCOREDUMP"))
    {
    act.sa_handler = catch_abort;   /* make sure we core dump */

    sigaction(SIGSEGV, &act, NULL);
    sigaction(SIGBUS, &act, NULL);
    sigaction(SIGFPE, &act, NULL);
    sigaction(SIGILL, &act, NULL);
    sigaction(SIGTRAP, &act, NULL);
    sigaction(SIGSYS, &act, NULL);
    }

  /*
   *  Local initialization stuff
   */

  if (schedinit(argc, argv))
    {
    (void) sprintf(log_buffer,
                   "local initialization failed, terminating");
    log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer);
    exit(1);
    }

  if (getenv("PBSDEBUG") == NULL)
    {
    lock_out(lockfds, F_UNLCK);

#ifdef DISABLE_DAEMONS
    pid = getpid();
#else
    if ((pid = fork()) == -1)
      {
      /* error on fork */
      perror("fork");

      exit(1);
      }
    else
    if (pid > 0)               /* parent exits */
      {
      exit(0);
      }

    if ((pid = setsid()) == -1)
      {
      perror("setsid");

      exit(1);
      }
#endif  /*  DISABLE_DAEMONS  */

    lock_out(lockfds, F_WRLCK);

    if (freopen(dbfile, "a", stdout) == NULL)
      {
      perror("opening lockfile");

      exit(1);
      }


    setvbuf(stdout, NULL, _IOLBF, 0);

    dup2(fileno(stdout), fileno(stderr));
    }
  else
    {
    setvbuf(stdout, NULL, _IOLBF, 0);
    setvbuf(stderr, NULL, _IOLBF, 0);

    pid = getpid();
    }

  if (freopen("/dev/null", "r", stdin) == NULL)
    {
    perror("opening /dev/null");

    exit(1);
    }

  /* write scheduler's pid into lockfile */

  (void)sprintf(log_buffer, "%ld\n", (long)pid);

  if (write(lockfds, log_buffer, strlen(log_buffer) + 1) != (ssize_t)(strlen(log_buffer) + 1))
    {
    perror("writing to lockfile");

    exit(1);
    }

#if (PLOCK_DAEMONS & 2)
  (void)plock(PROCLOCK); /* lock daemon into memory */

#endif

  sprintf(log_buffer, "%s startup pid %ld", argv[0], (long)pid);

  log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer);

  FD_ZERO(&fdset);

  for (go = 1;go;)
    {
    int cmd;

    if (rpp_fd != -1)
      FD_SET(rpp_fd, &fdset);

    FD_SET(server_sock, &fdset);

    if (select(FD_SETSIZE, &fdset, NULL, NULL, NULL) == -1)
      {
      if (errno != EINTR)
        {
        log_err(errno, id, "select");
        die(0);
        }

      continue;
      }

    if (rpp_fd != -1 && FD_ISSET(rpp_fd, &fdset))
      {
      if (rpp_io() == -1)
        log_err(errno, id, "rpp_io");
      }

    if (!FD_ISSET(server_sock, &fdset))
      continue;

    cmd = server_command();

    if (sigprocmask(SIG_BLOCK, &allsigs, &oldsigs) == -1)
      log_err(errno, id, "sigprocmaskSIG_BLOCK)");

    alarm(alarm_time);

    if (schedule(cmd, connector)) /* magic happens here */
      go = 0;

    alarm(0);

    if (connector >= 0 && server_disconnect(connector))
      {
      log_err(errno, id, "server_disconnect");
      die(0);
      }

    next_brk = (caddr_t)sbrk(0);

    if (next_brk > curr_brk)
      {
      sprintf(log_buffer, "brk point %ld", (long)next_brk);
      log_record(PBSEVENT_DEBUG, PBS_EVENTCLASS_SERVER,
                 id, log_buffer);
      curr_brk = next_brk;
      }

    if (sigprocmask(SIG_SETMASK, &oldsigs, NULL) == -1)
      log_err(errno, id, "sigprocmask(SIG_SETMASK)");
    }

  sprintf(log_buffer, "%s normal finish pid %ld",

          argv[0],
          (long)pid);

  log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer);

  close(server_sock);

  exit(0);
  }  /* END main() */
Example #20
static int
splat_condvar_test1(struct file *file, void *arg)
{
	int i, count = 0, rc = 0;
	condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
	condvar_priv_t cv;

	cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
	cv.cv_file = file;
	mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
	cv_init(&cv.cv_condvar, NULL, CV_DEFAULT, NULL);

	/* Create some threads, the exact number isn't important just as
	 * long as we know how many we managed to create and should expect. */
	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
		ct[i].ct_cvp = &cv;
		ct[i].ct_name = SPLAT_CONDVAR_TEST1_NAME;
		ct[i].ct_rc = 0;
		ct[i].ct_thread = spl_kthread_create(splat_condvar_test12_thread,
		    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);

		if (!IS_ERR(ct[i].ct_thread)) {
			wake_up_process(ct[i].ct_thread);
			count++;
		}
	}

	/* Wait until all threads are waiting on the condition variable */
	while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
		schedule();

	/* Wake a single thread at a time, wait until it exits */
	for (i = 1; i <= count; i++) {
		cv_signal(&cv.cv_condvar);

		while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
			schedule();

		/* Correct behavior 1 thread woken */
		if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
			continue;

                splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Attempted to "
			   "wake %d thread but work %d threads woke\n",
			   1, count - atomic_read(&cv.cv_condvar.cv_waiters));
		rc = -EINVAL;
		break;
	}

	if (!rc)
                splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Correctly woke "
			   "%d sleeping threads %d at a time\n", count, 1);

	/* Wait until that last mutex is dropped */
	while (mutex_owner(&cv.cv_mtx))
		schedule();

	/* Wake everything for the failure case */
	cv_broadcast(&cv.cv_condvar);
	cv_destroy(&cv.cv_condvar);

	/* wait for threads to exit */
	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
		if (!IS_ERR(ct[i].ct_thread))
			kthread_stop(ct[i].ct_thread);
	}
	mutex_destroy(&cv.cv_mtx);

	return rc;
}
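
The test's wake-one-at-a-time loop, cv_signal() followed by schedule() until the waiter count drops, translates almost directly to userspace. A compact C++ analog using std::condition_variable; the thread count is arbitrary, and unlike the kernel test this sketch does not defend against spurious wakeups releasing more than one thread:

#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

std::mutex mtx;
std::condition_variable cv;
std::atomic<int> waiters{0};
bool go = false;

int main()
{
	const int count = 4;
	std::vector<std::thread> threads;
	for (int i = 0; i < count; i++)
		threads.emplace_back([] {
			std::unique_lock<std::mutex> lk(mtx);
			++waiters;
			cv.wait(lk, [] { return go; });
			--waiters;
		});

	// Wait until every thread is parked on the condition variable.
	while (waiters.load() != count)
		std::this_thread::yield();	// like schedule()

	{
		std::lock_guard<std::mutex> lk(mtx);
		go = true;			// wake predicate
	}
	// Wake one thread at a time, like the cv_signal() loop above.
	for (int i = 1; i <= count; i++) {
		cv.notify_one();
		while (waiters.load() > count - i)
			std::this_thread::yield();
		std::printf("woke thread %d of %d\n", i, count);
	}
	for (auto &t : threads) t.join();
	return 0;
}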
Example #21
int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, s32 *ctl_len,
			char *data_buf, int data_maxlen, s32 *data_len, int *flags_p)
{
	int error;
	int oldflags;
	struct file *filp;
	struct inode *ino;
	struct sol_socket_struct *sock;
	struct T_unitdata_ind udi;
	mm_segment_t old_fs = get_fs();
	long args[6];
	char *tmpbuf;
	int tmplen;
	int (*sys_socketcall)(int, unsigned long *) =
		(int (*)(int, unsigned long *))SYS(socketcall);
	int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *);
	
	SOLD("entry");
	SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
	read_lock(&current->files->file_lock);
	filp = fcheck(fd);
	read_unlock(&current->files->file_lock);
	if (!filp)
		return -EBADF;
	ino = filp->f_dentry->d_inode;
	sock = (struct sol_socket_struct *)filp->private_data;
	SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL));
	if ( ctl_maxlen > 0 && !sock->pfirst && ino->u.socket_i.type == SOCK_STREAM
		&& sock->state == TS_IDLE) {
		SOLD("calling LISTEN");
		args[0] = fd;
		args[1] = -1;
		set_fs(KERNEL_DS);
		sys_socketcall(SYS_LISTEN, args);
		set_fs(old_fs);
		SOLD("LISTEN done");
	}
	if (!(filp->f_flags & O_NONBLOCK)) {
		poll_table wait_table, *wait;

		poll_initwait(&wait_table);
		wait = &wait_table;
		for(;;) {
			SOLD("loop");
			set_current_state(TASK_INTERRUPTIBLE);
			/* ! ( l<0 || ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( ! l<0 && ! ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( l>=0 && ( ! l>=0 || ! ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( l>=0 && ( l<0 || ( pfirst && ! (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( l>=0 && ( l<0 || ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) ) */ 
			/* ( l>=0 && ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) */ 
			if (ctl_maxlen >= 0 && sock->pfirst && (*flags_p != MSG_HIPRI || sock->pfirst->pri == MSG_HIPRI))
				break;
			SOLD("cond 1 passed");
			if (
			#if 1
				*flags_p != MSG_HIPRI &&
			#endif
				((filp->f_op->poll(filp, wait) & POLLIN) ||
				(filp->f_op->poll(filp, NULL) & POLLIN) ||
				signal_pending(current))
			) {
				break;
			}
			if( *flags_p == MSG_HIPRI ) {
				SOLD("avoiding lockup");
				break ;
			}
			if(wait_table.error) {
				SOLD("wait-table error");
				poll_freewait(&wait_table);
				return wait_table.error;
			}
			SOLD("scheduling");
			schedule();
		}
		SOLD("loop done");
		current->state = TASK_RUNNING;
		poll_freewait(&wait_table);
		if (signal_pending(current)) {
			SOLD("signal pending");
			return -EINTR;
		}
	}
	if (ctl_maxlen >= 0 && sock->pfirst) {
		struct T_primsg *it = sock->pfirst;
		int l = min_t(int, ctl_maxlen, it->length);
		SCHECK_MAGIC((char*)((u64)(((char *)&it->type)+sock->offset+it->length+7)&~7),MKCTL_MAGIC);
		SOLD("purting ctl data");
		if(copy_to_user(ctl_buf,
			(char*)&it->type + sock->offset, l))
			return -EFAULT;
		SOLD("pur it");
		if(put_user(l, ctl_len))
			return -EFAULT;
		SOLD("set ctl_len");
		*flags_p = it->pri;
		it->length -= l;
		if (it->length) {
			SOLD("more ctl");
			sock->offset += l;
			return MORECTL;
		} else {
			SOLD("removing message");
			sock->pfirst = it->next;
			if (!sock->pfirst)
				sock->plast = NULL;
			SOLDD(("getmsg kfree %016lx->%016lx\n", it, sock->pfirst));
			mykfree(it);
			sock->offset = 0;
			SOLD("ctl done");
			return 0;
		}
	}
	*flags_p = 0;
	if (ctl_maxlen >= 0) {
		SOLD("ACCEPT perhaps?");
		if (ino->u.socket_i.type == SOCK_STREAM && sock->state == TS_IDLE) {
			struct T_conn_ind ind;
			char *buf = getpage();
			int len = BUF_SIZE;

			SOLD("trying ACCEPT");
			if (put_user(ctl_maxlen - sizeof(ind), ctl_len))
				return -EFAULT;
			args[0] = fd;
			args[1] = (long)buf;
			args[2] = (long)&len;
			oldflags = filp->f_flags;
			filp->f_flags |= O_NONBLOCK;
			SOLD("calling ACCEPT");
			set_fs(KERNEL_DS);
			error = sys_socketcall(SYS_ACCEPT, args);
			set_fs(old_fs);
			filp->f_flags = oldflags;
			if (error < 0) {
				SOLD("some error");
				putpage(buf);
				return error;
			}
			if (error) {
				SOLD("connect");
				putpage(buf);
				if (sizeof(ind) > ctl_maxlen) {
					SOLD("generating CONN_IND");
					ind.PRIM_type = T_CONN_IND;
					ind.SRC_length = len;
					ind.SRC_offset = sizeof(ind);
					ind.OPT_length = ind.OPT_offset = 0;
					ind.SEQ_number = error;
					if(copy_to_user(ctl_buf, &ind, sizeof(ind))||
					   put_user(sizeof(ind)+ind.SRC_length,ctl_len))
						return -EFAULT;
					SOLD("CONN_IND created");
				}
				if (data_maxlen >= 0)
					put_user(0, data_len);
				SOLD("CONN_IND done");
				return 0;
			}
			if (len>ctl_maxlen) {
				SOLD("data don't fit");
				putpage(buf);
				return -EFAULT;		/* XXX - is this ok ? */
			}
			if(copy_to_user(ctl_buf,buf,len) || put_user(len,ctl_len)){
				SOLD("can't copy data");
				putpage(buf);
				return -EFAULT;
			}
			SOLD("ACCEPT done");
			putpage(buf);
		}
	}
	SOLD("checking data req");
	if (data_maxlen <= 0) {
		if (data_maxlen == 0)
			put_user(0, data_len);
		if (ctl_maxlen >= 0)
			put_user(0, ctl_len);
		return -EAGAIN;
	}
	SOLD("wants data");
	if (ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("udi fits");
		tmpbuf = ctl_buf + sizeof(udi);
		tmplen = ctl_maxlen - sizeof(udi);
	} else {
		SOLD("udi does not fit");
		tmpbuf = NULL;
		tmplen = 0;
	}
	if (put_user(tmplen, ctl_len))
		return -EFAULT;
	SOLD("set ctl_len");
	oldflags = filp->f_flags;
	filp->f_flags |= O_NONBLOCK;
	SOLD("calling recvfrom");
	sys_recvfrom = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
	error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr*)tmpbuf, ctl_len);
	filp->f_flags = oldflags;
	if (error < 0)
		return error;
	SOLD("error >= 0" ) ;
	if (error && ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("generating udi");
		udi.PRIM_type = T_UNITDATA_IND;
		if (get_user(udi.SRC_length, ctl_len))
			return -EFAULT;
		udi.SRC_offset = sizeof(udi);
		udi.OPT_length = udi.OPT_offset = 0;
		if (copy_to_user(ctl_buf, &udi, sizeof(udi)) ||
		    put_user(sizeof(udi)+udi.SRC_length, ctl_len))
			return -EFAULT;
		SOLD("udi done");
	} else
		put_user(0, ctl_len);
	put_user(error, data_len);
	SOLD("done");
	return 0;
}
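
/*
 * The getmsg path above hand-rolls the classic kernel sleep/wake pattern:
 * mark the task TASK_INTERRUPTIBLE *before* re-testing the wakeup
 * condition, so a wakeup that arrives between the test and schedule() is
 * not lost. A minimal stand-alone sketch of that pattern follows; the
 * queue and condition names are placeholders, not taken from the code
 * above.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_queue);
static int my_cond;

static int wait_for_my_cond(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&my_queue, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_cond)			/* test only after marking */
			break;
		if (signal_pending(current)) {
			ret = -EINTR;		/* mirrors the -EINTR above */
			break;
		}
		schedule();			/* actually sleep */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_queue, &wait);
	return ret;
}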
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;
	struct mtd_info_user64 info64;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
		{
		struct region_info_user ur;
		struct mtd_erase_region_info32 mer;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		mer.offset = (u_int32_t) mtd->eraseregions[ur.regionindex].offset;
		mer.erasesize = mtd->eraseregions[ur.regionindex].erasesize;
		mer.numblocks = mtd->eraseregions[ur.regionindex].numblocks;

		if (copy_to_user(argp, &mer, sizeof(struct mtd_erase_region_info32)))
			return -EFAULT;
		/*
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
			*/
		break;
		}

	case MEMGETREGIONINFO64:
		{
		struct region_info_user64 ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user64)))
			return -EFAULT;
		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
					sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
		}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= device_size(mtd);
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		info.ecctype	= mtd->ecctype;
		info.eccsize	= mtd->eccsize;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) 
			return -EFAULT;
		break;

	case MEMGETINFO64:
		info64.type		= mtd->type;
		info64.flags		= mtd->flags;
		info64.size		= device_size(mtd);
		info64.erasesize	= mtd->erasesize;
		info64.writesize	= mtd->writesize;
		info64.oobsize		= mtd->oobsize;
		info64.ecctype		= mtd->ecctype;
		info64.eccsize		= mtd->eccsize;
		if (copy_to_user(argp, &info64, sizeof(struct mtd_info_user64)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info32 *erase; /* Backward compatible older struct */
		struct erase_info *erase64; /* Actual struct sent to kernel */

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info32), GFP_KERNEL);
		erase64 = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase || !erase64) {
			/* kfree(NULL) is a no-op: free whichever allocation succeeded */
			kfree(erase);
			kfree(erase64);
			ret = -ENOMEM;
		} else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info32));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				kfree(erase64);
				return -EFAULT;
			}
			/* Send an erase64 to the kernel mtd layer */
			erase64->addr = (uint64_t) erase->addr;
			erase64->len = erase->len;

			erase64->mtd = mtd;
			erase64->callback = mtdchar_erase_callback;
			erase64->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
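			/*
			  One hedged way to allow INTERRUPTIBLE here (a
			  sketch, not this driver's actual fix): kmalloc a
			  small wrapper holding the erase_info and its
			  wait_queue_head_t together, and let the erase
			  callback do the kfree, so an interrupted sleeper
			  can return without leaving a dangling wq_head.
			*/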
			ret = mtd->erase(mtd, erase64);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase64->state != MTD_ERASE_DONE &&
				    erase64->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase64->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
			kfree(erase64);
		}
		break;
	}

	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user64))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

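		/* buf.start carries two addresses at once: its low bits were
		   captured as ops.ooboffs above, and masking them off below
		   leaves the page-aligned offset that write_oob expects. */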
		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
				 sizeof(uint32_t)))
		{
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;

	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_ops ops;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf64)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
				 sizeof(uint32_t)))
		{
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;


		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;


		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.retlen, (uint32_t __user *)argp)) {
			ret = -EFAULT;
		}
		else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.retlen)) {
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf64)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;


		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;


		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.retlen, (uint32_t __user *)argp)) {
			ret = -EFAULT;
		}
		else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.retlen)) {
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK64:
	{
		struct erase_info_user64 info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo32 oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo32)))
			return -EFAULT;
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL64:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}


	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
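			/* FALLTHRU */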

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */
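
/*
 * A hedged userspace sketch of driving the ioctls above, mtd-utils
 * style: query the geometry with MEMGETINFO, then erase the first
 * block with MEMERASE. "/dev/mtd0" is an assumed device node; the
 * struct and ioctl names come from <mtd/mtd-user.h>.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int erase_first_block(void)
{
	struct mtd_info_user info;
	struct erase_info_user erase;
	int fd = open("/dev/mtd0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, MEMGETINFO, &info) < 0)	/* MEMGETINFO case above */
		goto fail;
	erase.start = 0;			/* first erase block */
	erase.length = info.erasesize;
	if (ioctl(fd, MEMERASE, &erase) < 0)	/* MEMERASE case above */
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}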
static int dlm_recovery_wait(void *word)
{
	schedule();
	return 0;
}
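
/*
 * dlm_recovery_wait() above is an action callback for the older
 * wait_on_bit() API: wait_on_bit() invokes it while the bit is still
 * set, and the callback decides how to sleep (here: plain schedule()).
 * A hedged sketch of wiring it up; the struct, field and flag names
 * below are placeholders, not taken from the dlm sources.
 */
struct my_lockspace {
	unsigned long ls_flags;
};
#define LSFL_RECOVERY_RUNNING	0

static void wait_for_recovery(struct my_lockspace *ls)
{
	/* sleeps via dlm_recovery_wait() until the bit is cleared and
	   the bit's waitqueue is woken */
	wait_on_bit(&ls->ls_flags, LSFL_RECOVERY_RUNNING,
		    dlm_recovery_wait, TASK_UNINTERRUPTIBLE);
}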
Example #24
// do_wait - wait for one specific child (pid != 0) or any child (pid == 0) to
//           reach PROC_ZOMBIE state, then free the child's kernel stack and
//           proc struct.
// NOTE: only after do_wait returns are all resources of the child process freed.
int do_wait(int pid, int *code_store)
{
	struct mm_struct *mm = current->mm;
	if (code_store != NULL) {
		if (!user_mem_check(mm, (uintptr_t) code_store, sizeof(int), 1)) {
			return -E_INVAL;
		}
	}

	struct proc_struct *proc, *cproc;
	bool intr_flag, haskid;
repeat:
	cproc = current;
	haskid = 0;
	if (pid != 0) {
		proc = find_proc(pid);
		if (proc != NULL) {
			do {
				if (proc->parent == cproc) {
					haskid = 1;
					if (proc->state == PROC_ZOMBIE) {
						goto found;
					}
					break;
				}
				cproc = next_thread(cproc);
			} while (cproc != current);
		}
	} else {
		do {
			proc = cproc->cptr;
			for (; proc != NULL; proc = proc->optr) {
				haskid = 1;
				if (proc->state == PROC_ZOMBIE) {
					goto found;
				}
			}
			cproc = next_thread(cproc);
		} while (cproc != current);
	}
	if (haskid) {
		current->state = PROC_SLEEPING;
		current->wait_state = WT_CHILD;
		schedule();
		may_killed();
		goto repeat;
	}
	return -E_BAD_PROC;

found:
	if (proc == idleproc || proc == initproc) {
		panic("wait idleproc or initproc.\n");
	}
	int exit_code = proc->exit_code;
	spin_lock_irqsave(&proc_lock, intr_flag);
	{
		unhash_proc(proc);
		remove_links(proc);
	}
	spin_unlock_irqrestore(&proc_lock, intr_flag);
	put_kstack(proc);
	kfree(proc);

	int ret = 0;
	if (code_store != NULL) {
		lock_mm(mm);
		{
			if (!copy_to_user
			    (mm, code_store, &exit_code, sizeof(int))) {
				ret = -E_INVAL;
			}
		}
		unlock_mm(mm);
	}
	return ret;
}
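
/*
 * Hedged sketch of the exit side that do_wait() pairs with: the dying
 * child marks itself PROC_ZOMBIE and, if its parent parked itself with
 * wait_state == WT_CHILD above, wakes the parent so the `goto repeat`
 * rescan finds the zombie. Condensed from the usual ucore do_exit()
 * shape, not a verbatim copy.
 */
static void exit_side_sketch(int error_code)
{
	bool intr_flag;

	local_intr_save(intr_flag);
	{
		current->state = PROC_ZOMBIE;
		current->exit_code = error_code;
		if (current->parent->wait_state == WT_CHILD)
			wakeup_proc(current->parent);
	}
	local_intr_restore(intr_flag);
	schedule();	/* never returns: the parent reaps us in do_wait() */
}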
Example #25
void schedule_merge (Ob dep) { schedule(MergeTask(dep)); }
Example #26
int do_linux_waitpid(int pid, int *code_store)
{
	struct mm_struct *mm = current->mm;
	if (code_store != NULL) {
		if (!user_mem_check(mm, (uintptr_t) code_store, sizeof(int), 1)) {
			return -E_INVAL;
		}
	}

	struct proc_struct *proc, *cproc;
	bool intr_flag, haskid;
repeat:
	cproc = current;
	haskid = 0;
	if (pid > 0) {
		proc = find_proc(pid);
		if (proc != NULL) {
			do {
				if (proc->parent == cproc) {
					haskid = 1;
					if (proc->state == PROC_ZOMBIE) {
						goto found;
					}
					break;
				}
				cproc = next_thread(cproc);
			} while (cproc != current);
		}
	}
	/* we do NOT have process group ids, so treat -1 the same as 0 */
	else if (pid == 0 || pid == -1) {
		do {
			proc = cproc->cptr;
			for (; proc != NULL; proc = proc->optr) {
				haskid = 1;
				if (proc->state == PROC_ZOMBIE) {
					goto found;
				}
			}
			cproc = next_thread(cproc);
		} while (cproc != current);
	} else {		// pid < -1 would select a process group
		// TODO: process groups are not implemented
		return -E_INVAL;
	}
	if (haskid) {
		current->state = PROC_SLEEPING;
		current->wait_state = WT_CHILD;
		schedule();
		may_killed();
		goto repeat;
	}
	return -E_BAD_PROC;

found:
	if (proc == idleproc || proc == initproc) {
		panic("wait idleproc or initproc.\n");
	}
	int exit_code = proc->exit_code;
	int return_pid = proc->pid;
	local_intr_save(intr_flag);
	{
		unhash_proc(proc);
		remove_links(proc);
	}
	local_intr_restore(intr_flag);
	put_kstack(proc);
	kfree(proc);

	int ret = 0;
	if (code_store != NULL) {
		lock_mm(mm);
		{
			int status = exit_code << 8;
			if (!copy_to_user(mm, code_store, &status, sizeof(int))) {
				ret = -E_INVAL;
			}
		}
		unlock_mm(mm);
	}
	return (ret == 0) ? return_pid : ret;
}
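
/*
 * The `exit_code << 8` above builds a POSIX-style wait status word: the
 * low byte is 0 for a normal exit and the exit code sits in bits 8..15,
 * which is exactly what a Linux libc's WIFEXITED()/WEXITSTATUS() decode:
 */
#include <assert.h>
#include <sys/wait.h>

static void check_status_layout(void)
{
	int status = 42 << 8;		/* what do_linux_waitpid stores */

	assert(WIFEXITED(status));	/* low 7 bits zero: normal exit */
	assert(WEXITSTATUS(status) == 42);
}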
Example #27
void schedule_less (Ob lhs, Ob rhs) { schedule(PositiveOrderTask(lhs, rhs)); }
Example #28
// init_main - the second kernel thread used to create kswapd_main & user_main kernel threads
static int init_main(void *arg)
{
	int pid;
#ifdef UCONFIG_SWAP
	if ((pid = ucore_kernel_thread(kswapd_main, NULL, 0)) <= 0) {
		panic("kswapd init failed.\n");
	}
	kswapd = find_proc(pid);
	set_proc_name(kswapd, "kswapd");
#else
	kprintf("init_main:: swapping is disabled.\n");
#endif

	int ret;
	char root[] = "disk0:";
	if ((ret = vfs_set_bootfs(root)) != 0) {
		panic("set boot fs failed: %e.\n", ret);
	}

	size_t nr_used_pages_store = nr_used_pages();
	size_t slab_allocated_store = slab_allocated();

	unsigned int nr_process_store = nr_process;

	pid = ucore_kernel_thread(user_main, NULL, 0);
	if (pid <= 0) {
		panic("create user_main failed.\n");
	}

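	/* reap children until the process count drops back to the baseline
	 * recorded before user_main was spawned, i.e. until every user-mode
	 * process has exited and been freed */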
	while (do_wait(0, NULL) == 0) {
		if (nr_process_store == nr_process) {
			break;
		}
		schedule();
	}
#ifdef UCONFIG_SWAP
	assert(kswapd != NULL);
	int i;
	for (i = 0; i < 10; i++) {
		if (kswapd->wait_state == WT_TIMER) {
			wakeup_proc(kswapd);
		}
		schedule();
	}
#endif

	mbox_cleanup();
	fs_cleanup();

	kprintf("all user-mode processes have quit.\n");
#ifdef UCONFIG_SWAP
	assert(initproc->cptr == kswapd && initproc->yptr == NULL
	       && initproc->optr == NULL);
	assert(kswapd->cptr == NULL && kswapd->yptr == NULL
	       && kswapd->optr == NULL);
	assert(nr_process == 2 + sysconf.lcpu_count);
#else
	assert(nr_process == 1 + sysconf.lcpu_count);
#endif
	assert(nr_used_pages_store == nr_used_pages());
	assert(slab_allocated_store == slab_allocated());
	kprintf("init check memory pass.\n");
	return 0;
}
Example #29
void schedule_nullary_function (const NullaryFunction * fun)
{
    schedule(NullaryFunctionTask(*fun));
}
Example #30
static int
get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
{
	for (;;) {
		unsigned long signr;
		struct k_sigaction *ka;

		spin_lock_irq(&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, info);
		spin_unlock_irq(&current->sigmask_lock);

		if (!signr)
			break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			/* Let the debugger run.  */
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;
			current->exit_code = 0;

			/* The debugger continued.  Ignore SIGSTOP.  */
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good?  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->p_pptr->pid;
				info->si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sig->action[signr-1];

		if (ka->sa.sa_handler == SIG_IGN) {
			if (signr != SIGCHLD)
				continue;
			/* Check for SIGCHLD: it's special.  */
			while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
				/* nothing */;
			continue;
		}

		if (ka->sa.sa_handler == SIG_DFL) {
			int exit_code = signr;

			/* Init gets no signals it doesn't want.  */
			if (current->pid == 1)
				continue;

			switch (signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;
				/* FALLTHRU */

			case SIGSTOP: {
				struct signal_struct *sig;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				sig = current->p_pptr->sig;
				if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				continue;
			}

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
				/* FALLTHRU */

			default:
				sig_exit(signr, exit_code, info);
				/* NOTREACHED */
			}
		}
		return signr;
	}
	return 0;
}
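
/*
 * Hedged sketch of the arch-level do_signal() loop that consumes
 * get_signal_to_deliver(): a positive return value is a signal that must
 * be delivered to userspace; 0 means nothing is pending (or it was fully
 * handled in the kernel: stopped, ignored, or fatal). handle_signal()
 * stands in for the arch-specific user-frame setup and is not defined
 * here.
 */
static int do_signal_sketch(struct pt_regs *regs)
{
	siginfo_t info;
	int signr = get_signal_to_deliver(&info, regs);

	if (signr > 0) {
		struct k_sigaction *ka = &current->sig->action[signr - 1];

		handle_signal(signr, ka, &info, regs);	/* build user frame */
		return 1;	/* a signal frame was set up */
	}
	return 0;		/* restart / nothing to deliver */
}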