Example #1
0
/*
 * Register the linker-embedded romdisk region as a 512-byte block device.
 *
 * Allocates a blkdev descriptor plus a romdisk driver context, computes the
 * block count from the region bounds, and registers the device. All paths
 * that fail (allocation failure, empty region, registration failure) release
 * everything that was allocated; on success ownership of both allocations
 * passes to the block-device layer.
 */
static __init void romdisk_init(void)
{
	struct blkdev * dev;
	struct romdisk * romdisk;
	u64_t size, rem;

	dev = malloc(sizeof(struct blkdev));
	if(!dev)
		return;

	romdisk = malloc(sizeof(struct romdisk));
	if(!romdisk)
	{
		free(dev);
		return;
	}

	snprintf(romdisk->name, 32, "romdisk");
	romdisk->start = (void *)__romdisk_start;
	romdisk->end = (void *)__romdisk_end;
	romdisk->busy = FALSE;

	/* an empty (or inverted) romdisk region has nothing to expose */
	if((romdisk->end - romdisk->start) <= 0)
	{
		free(romdisk);
		free(dev);
		return;
	}

	/*
	 * Round the byte size up to whole 512-byte blocks. div64_64()
	 * divides in place (size becomes the quotient) and returns the
	 * remainder, so a non-zero remainder means one extra partial block.
	 */
	size = (u64_t)(romdisk->end - romdisk->start);
	rem = div64_64(&size, SZ_512);
	if(rem > 0)
		size++;

	/* NOTE(review): the original re-assigned romdisk->busy = FALSE here,
	 * duplicating the assignment above; the redundant store is dropped. */

	dev->name		= romdisk->name;
	dev->type		= BLK_DEV_ROMDISK;
	dev->blksz		= SZ_512;
	dev->blkcnt		= size;
	dev->open 		= romdisk_open;
	dev->read 		= romdisk_read;
	dev->write		= romdisk_write;
	dev->ioctl 		= romdisk_ioctl;
	dev->close		= romdisk_close;
	dev->driver	 = romdisk;

	if(!register_blkdev(dev))
	{
		free(romdisk);
		free(dev);
		return;
	}
}
Example #2
0
/*
 * connbytes match: compare a connection's accumulated packet count, byte
 * count, or average bytes-per-packet against the [from, to] range in the
 * match info. Returns 1 on match, 0 otherwise (including when the skb has
 * no conntrack counters attached).
 */
static int
match(const struct sk_buff *skb,
      const struct net_device *in,
      const struct net_device *out,
      const struct xt_match *match,
      const void *matchinfo,
      int offset,
      unsigned int protoff,
      int *hotdrop)
{
	const struct xt_connbytes_info *sinfo = matchinfo;
	const struct ip_conntrack_counter *counters;
	u_int64_t what = 0;	/* initialize to make gcc happy */

	counters = nf_ct_get_counters(skb);
	if (!counters)
		return 0; /* untracked connection: cannot match */

	if (sinfo->what == XT_CONNBYTES_PKTS) {
		/* sum the selected direction(s); BOTH takes both legs */
		if (sinfo->direction == XT_CONNBYTES_DIR_ORIGINAL ||
		    sinfo->direction == XT_CONNBYTES_DIR_BOTH)
			what += counters[IP_CT_DIR_ORIGINAL].packets;
		if (sinfo->direction == XT_CONNBYTES_DIR_REPLY ||
		    sinfo->direction == XT_CONNBYTES_DIR_BOTH)
			what += counters[IP_CT_DIR_REPLY].packets;
	} else if (sinfo->what == XT_CONNBYTES_BYTES) {
		if (sinfo->direction == XT_CONNBYTES_DIR_ORIGINAL ||
		    sinfo->direction == XT_CONNBYTES_DIR_BOTH)
			what += counters[IP_CT_DIR_ORIGINAL].bytes;
		if (sinfo->direction == XT_CONNBYTES_DIR_REPLY ||
		    sinfo->direction == XT_CONNBYTES_DIR_BOTH)
			what += counters[IP_CT_DIR_REPLY].bytes;
	} else if (sinfo->what == XT_CONNBYTES_AVGPKT) {
		u_int64_t total_bytes = 0;
		u_int64_t total_pkts = 0;

		if (sinfo->direction == XT_CONNBYTES_DIR_ORIGINAL ||
		    sinfo->direction == XT_CONNBYTES_DIR_BOTH) {
			total_bytes += counters[IP_CT_DIR_ORIGINAL].bytes;
			total_pkts  += counters[IP_CT_DIR_ORIGINAL].packets;
		}
		if (sinfo->direction == XT_CONNBYTES_DIR_REPLY ||
		    sinfo->direction == XT_CONNBYTES_DIR_BOTH) {
			total_bytes += counters[IP_CT_DIR_REPLY].bytes;
			total_pkts  += counters[IP_CT_DIR_REPLY].packets;
		}
		/* avoid dividing by zero on a connection with no packets */
		if (total_pkts != 0)
			what = div64_64(total_bytes, total_pkts);
	}

	/* count.to == 0 means "no upper bound": only check the lower bound */
	if (sinfo->count.to)
		return (what <= sinfo->count.to && what >= sinfo->count.from);
	else
		return (what >= sinfo->count.from);
}
Example #3
0
/*
 * disk read function, just used by partition parser.
 *
 * Reads `count` bytes starting at absolute byte `offset` on `disk` into
 * `buf`. Reads are sector-granular, so a misaligned head and a short tail
 * are bounced through a one-sector scratch buffer while fully-aligned
 * middle sectors are read directly into `buf`. Returns the number of
 * bytes copied: 0 on bad arguments or if the first read fails, or a
 * partial count if a later read fails.
 */
loff_t disk_read(struct disk * disk, u8_t * buf, loff_t offset, loff_t count)
{
	u8_t * secbuf;
	size_t secno, secsz, seccnt;
	u64_t div, rem;
	size_t len;
	loff_t tmp;
	loff_t size = 0;

	if(!buf)
		return 0;

	if(!disk)
		return 0;

	secsz = disk->sector_size;
	if(secsz <= 0)
		return 0;

	seccnt = disk->sector_count;
	if(seccnt <= 0)
		return 0;

	/* total capacity in bytes; reject reads starting at/after the end */
	tmp = secsz * seccnt;
	if( (count <= 0) || (offset < 0) || (offset >= tmp) )
		return 0;

	/* clamp count so the read cannot run past the end of the disk */
	tmp = tmp - offset;
	if(count > tmp)
		count = tmp;

	/* scratch buffer for the misaligned head/tail fragments */
	secbuf = malloc(secsz);
	if(!secbuf)
		return 0;

	/*
	 * div64_64() divides in place: div becomes the quotient (the first
	 * sector number) and the return value is the remainder (the byte
	 * offset within that sector).
	 */
	div = offset;
	rem = div64_64(&div, secsz);
	secno = div;

	/* head fragment: offset is not sector-aligned */
	if(rem > 0)
	{
		len = secsz - rem;
		if(count < len)
			len = count;

		if(disk->read_sectors(disk, secbuf, secno, 1) != 1)
		{
			free(secbuf);
			return 0;
		}

		memcpy((void *)buf, (const void *)(&secbuf[rem]), len);
		buf += len;
		count -= len;
		size += len;
		secno += 1;
	}

	/* middle: whole sectors read straight into the caller's buffer */
	div = count;
	rem = div64_64(&div, secsz);

	if(div > 0)
	{
		len = div * secsz;

		if(disk->read_sectors(disk, buf, secno, div) != div)
		{
			free(secbuf);
			return size;
		}

		buf += len;
		count -= len;
		size += len;
		secno += div;
	}

	/* tail fragment: remaining bytes shorter than one sector */
	if(count > 0)
	{
		len = count;

		if(disk->read_sectors(disk, secbuf, secno, 1) != 1)
		{
			free(secbuf);
			return size;
		}

		memcpy((void *)buf, (const void *)(&secbuf[0]), len);
		size += len;
	}

	free(secbuf);
	return size;
}
Example #4
0
/*
 * Dump per-task scheduler statistics for task `p` into seq_file `m`
 * (the backend for /proc/<pid>/sched). Output is a header line followed
 * by "name : value" rows produced by the P/__P/PN/__PN macros below.
 */
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;
	unsigned long flags;
	int num_threads = 1;

	/* read the thread count under the sighand lock; if the lock cannot
	 * be taken (task exiting) fall back to the default of 1 */
	rcu_read_lock();
	if (lock_task_sighand(p, &flags)) {
		num_threads = atomic_read(&p->signal->count);
		unlock_task_sighand(p, &flags);
	}
	rcu_read_unlock();

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
	SEQ_printf(m,
		"---------------------------------------------------------\n");
/* __P prints a local expression; P prints a member of *p.
 * The PN variants split a nanosecond value into sec.usec via SPLIT_NS. */
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	/* total context switches = voluntary + involuntary */
	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.wait_start);
	PN(se.sleep_start);
	PN(se.block_start);
	PN(se.sleep_max);
	PN(se.block_max);
	PN(se.exec_max);
	PN(se.slice_max);
	PN(se.wait_max);
	P(sched_info.bkl_count);
	P(se.nr_migrations);
	P(se.nr_migrations_cold);
	P(se.nr_failed_migrations_affine);
	P(se.nr_failed_migrations_running);
	P(se.nr_failed_migrations_hot);
	P(se.nr_forced_migrations);
	P(se.nr_forced2_migrations);
	P(se.nr_wakeups);
	P(se.nr_wakeups_sync);
	P(se.nr_wakeups_migrate);
	P(se.nr_wakeups_local);
	P(se.nr_wakeups_remote);
	P(se.nr_wakeups_affine);
	P(se.nr_wakeups_affine_attempts);
	P(se.nr_wakeups_passive);
	P(se.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		/* average runtime per context switch; do_div() divides
		 * avg_atom in place by the 32-bit divisor */
		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;	/* sentinel: no switches yet */

		/* average runtime per migration; div64_64() is the full
		 * 64-by-64 by-value divide */
		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_64(avg_per_cpu,
					       p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;	/* sentinel: never migrated */
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		u64 t0, t1;

		/* back-to-back sched_clock() reads: reports the clock's
		 * own sampling overhead/granularity */
		t0 = sched_clock();
		t1 = sched_clock();
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}