Example #1
0
static long pfair_admit_task(struct task_struct* t)
{
	lt_t quanta;
	lt_t period;
	s64  quantum_length = ktime_to_ns(tick_period);
	struct pfair_param* param;
	unsigned long i;

	/* Pfair is a tick-based method, so the time
	 * of interest is jiffies. Calculate tick-based
	 * times for everything.
	 * (Ceiling of exec cost, floor of period.)
	 */

	period = get_rt_period(t);
	quanta = time2quanta(get_exec_cost(t), CEIL);

	if (do_div(period, quantum_length))
		printk(KERN_WARNING
		       "The period of %s/%d is not a multiple of %llu.\n",
		       t->comm, t->pid, (unsigned long long) quantum_length);

	if (period >= PFAIR_MAX_PERIOD) {
		printk(KERN_WARNING
		       "PFAIR: Rejecting task %s/%d; its period is too long.\n",
		       t->comm, t->pid);
		return -EINVAL;
	}

	if (quanta == period) {
		/* special case: task has weight 1.0 */
		printk(KERN_INFO
		       "Admitting weight 1.0 task. (%s/%d, %llu, %llu).\n",
		       t->comm, t->pid, quanta, period);
		quanta = 1;
		period = 1;
	}

	param = kmalloc(sizeof(*param) +
			quanta * sizeof(struct subtask), GFP_ATOMIC);

	if (!param)
		return -ENOMEM;

	param->quanta  = quanta;
	param->cur     = 0;
	param->release = 0;
	param->period  = period;

	for (i = 0; i < quanta; i++)
		init_subtask(param->subtasks + i, i, quanta, period);

	if (t->rt_param.pfair)
		/* get rid of stale allocation */
		kfree(t->rt_param.pfair);

	t->rt_param.pfair = param;

	/* spew out some debug info */
	dump_subtasks(t);

	return 0;
}
Example #2
0
static long long vpe_do_div(long long num, long long den)
{
    do_div(num, den);
    return num;
}
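A side note on semantics, since this wrapper exists purely for convenience: the kernel's do_div(n, base) macro divides n in place and returns the remainder, which is awkward to use inside an expression; vpe_do_div() hides that. A minimal userspace sketch of the same contract (do_div is emulated here, as the macro is kernel-only):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divide *n in place,
 * return the remainder, mirroring the macro's contract. */
static uint32_t do_div_emul(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t ns = 5123456789ULL;			/* 5.123456789 s in ns */
	uint32_t rem = do_div_emul(&ns, 1000000000U);
	printf("%llu.%09u\n", (unsigned long long)ns, rem);	/* 5.123456789 */
	return 0;
}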
Example #3
0
void record_probe_data(struct sock *sk, int type, size_t size, unsigned long long t_pre)
{
	char Tmp1[MAXTMPSIZE];
	int Tmp1_len;
	struct inet_sock *inet = inet_sk(sk);
	__be16 sport, dport;
	__be32 daddr, saddr;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	unsigned long nanosec_rem_pre;
	t_now = sched_clock();
	nanosec_rem=do_div(t_now, 1000000000U);
	nanosec_rem_pre=do_div(t_pre, 1000000000U);

	if (!inet)
		return;

	saddr=inet->inet_rcv_saddr;
	sport=inet->inet_num;
	daddr=inet->inet_daddr;
	dport=inet->inet_dport;

	
	if (0x00000000==saddr || 0x0100007f==saddr)
		return;

	memset(Tmp1, 0, sizeof(Tmp1));

	switch (type)
	{
		case 1: 
		{
			unsigned long long t_diff=t_now-t_pre;
			unsigned long nanosec_rem_diff;

			if (nanosec_rem>=nanosec_rem_pre)
				nanosec_rem_diff=nanosec_rem-nanosec_rem_pre;
			else {
				if (t_diff>0) {
					t_diff=t_diff-1;
					nanosec_rem_diff=1000000000+nanosec_rem-nanosec_rem_pre;
				} else {
					t_diff=t_pre;
					nanosec_rem_diff=nanosec_rem_pre;
				}
			}
			snprintf(Tmp1,MAXTMPSIZE,"[%05u.%09lu] UID%05d PID%05d        SEND S.IP:%03d.%03d.%03d.%03d/%05d, D.IP:%03d.%03d.%03d.%03d/%05d,%08d Bytes,D.T[%01u.%09lu]\n",
					(unsigned)t_now,nanosec_rem,
					current->cred->uid, current->pid,
					NIPQUAD(saddr),sport,
					NIPQUAD(daddr),dport,
					size,(unsigned)t_diff,nanosec_rem_diff);
			break;
		}
		case 2: 
		{
			unsigned long long t_diff=t_now-t_pre;
			unsigned long nanosec_rem_diff;

			if (nanosec_rem>=nanosec_rem_pre)
				nanosec_rem_diff=nanosec_rem-nanosec_rem_pre;
			else {
				if (t_diff>0) {
					t_diff=t_diff-1;
					nanosec_rem_diff=1000000000+nanosec_rem-nanosec_rem_pre;
				} else {
					t_diff=t_pre;
					nanosec_rem_diff=nanosec_rem_pre;
				}
			}
			snprintf(Tmp1,MAXTMPSIZE,"[%05u.%09lu] UID%05d PID%05d        RECV S.IP:%03d.%03d.%03d.%03d/%05d, D.IP:%03d.%03d.%03d.%03d/%05d,%08d Bytes,D.T[%01u.%09lu]\n",
					(unsigned)t_now,nanosec_rem,
					current->cred->uid,current->pid,
					NIPQUAD(saddr),sport,
					NIPQUAD(daddr),dport,
					size,(unsigned)t_diff,nanosec_rem_diff);
			break;
		}
		case 3: 
			snprintf(Tmp1,MAXTMPSIZE,"[%05u.%09lu] UID%05d PID%05d      ACCEPT S.IP:%03d.%03d.%03d.%03d/%05d, D.IP:%03d.%03d.%03d.%03d/%05d,              ,                \n",(unsigned)t_now,nanosec_rem,current->cred->uid,current->pid,NIPQUAD(saddr),sport,NIPQUAD(daddr),dport);
			break;
		case 4: 
			snprintf(Tmp1,MAXTMPSIZE,"[%05u.%09lu] UID%05d PID%05d TCP CONNECT S.IP:%03d.%03d.%03d.%03d/%05d, D.IP:%03d.%03d.%03d.%03d/%05d,              ,                \n",(unsigned)t_now,nanosec_rem,current->cred->uid,current->pid,NIPQUAD(saddr),sport,NIPQUAD(daddr),dport);
			break;
		case 5: 
			snprintf(Tmp1,MAXTMPSIZE,"[%05u.%09lu] UID%05d PID%05d UDP CONNECT S.IP:%03d.%03d.%03d.%03d/%05d, D.IP:%03d.%03d.%03d.%03d/%05d,              ,                \n",(unsigned)t_now,nanosec_rem,current->cred->uid,current->pid,NIPQUAD(saddr),sport,NIPQUAD(daddr),dport);
			break;
		case 6: 
			snprintf(Tmp1,MAXTMPSIZE,"[%05u.%09lu] UID%05d PID%05d       CLOSE S.IP:%03d.%03d.%03d.%03d/%05d, D.IP:%03d.%03d.%03d.%03d/%05d,              ,                \n",(unsigned)t_now,nanosec_rem,current->cred->uid,current->pid,NIPQUAD(saddr),sport,NIPQUAD(daddr),dport);
			break;
		default:
			break;
	}

	Tmp1_len = strlen(Tmp1);

	mutex_lock(&probe_data_mutexlock);
	if(WritingLength + Tmp1_len < MAXDATASIZE) {
		memcpy(&ProcBuffer[WritingLength], Tmp1, Tmp1_len);
		WritingLength += Tmp1_len;
	} else {
		WritingLength=0;
		Ring=1;
		memcpy(&ProcBuffer[WritingLength], Tmp1, Tmp1_len);
		WritingLength += Tmp1_len;
	}
	
	if ( enable_log ) {
		enable_log = 0;
		pr_info("[htc_monitor]%s", Tmp1);
	}
	
	mutex_unlock(&probe_data_mutexlock);
	return;
}
Example #4
0
struct inode *omfs_iget(struct super_block *sb, ino_t ino)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	struct omfs_inode *oi;
	struct buffer_head *bh;
	u64 ctime;
	unsigned long nsecs;
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	bh = omfs_bread(inode->i_sb, ino);
	if (!bh)
		goto iget_failed;

	oi = (struct omfs_inode *)bh->b_data;

	/* check self */
	if (ino != be64_to_cpu(oi->i_head.h_self))
		goto fail_bh;

	inode->i_uid = sbi->s_uid;
	inode->i_gid = sbi->s_gid;

	ctime = be64_to_cpu(oi->i_ctime);
	nsecs = do_div(ctime, 1000) * 1000L;

	inode->i_atime.tv_sec = ctime;
	inode->i_mtime.tv_sec = ctime;
	inode->i_ctime.tv_sec = ctime;
	inode->i_atime.tv_nsec = nsecs;
	inode->i_mtime.tv_nsec = nsecs;
	inode->i_ctime.tv_nsec = nsecs;

	inode->i_mapping->a_ops = &omfs_aops;

	switch (oi->i_type) {
	case OMFS_DIR:
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~sbi->s_dmask);
		inode->i_op = &omfs_dir_inops;
		inode->i_fop = &omfs_dir_operations;
		inode->i_size = sbi->s_sys_blocksize;
		inc_nlink(inode);
		break;
	case OMFS_FILE:
		inode->i_mode = S_IFREG | (S_IRWXUGO & ~sbi->s_fmask);
		inode->i_fop = &omfs_file_operations;
		inode->i_size = be64_to_cpu(oi->i_size);
		break;
	}
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
fail_bh:
	brelse(bh);
iget_failed:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}
Example #5
0
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
	struct can_priv *priv = netdev_priv(dev);
	const struct can_bittiming_const *btc = priv->bittiming_const;
	long rate, best_rate = 0;
	long best_error = 1000000000, error = 0;
	int best_tseg = 0, best_brp = 0, brp = 0;
	int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
	int spt_error = 1000, spt = 0, sampl_pt;
	u64 v64;

	if (!priv->bittiming_const)
		return -ENOTSUPP;

	/* Use CIA recommended sample points */
	if (bt->sample_point) {
		sampl_pt = bt->sample_point;
	} else {
		if (bt->bitrate > 800000)
			sampl_pt = 750;
		else if (bt->bitrate > 500000)
			sampl_pt = 800;
		else
			sampl_pt = 875;
	}

	/* tseg even = round down, odd = round up */
	for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
	     tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
		tsegall = 1 + tseg / 2;
		/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
		brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
		/* choose brp step which is possible in system */
		brp = (brp / btc->brp_inc) * btc->brp_inc;
		if ((brp < btc->brp_min) || (brp > btc->brp_max))
			continue;
		rate = priv->clock.freq / (brp * tsegall);
		error = bt->bitrate - rate;
		/* tseg brp biterror */
		if (error < 0)
			error = -error;
		if (error > best_error)
			continue;
		best_error = error;
		if (error == 0) {
			spt = can_update_spt(btc, sampl_pt, tseg / 2,
					     &tseg1, &tseg2);
			error = sampl_pt - spt;
			if (error < 0)
				error = -error;
			if (error > spt_error)
				continue;
			spt_error = error;
		}
		best_tseg = tseg / 2;
		best_brp = brp;
		best_rate = rate;
		if (error == 0)
			break;
	}

	if (best_error) {
		/* Error in one-tenth of a percent */
		error = (best_error * 1000) / bt->bitrate;
		if (error > CAN_CALC_MAX_ERROR) {
			netdev_err(dev,
				   "bitrate error %ld.%ld%% too high\n",
				   error / 10, error % 10);
			return -EDOM;
		} else {
			netdev_warn(dev, "bitrate error %ld.%ld%%\n",
				    error / 10, error % 10);
		}
	}

	/* real sample point */
	bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg,
					  &tseg1, &tseg2);

	v64 = (u64)best_brp * 1000000000UL;
	do_div(v64, priv->clock.freq);
	bt->tq = (u32)v64;
	bt->prop_seg = tseg1 / 2;
	bt->phase_seg1 = tseg1 - bt->prop_seg;
	bt->phase_seg2 = tseg2;

	/* check for sjw user settings */
	if (!bt->sjw || !btc->sjw_max)
		bt->sjw = 1;
	else {
		/* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
		if (bt->sjw > btc->sjw_max)
			bt->sjw = btc->sjw_max;
		/* bt->sjw must not be higher than tseg2 */
		if (tseg2 < bt->sjw)
			bt->sjw = tseg2;
	}

	bt->brp = best_brp;
	/* real bit-rate */
	bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1));

	return 0;
}
Example #6
0
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * stored to *new_dev upon success. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
				&default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
		    !concat->mtd.read_oob  != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];

	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;
	concat->mtd.get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * in erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 *  fill in an mtd_erase_region_info structure for the area
					 *  we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}
Example #7
0
static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
{
	unsigned long long freqreg = (u64) fout * (u64) (1 << AD9834_FREQ_BITS);
	do_div(freqreg, mclk);
	return freqreg;
}
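For a feel for the arithmetic: with the part's 28-bit phase accumulator, a 75 MHz mclk and fout = 1 kHz give freqreg = 1000 * 2^28 / 75000000 ≈ 3579; the 64-bit intermediate product is why do_div() is needed on 32-bit kernels. A hedged userspace check (the AD9834_FREQ_BITS value of 28 is assumed from the datasheet's accumulator width; plain division stands in for do_div):

#include <stdint.h>
#include <stdio.h>

#define AD9834_FREQ_BITS 28	/* assumed: AD9834 phase-accumulator width */

static unsigned int calc_freqreg(unsigned long mclk, unsigned long fout)
{
	uint64_t freqreg = (uint64_t)fout << AD9834_FREQ_BITS;
	return (unsigned int)(freqreg / mclk);	/* kernel code does do_div(freqreg, mclk) */
}

int main(void)
{
	printf("%u\n", calc_freqreg(75000000UL, 1000UL));	/* prints 3579 */
	return 0;
}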
Example #8
0
static inline unsigned long long us_to_tick(unsigned long long us)
{
	us += US_PER_TICK - 1;
	do_div(us, US_PER_TICK);
	return us;
}
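The "us += US_PER_TICK - 1" line is the standard unsigned ceiling-division idiom: any partial tick rounds up to a whole one. A small userspace sketch of the same rounding (the US_PER_TICK value is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define US_PER_TICK 100U	/* hypothetical tick length in microseconds */

static uint64_t us_to_tick_ceil(uint64_t us)
{
	us += US_PER_TICK - 1;		/* round partial ticks up */
	return us / US_PER_TICK;	/* do_div(us, US_PER_TICK) in the kernel */
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       (unsigned long long)us_to_tick_ceil(99),		/* 1 */
	       (unsigned long long)us_to_tick_ceil(100),	/* 1 */
	       (unsigned long long)us_to_tick_ceil(101));	/* 2 */
	return 0;
}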
Example #9
0
static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
		       unsigned int Fout)
{
	u64 Kpart;
	unsigned int K, Ndiv, Nmod, target;
	unsigned int div;
	int i;

	/*                        */
	div = 1;
	while ((Fref / div) > 13500000) {
		div *= 2;

		if (div > 8) {
			pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
			       Fref);
			return -EINVAL;
		}
	}
	fll_div->fll_clk_ref_div = div / 2;

	pr_debug("Fref=%u Fout=%u\n", Fref, Fout);

	/*                                                   */
	Fref /= div;

	/*                                                       */
	div = 0;
	target = Fout * 2;
	while (target < 90000000) {
		div++;
		target *= 2;
		if (div > 7) {
			pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
			       Fout);
			return -EINVAL;
		}
	}
	fll_div->fll_outdiv = div;

	pr_debug("Fvco=%dHz\n", target);

	/*                                                                */
	for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
		if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
			fll_div->fll_fratio = fll_fratios[i].fll_fratio;
			target /= fll_fratios[i].ratio;
			break;
		}
	}
	if (i == ARRAY_SIZE(fll_fratios)) {
		pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
		return -EINVAL;
	}

	/*                    */
	Ndiv = target / Fref;

	fll_div->n = Ndiv;
	Nmod = target % Fref;
	pr_debug("Nmod=%d\n", Nmod);

	/*                                                       */
	Kpart = FIXED_FLL_SIZE * (long long)Nmod;

	do_div(Kpart, Fref);

	K = Kpart & 0xFFFFFFFF;

	if ((K % 10) >= 5)
		K += 5;

	/*                                                */
	fll_div->k = K / 10;

	pr_debug("N=%x K=%x FLL_FRATIO=%x FLL_OUTDIV=%x FLL_CLK_REF_DIV=%x\n",
		 fll_div->n, fll_div->k,
		 fll_div->fll_fratio, fll_div->fll_outdiv,
		 fll_div->fll_clk_ref_div);

	return 0;
}
Example #10
0
static inline unsigned long long us_to_tick(unsigned long long us)
{
	us = us * CONFIG_MX27_CLK32 + 999999;
	do_div(us, 1000000);
	return us;
}
Example #11
0
static inline unsigned long long tick_to_time(unsigned long long tick)
{
	do_div(tick, TICK_PER_TIME);
	return tick;
}
Example #12
0
static inline unsigned long long time_to_tick(unsigned long long time)
{
	time *= CONFIG_MX27_CLK32;
	do_div(time, CONFIG_SYS_HZ);
	return time;
}
Example #13
0
/* ~0.4% error - measured with stop-watch on 100s boot-delay */
static inline unsigned long long tick_to_time(unsigned long long tick)
{
	tick *= CONFIG_SYS_HZ;
	do_div(tick, CONFIG_MX27_CLK32);
	return tick;
}
Example #14
0
static int iwl_send_rxon_timing(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 *	 for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				0, sizeof(ctx->timing), &ctx->timing);
}
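Here it is do_div()'s return value, not the quotient, that the driver wants: the remainder tsf mod interval_tm is how far into the current beacon interval the TSF sits, so interval_tm - rem is the time until the next beacon boundary. A toy check with invented numbers (plain % emulates do_div's returned remainder):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tsf = 1000000123ULL;	/* hypothetical TSF, in usec */
	uint32_t interval_tm = 102400;	/* 100 TU beacon interval * 1024 usec/TU */
	uint32_t rem = (uint32_t)(tsf % interval_tm);	/* what do_div() returns */

	printf("next beacon in %u usec\n", interval_tm - rem);
	return 0;
}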
Example #15
0
static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type)
{
	/* we are called with base 8, 10 or 16, only, thus don't need "G..."  */
	static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */

	char tmp[66];
	char sign;
	char locase;
	int need_pfx = ((type & SPECIAL) && base != 10);
	int i;

	/* locase = 0 or 0x20. ORing digits or letters with 'locase'
	 * produces same digits or (maybe lowercased) letters */
	locase = (type & SMALL);
	if (type & LEFT)
		type &= ~ZEROPAD;
	sign = 0;
	if (type & SIGN) {
		if ((signed long long) num < 0) {
			sign = '-';
			num = - (signed long long) num;
			size--;
		} else if (type & PLUS) {
			sign = '+';
			size--;
		} else if (type & SPACE) {
			sign = ' ';
			size--;
		}
	}
	if (need_pfx) {
		size--;
		if (base == 16)
			size--;
	}
	/* generate full string in tmp[], in reverse order */
	i = 0;
	if (num == 0)
		tmp[i++] = '0';
	else do {
		tmp[i++] = (digits[do_div(num,base)] | locase);
	} while (num != 0);

	/* printing 100 using %2d gives "100", not "00" */
	if (i > precision)
		precision = i;
	size -= precision;
	if (!(type & (ZEROPAD+LEFT))) {
		while(--size >= 0) {
			if (buf < end)
				*buf = ' ';
			++buf;
		}
	}
	/* sign */
	if (sign) {
		if (buf < end)
			*buf = sign;
		++buf;
	}
	/* "0x" / "0" prefix */
	if (need_pfx) {
		if (buf < end)
			*buf = '0';
		++buf;
		if (base == 16) {
			if (buf < end)
				*buf = ('X' | locase);
			++buf;
		}
	}
	/* zero or space padding */
	if (!(type & LEFT)) {
		char c = (type & ZEROPAD) ? '0' : ' ';
		while (--size >= 0) {
			if (buf < end)
				*buf = c;
			++buf;
		}
	}
	/* hmm even more zero padding? */
	while (i <= --precision) {
		if (buf < end)
			*buf = '0';
		++buf;
	}
	/* actual digits of result */
	while (--i >= 0) {
		if (buf < end)
			*buf = tmp[i];
		++buf;
	}
	/* trailing space padding */
	while (--size >= 0) {
		if (buf < end)
			*buf = ' ';
		++buf;
	}
	return buf;
}
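The tmp[i++] = digits[do_div(num, base)] loop leans on do_div() doing two jobs at once: the returned remainder is the next (least-significant) digit, and the in-place quotient feeds the following iteration, so digits come out in reverse order. A minimal userspace version of the same loop for base 10 (plain / and % emulate do_div):

#include <stdint.h>
#include <stdio.h>

static void u64_to_dec(uint64_t num, char *out)	/* out: at least 21 bytes */
{
	char tmp[20];	/* a u64 has at most 20 decimal digits */
	int i = 0;

	do {
		tmp[i++] = '0' + (char)(num % 10);	/* do_div()'s remainder */
		num /= 10;				/* do_div()'s quotient */
	} while (num != 0);
	while (i > 0)					/* un-reverse the digits */
		*out++ = tmp[--i];
	*out = '\0';
}

int main(void)
{
	char buf[21];

	u64_to_dec(18446744073709551615ULL, buf);
	printf("%s\n", buf);	/* 18446744073709551615 */
	return 0;
}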
Example #16
0
static int ubi_volume_read(char *volume, char *buf, size_t size)
{
	int err, lnum, off, len, tbuf_size, i = 0;
	size_t count_save = size;
	void *tbuf;
	unsigned long long tmp;
	struct ubi_volume *vol = NULL;
	loff_t offp = 0;

	for (i = 0; i < ubi->vtbl_slots; i++) {
		vol = ubi->volumes[i];
		if (vol && !strcmp(vol->name, volume)) {
			printf("Volume %s found at volume id %d\n",
				volume, vol->vol_id);
			break;
		}
	}
	if (i == ubi->vtbl_slots) {
		printf("%s volume not found\n", volume);
		return 0;
	}

	printf("read %i bytes from volume %d to %x(buf address)\n",
	       (int) size, vol->vol_id, (unsigned)buf);

	if (vol->updating) {
		printf("updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		printf("damaged volume, update marker is set");
		return -EBADF;
	}
	if (offp == vol->used_bytes)
		return 0;

	if (size == 0) {
		printf("Read [%lu] bytes\n", (unsigned long) vol->used_bytes);
		size = vol->used_bytes;
	}

	if (vol->corrupted)
		printf("read from corrupted volume %d", vol->vol_id);
	if (offp + size > vol->used_bytes)
		count_save = size = vol->used_bytes - offp;

	tbuf_size = vol->usable_leb_size;
	if (size < tbuf_size)
		tbuf_size = ALIGN(size, ubi->min_io_size);
	tbuf = malloc(tbuf_size);
	if (!tbuf) {
		printf("NO MEM\n");
		return -ENOMEM;
	}
	len = size > tbuf_size ? tbuf_size : size;

	tmp = offp;
	off = do_div(tmp, vol->usable_leb_size);
	lnum = tmp;
	do {
		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err) {
			printf("read err %x\n", err);
			break;
		}
		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		size -= len;
		offp += len;

		memcpy(buf, tbuf, len);

		buf += len;
		len = size > tbuf_size ? tbuf_size : size;
	} while (size);

	free(tbuf);
	return err ? err : count_save - size;
}
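The tmp = offp; off = do_div(tmp, vol->usable_leb_size); lnum = tmp; sequence is a single divide that splits a flat byte offset into (logical eraseblock, offset within it): remainder into off, quotient into lnum. A small sketch of the decomposition (the LEB size and offset are illustrative values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offp = 300000;		/* flat byte offset into the volume */
	uint32_t leb_size = 126976;	/* hypothetical usable LEB size */
	uint32_t off = (uint32_t)(offp % leb_size);	/* do_div() returns this */
	uint64_t lnum = offp / leb_size;		/* do_div() leaves this in place */

	printf("lnum=%llu off=%u\n", (unsigned long long)lnum, off);	/* lnum=2 off=46048 */
	return 0;
}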
Example #17
0
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
Example #18
0
 static char16 * number_android (char16 *str, int64 num, int32 base, int32 size, int32 precision, int32 type)
 {
     const char16 *digits = L"0123456789abcdefghijklmnopqrstuvwxyz";
     if (type & LARGE)
         digits = L"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
     if (type & LEFT)
         type &= ~ZEROPAD;
     if (base < 2 || base > 36)
         return 0;
     
     char16 c = (type & ZEROPAD) ? L'0' : L' ';
     char16 sign = 0;
     
     if (type & SIGN)
     {
         if (num < 0)
         {
             sign = L'-';
             num = -num;
             --size;
         }
         else if (type & PLUS)
         {
             sign = L'+';
             --size;
         }
         else if (type & SPACE)
         {
             sign = L' ';
             --size;
         }
     }
     
     if (type & SPECIAL)
     {
         if (base == 16)
         {
             size -= 2;
         }
         else if (base == 8)
         {
             --size;
         }
     }
     
     char16 tmp[66];
     int32 i = 0;
     if (num == 0)
     {
         tmp[i++]='0';
     }
     else
     {
         while (num != 0)
         {
             tmp[i++] = digits[do_div(num,base)];
         }
     }
     
     if (i > precision)
     {
         precision = i;
     }
     
     size -= precision;
     if (!(type & (ZEROPAD+LEFT)))
     {
         while(size-- > 0)
         {
             *str++ = L' ';
         }
     }
     
     if (sign)
     {
         *str++ = sign;
     }
     
     if (type & SPECIAL)
     {
         if (base==8)
         {
             *str++ = L'0';
         }
         else if (base==16)
         {
             *str++ = L'0';
             *str++ = digits[33];
         }
     }
     
     if (!(type & LEFT))
     {
         while (size-- > 0)
         {
             *str++ = c;
         }
     }
     
     while (i < precision--)
     {
         *str++ = '0';
     }
     
     while (i-- > 0)
     {
         *str++ = tmp[i];
     }
     
     while (size-- > 0)
     {
         *str++ = L' ';
     }
     
     return str;
 }
Example #19
0
static void
codegen14(enum node_op op, 
          enum size_tag size,
          gp_boolean is_const,
          int value,
          char *name,
          char *bank_addr)
{
  switch (op) {
  case op_assign:
    assert(0);
    break;
  case op_add:
    do_add(size, is_const, value, name, bank_addr);
    break;
  case op_sub:
    do_sub(size, is_const, value, name, bank_addr);
    break;
  case op_neg:
    do_neg(size, is_const, value, name, bank_addr);
    break;
  case op_com:
    do_com(size, is_const, value, name, bank_addr);
    break;
  case op_and:
    do_and(size, is_const, value, name, bank_addr);
    break;
  case op_or:
    do_or(size, is_const, value, name, bank_addr);
    break;
  case op_xor:
    do_xor(size, is_const, value, name, bank_addr);
    break;
  case op_not:
    do_not(size, is_const, value, name, bank_addr);
    break;
  case op_lsh:
    do_lsh(size, is_const, value, name, bank_addr);
    break;
  case op_rsh:
    do_rsh(size, is_const, value, name, bank_addr);
    break;
  case op_land:
    do_and(size_uint8, is_const, value, name, bank_addr);
    break;
  case op_lor:
    do_or(size_uint8, is_const, value, name, bank_addr);
    break;
  case op_eq:
    do_eq(size, is_const, value, name, bank_addr);
    break;
  case op_ne:
    do_ne(size, is_const, value, name, bank_addr);
    break;
  case op_lt:
    do_lt(size, is_const, value, name, bank_addr);
    break;
  case op_lte:
    do_lte(size, is_const, value, name, bank_addr);
    break;
  case op_gt:
  case op_gte:
    /* This is replaced in the optimizer.*/
    assert(0);
    break;
  case op_mult:
    do_mult(size, is_const, value, name, bank_addr);
    break;
  case op_div:
    do_div(size, is_const, value, name, bank_addr);
    break;
  case op_mod:
    do_mod(size, is_const, value, name, bank_addr);
    break;
  case op_clr:
  case op_inc:
  case op_dec:
    /* Should use unopgen14. */
    assert(0);
    break;
  default:
    assert(0); /* Unhandled binary operator */
  }

}
Example #20
0
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}
Example #21
0
static int __init mtd_oobtest_init(void)
{
	int err = 0;
	unsigned int i;
	uint64_t tmp;
	struct mtd_oob_ops ops;
	loff_t addr = 0, addr0;

	printk(KERN_INFO "\n");
	printk(KERN_INFO "=================================================\n");
	printk(PRINT_PREF "MTD device: %d\n", dev);

	mtd = get_mtd_device(NULL, dev);
	if (IS_ERR(mtd)) {
		err = PTR_ERR(mtd);
		printk(PRINT_PREF "error: cannot get MTD device\n");
		return err;
	}

	if (mtd->type != MTD_NANDFLASH) {
		printk(PRINT_PREF "this test requires NAND flash\n");
		goto out;
	}

	tmp = mtd->size;
	do_div(tmp, mtd->erasesize);
	ebcnt = tmp;
	pgcnt = mtd->erasesize / mtd->writesize;

	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
	       "page size %u, count of eraseblocks %u, pages per "
	       "eraseblock %u, OOB size %u\n",
	       (unsigned long long)mtd->size, mtd->erasesize,
	       mtd->writesize, ebcnt, pgcnt, mtd->oobsize);

	err = -ENOMEM;
	readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!readbuf) {
		printk(PRINT_PREF "error: cannot allocate memory\n");
		goto out;
	}
	writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!writebuf) {
		printk(PRINT_PREF "error: cannot allocate memory\n");
		goto out;
	}

	err = scan_for_bad_eraseblocks();
	if (err)
		goto out;

	use_offset = 0;
	use_len = mtd->ecclayout->oobavail;
	use_len_max = mtd->ecclayout->oobavail;
	vary_offset = 0;

	/* First test: write all OOB, read it back and verify */
	printk(PRINT_PREF "test 1 of 5\n");

	err = erase_whole_device();
	if (err)
		goto out;

	simple_srand(1);
	err = write_whole_device();
	if (err)
		goto out;

	simple_srand(1);
	err = verify_all_eraseblocks();
	if (err)
		goto out;

	/*
	 * Second test: write all OOB, a block at a time, read it back and
	 * verify.
	 */
	printk(PRINT_PREF "test 2 of 5\n");

	err = erase_whole_device();
	if (err)
		goto out;

	simple_srand(3);
	err = write_whole_device();
	if (err)
		goto out;

	/* Check all eraseblocks */
	simple_srand(3);
	printk(PRINT_PREF "verifying all eraseblocks\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = verify_eraseblock_in_one_go(i);
		if (err)
			goto out;
		if (i % 256 == 0)
			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
		cond_resched();
	}
	printk(PRINT_PREF "verified %u eraseblocks\n", i);

	/*
	 * Third test: write OOB at varying offsets and lengths, read it back
	 * and verify.
	 */
	printk(PRINT_PREF "test 3 of 5\n");

	err = erase_whole_device();
	if (err)
		goto out;

	/* Write all eraseblocks */
	use_offset = 0;
	use_len = mtd->ecclayout->oobavail;
	use_len_max = mtd->ecclayout->oobavail;
	vary_offset = 1;
	simple_srand(5);

	err = write_whole_device();
	if (err)
		goto out;

	/* Check all eraseblocks */
	use_offset = 0;
	use_len = mtd->ecclayout->oobavail;
	use_len_max = mtd->ecclayout->oobavail;
	vary_offset = 1;
	simple_srand(5);
	err = verify_all_eraseblocks();
	if (err)
		goto out;

	use_offset = 0;
	use_len = mtd->ecclayout->oobavail;
	use_len_max = mtd->ecclayout->oobavail;
	vary_offset = 0;

	/* Fourth test: try to write off end of device */
	printk(PRINT_PREF "test 4 of 5\n");

	err = erase_whole_device();
	if (err)
		goto out;

	addr0 = 0;
	for (i = 0; i < ebcnt && bbt[i]; ++i)
		addr0 += mtd->erasesize;

	/* Attempt to write off end of OOB */
	ops.mode      = MTD_OOB_AUTO;
	ops.len       = 0;
	ops.retlen    = 0;
	ops.ooblen    = 1;
	ops.oobretlen = 0;
	ops.ooboffs   = mtd->ecclayout->oobavail;
	ops.datbuf    = NULL;
	ops.oobbuf    = writebuf;
	printk(PRINT_PREF "attempting to start write past end of OOB\n");
	printk(PRINT_PREF "an error is expected...\n");
	err = mtd->write_oob(mtd, addr0, &ops);
	if (err) {
		printk(PRINT_PREF "error occurred as expected\n");
		err = 0;
	} else {
		printk(PRINT_PREF "error: can write past end of OOB\n");
		errcnt += 1;
	}

	/* Attempt to read off end of OOB */
	ops.mode      = MTD_OOB_AUTO;
	ops.len       = 0;
	ops.retlen    = 0;
	ops.ooblen    = 1;
	ops.oobretlen = 0;
	ops.ooboffs   = mtd->ecclayout->oobavail;
	ops.datbuf    = NULL;
	ops.oobbuf    = readbuf;
	printk(PRINT_PREF "attempting to start read past end of OOB\n");
	printk(PRINT_PREF "an error is expected...\n");
	err = mtd->read_oob(mtd, addr0, &ops);
	if (err) {
		printk(PRINT_PREF "error occurred as expected\n");
		err = 0;
	} else {
		printk(PRINT_PREF "error: can read past end of OOB\n");
		errcnt += 1;
	}

	if (bbt[ebcnt - 1])
		printk(PRINT_PREF "skipping end of device tests because last "
		       "block is bad\n");
	else {
		/* Attempt to write off end of device */
		ops.mode      = MTD_OOB_AUTO;
		ops.len       = 0;
		ops.retlen    = 0;
		ops.ooblen    = mtd->ecclayout->oobavail + 1;
		ops.oobretlen = 0;
		ops.ooboffs   = 0;
		ops.datbuf    = NULL;
		ops.oobbuf    = writebuf;
		printk(PRINT_PREF "attempting to write past end of device\n");
		printk(PRINT_PREF "an error is expected...\n");
		err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			printk(PRINT_PREF "error occurred as expected\n");
			err = 0;
		} else {
			printk(PRINT_PREF "error: wrote past end of device\n");
			errcnt += 1;
		}

		/* Attempt to read off end of device */
		ops.mode      = MTD_OOB_AUTO;
		ops.len       = 0;
		ops.retlen    = 0;
		ops.ooblen    = mtd->ecclayout->oobavail + 1;
		ops.oobretlen = 0;
		ops.ooboffs   = 0;
		ops.datbuf    = NULL;
		ops.oobbuf    = readbuf;
		printk(PRINT_PREF "attempting to read past end of device\n");
		printk(PRINT_PREF "an error is expected...\n");
		err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			printk(PRINT_PREF "error occurred as expected\n");
			err = 0;
		} else {
			printk(PRINT_PREF "error: read past end of device\n");
			errcnt += 1;
		}

		err = erase_eraseblock(ebcnt - 1);
		if (err)
			goto out;

		/* Attempt to write off end of device */
		ops.mode      = MTD_OOB_AUTO;
		ops.len       = 0;
		ops.retlen    = 0;
		ops.ooblen    = mtd->ecclayout->oobavail;
		ops.oobretlen = 0;
		ops.ooboffs   = 1;
		ops.datbuf    = NULL;
		ops.oobbuf    = writebuf;
		printk(PRINT_PREF "attempting to write past end of device\n");
		printk(PRINT_PREF "an error is expected...\n");
		err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			printk(PRINT_PREF "error occurred as expected\n");
			err = 0;
		} else {
			printk(PRINT_PREF "error: wrote past end of device\n");
			errcnt += 1;
		}

		/* Attempt to read off end of device */
		ops.mode      = MTD_OOB_AUTO;
		ops.len       = 0;
		ops.retlen    = 0;
		ops.ooblen    = mtd->ecclayout->oobavail;
		ops.oobretlen = 0;
		ops.ooboffs   = 1;
		ops.datbuf    = NULL;
		ops.oobbuf    = readbuf;
		printk(PRINT_PREF "attempting to read past end of device\n");
		printk(PRINT_PREF "an error is expected...\n");
		err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			printk(PRINT_PREF "error occurred as expected\n");
			err = 0;
		} else {
			printk(PRINT_PREF "error: read past end of device\n");
			errcnt += 1;
		}
	}

	/* Fifth test: write / read across block boundaries */
	printk(PRINT_PREF "test 5 of 5\n");

	/* Erase all eraseblocks */
	err = erase_whole_device();
	if (err)
		goto out;

	/* Write all eraseblocks */
	simple_srand(11);
	printk(PRINT_PREF "writing OOBs of whole device\n");
	for (i = 0; i < ebcnt - 1; ++i) {
		int cnt = 2;
		int pg;
		size_t sz = mtd->ecclayout->oobavail;
		if (bbt[i] || bbt[i + 1])
			continue;
		addr = (i + 1) * mtd->erasesize - mtd->writesize;
		for (pg = 0; pg < cnt; ++pg) {
			set_random_data(writebuf, sz);
			ops.mode      = MTD_OOB_AUTO;
			ops.len       = 0;
			ops.retlen    = 0;
			ops.ooblen    = sz;
			ops.oobretlen = 0;
			ops.ooboffs   = 0;
			ops.datbuf    = NULL;
			ops.oobbuf    = writebuf;
			err = mtd->write_oob(mtd, addr, &ops);
			if (err)
				goto out;
			if (i % 256 == 0)
				printk(PRINT_PREF "written up to eraseblock "
				       "%u\n", i);
			cond_resched();
			addr += mtd->writesize;
		}
	}
	printk(PRINT_PREF "written %u eraseblocks\n", i);

	/* Check all eraseblocks */
	simple_srand(11);
	printk(PRINT_PREF "verifying all eraseblocks\n");
	for (i = 0; i < ebcnt - 1; ++i) {
		if (bbt[i] || bbt[i + 1])
			continue;
		set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
		addr = (i + 1) * mtd->erasesize - mtd->writesize;
		ops.mode      = MTD_OOB_AUTO;
		ops.len       = 0;
		ops.retlen    = 0;
		ops.ooblen    = mtd->ecclayout->oobavail * 2;
		ops.oobretlen = 0;
		ops.ooboffs   = 0;
		ops.datbuf    = NULL;
		ops.oobbuf    = readbuf;
		err = mtd->read_oob(mtd, addr, &ops);
		if (err)
			goto out;
		if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
			printk(PRINT_PREF "error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				printk(PRINT_PREF "error: too many errors\n");
				goto out;
			}
		}
		if (i % 256 == 0)
			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
		cond_resched();
	}
	printk(PRINT_PREF "verified %u eraseblocks\n", i);

	printk(PRINT_PREF "finished with %d errors\n", errcnt);
out:
	kfree(bbt);
	kfree(writebuf);
	kfree(readbuf);
	put_mtd_device(mtd);
	if (err)
		printk(PRINT_PREF "error %d occurred\n", err);
	printk(KERN_INFO "=================================================\n");
	return err;
}
Example #22
0
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate, plen;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (!q->rate_enable)
		goto out;

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate <= q->low_rate_threshold) {
		f->credit = 0;
		plen = qdisc_pkt_len(skb);
	} else {
		plen = max(qdisc_pkt_len(skb), q->quantum);
		if (f->credit > 0)
			goto out;
	}
	if (rate != ~0U) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}
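The pacing math above is len_ns = plen * NSEC_PER_SEC / rate, i.e. how long a packet of plen bytes "occupies" the wire at rate bytes per second; the 64-bit multiply happens before the divide so nothing overflows. A rough userspace check with invented values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t plen = 1514;	/* hypothetical packet length, bytes */
	uint32_t rate = 125000;	/* hypothetical pacing rate, bytes/s (= 1 Mbit/s) */
	uint64_t len = (uint64_t)plen * NSEC_PER_SEC;

	len /= rate;		/* the kernel does do_div(len, rate) here */
	printf("gap: %llu ns\n", (unsigned long long)len);	/* 12112000 ns */
	return 0;
}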
Example #23
0
static int st_rtc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct st_rtc *rtc;
	struct resource *res;
	uint32_t mode;
	int ret = 0;

	ret = of_property_read_u32(np, "st,lpc-mode", &mode);
	if (ret) {
		dev_err(&pdev->dev, "An LPC mode must be provided\n");
		return -EINVAL;
	}

	/* LPC can either run as a Clocksource or in RTC or WDT mode */
	if (mode != ST_LPC_MODE_RTC)
		return -ENODEV;

	rtc = devm_kzalloc(&pdev->dev, sizeof(struct st_rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc->rtc_dev))
		return PTR_ERR(rtc->rtc_dev);

	spin_lock_init(&rtc->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rtc->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(rtc->ioaddr))
		return PTR_ERR(rtc->ioaddr);

	rtc->irq = irq_of_parse_and_map(np, 0);
	if (!rtc->irq) {
		dev_err(&pdev->dev, "IRQ missing or invalid\n");
		return -EINVAL;
	}

	ret = devm_request_irq(&pdev->dev, rtc->irq, st_rtc_handler, 0,
			       pdev->name, rtc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq %i\n", rtc->irq);
		return ret;
	}

	enable_irq_wake(rtc->irq);
	disable_irq(rtc->irq);

	rtc->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(rtc->clk)) {
		dev_err(&pdev->dev, "Unable to request clock\n");
		return PTR_ERR(rtc->clk);
	}

	clk_prepare_enable(rtc->clk);

	rtc->clkrate = clk_get_rate(rtc->clk);
	if (!rtc->clkrate) {
		dev_err(&pdev->dev, "Unable to fetch clock rate\n");
		return -EINVAL;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	platform_set_drvdata(pdev, rtc);

	rtc->rtc_dev->ops = &st_rtc_ops;
	rtc->rtc_dev->range_max = U64_MAX;
	do_div(rtc->rtc_dev->range_max, rtc->clkrate);

	ret = rtc_register_device(rtc->rtc_dev);
	if (ret) {
		clk_disable_unprepare(rtc->clk);
		return ret;
	}

	return 0;
}
Example #24
0
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_csecs / 100, elapsed_csecs % 100,
		       todo - wq_busy, wq_busy);

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (!wakeup && !freezer_should_skip(p) &&
			    p != current && freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
		printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
		       elapsed_csecs % 100);
	}

	return todo ? -EBUSY : 0;
}
Example #25
0
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{
	int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0;
	uint16_t raw_data;
	int32_t rsense_u_ohms = 0;
	int64_t result_current;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
		pr_err("raw offset errors! run iadc calibration again\n");
		return -EINVAL;
	}

	mutex_lock(&iadc->adc->adc_lock);

	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	iadc->iadc_mode_sel = true;

	rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel);
	if (rc) {
		pr_err("Configuring VADC failed\n");
		goto fail;
	}

	rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail_release_vadc;
	}

	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
	pr_debug("current raw:0%x and rsense:%d\n",
			raw_data, rsense_n_ohms);
	rsense_u_ohms = rsense_n_ohms/1000;
	num = raw_data - iadc->adc->calib.offset_raw;
	if (num < 0) {
		sign = 1;
		num = -num;
	}

	i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/
		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
	result_current = i_result->result_uv;
	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
	/* Intentional fall through. Process the result w/o comp */
	if (!rsense_u_ohms) {
		pr_err("rsense error=%d\n", rsense_u_ohms);
		goto fail_release_vadc;
	}

	do_div(result_current, rsense_u_ohms);

	if (sign) {
		i_result->result_uv = -i_result->result_uv;
		result_current = -result_current;
	}
	result_current *= -1;
	rc = qpnp_iadc_comp_result(iadc, &result_current);
	if (rc < 0)
		pr_err("Error during compensating the IADC\n");
	rc = 0;
	result_current *= -1;

	i_result->result_ua = (int32_t) result_current;

fail_release_vadc:
	rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel,
							v_result);
	if (rc)
		pr_err("Releasing VADC failed\n");
fail:
	iadc->iadc_mode_sel = false;

	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);

	return rc;
}
Example #26
0
/* adjtimex mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. used by xntpd.
 */
int do_adjtimex(struct timex *txc)
{
	long mtemp, save_adjust, rem;
	s64 freq_adj, temp64;
	int result;

	/* In order to modify anything, you gotta be super-user! */
	if (txc->modes && !capable(CAP_SYS_TIME))
		return -EPERM;

	/* Now we validate the data before disabling interrupts */

	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
	  /* singleshot must not be used with any other mode bits */
		if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
					txc->modes != ADJ_OFFSET_SS_READ)
			return -EINVAL;
	}

	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
	  /* adjustment Offset limited to +- .512 seconds */
		if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
			return -EINVAL;

	/* if the quartz is off by more than 10% something is VERY wrong ! */
	if (txc->modes & ADJ_TICK)
		if (txc->tick <  900000/USER_HZ ||
		    txc->tick > 1100000/USER_HZ)
			return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	result = time_state;	/* mostly `TIME_OK' */

	/* Save for later - semantics of adjtime is to return old value */
	save_adjust = time_adjust;

#if 0	/* STA_CLOCKERR is never set yet */
	time_status &= ~STA_CLOCKERR;		/* reset STA_CLOCKERR */
#endif
	/* If there are input parameters, then process them */
	if (txc->modes)
	{
	    if (txc->modes & ADJ_STATUS)	/* only set allowed bits */
		time_status =  (txc->status & ~STA_RONLY) |
			      (time_status & STA_RONLY);

	    if (txc->modes & ADJ_FREQUENCY) {	/* p. 22 */
		if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
		    result = -EINVAL;
		    goto leave;
		}
		time_freq = ((s64)txc->freq * NSEC_PER_USEC)
				>> (SHIFT_USEC - SHIFT_NSEC);
	    }

	    if (txc->modes & ADJ_MAXERROR) {
		if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
		    result = -EINVAL;
		    goto leave;
		}
		time_maxerror = txc->maxerror;
	    }

	    if (txc->modes & ADJ_ESTERROR) {
		if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
		    result = -EINVAL;
		    goto leave;
		}
		time_esterror = txc->esterror;
	    }

	    if (txc->modes & ADJ_TIMECONST) {	/* p. 24 */
		if (txc->constant < 0) {	/* NTP v4 uses values > 6 */
		    result = -EINVAL;
		    goto leave;
		}
		time_constant = min(txc->constant + 4, (long)MAXTC);
	    }

	    if (txc->modes & ADJ_OFFSET) {	/* values checked earlier */
		if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
		    /* adjtime() is independent from ntp_adjtime() */
		    time_adjust = txc->offset;
		}
		else if (time_status & STA_PLL) {
		    time_offset = txc->offset * NSEC_PER_USEC;

		    /*
		     * Scale the phase adjustment and
		     * clamp to the operating range.
		     */
		    time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
		    time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);

		    /*
		     * Select whether the frequency is to be controlled
		     * and in which mode (PLL or FLL). Clamp to the operating
		     * range. Ugly multiply/divide should be replaced someday.
		     */

		    if (time_status & STA_FREQHOLD || time_reftime == 0)
		        time_reftime = xtime.tv_sec;
		    mtemp = xtime.tv_sec - time_reftime;
		    time_reftime = xtime.tv_sec;

		    freq_adj = time_offset * mtemp;
		    freq_adj = shift_right(freq_adj, time_constant * 2 +
					   (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
		    if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
			temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
			if (time_offset < 0) {
			    temp64 = -temp64;
			    do_div(temp64, mtemp);
			    freq_adj -= temp64;
			} else {
			    do_div(temp64, mtemp);
			    freq_adj += temp64;
			}
		    }
		    freq_adj += time_freq;
		    freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
		    time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
		    time_offset = div_long_long_rem_signed(time_offset,
							   NTP_INTERVAL_FREQ,
							   &rem);
		    time_offset <<= SHIFT_UPDATE;
		} /* STA_PLL */
	    } /* txc->modes & ADJ_OFFSET */
	    if (txc->modes & ADJ_TICK)
		tick_usec = txc->tick;

	    if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
		ntp_update_frequency();
	} /* txc->modes */
leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
		result = TIME_ERROR;

	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
			(txc->modes == ADJ_OFFSET_SS_READ))
		txc->offset = save_adjust;
	else
		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
				NTP_INTERVAL_FREQ / 1000;
	txc->freq	   = (time_freq / NSEC_PER_USEC) <<
				(SHIFT_USEC - SHIFT_NSEC);
	txc->maxerror	   = time_maxerror;
	txc->esterror	   = time_esterror;
	txc->status	   = time_status;
	txc->constant	   = time_constant;
	txc->precision	   = 1;
	txc->tolerance	   = MAXFREQ;
	txc->tick	   = tick_usec;

	/* PPS is not implemented, so these are zero */
	txc->ppsfreq	   = 0;
	txc->jitter	   = 0;
	txc->shift	   = 0;
	txc->stabil	   = 0;
	txc->jitcnt	   = 0;
	txc->calcnt	   = 0;
	txc->errcnt	   = 0;
	txc->stbcnt	   = 0;
	write_sequnlock_irq(&xtime_lock);
	do_gettimeofday(&txc->time);
	notify_cmos_timer();
	return result;
}
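For reference, do_div() -- used throughout these examples -- divides a 64-bit dividend in place and returns the 32-bit remainder. A minimal stand-in that models the contract (hypothetical; the real kernel macro is arch-specific) might look like:

#include <stdint.h>

/* Hypothetical model of the kernel's do_div(): n is an lvalue holding
 * the 64-bit dividend, which is replaced by the quotient; the 32-bit
 * remainder is the value of the expression. */
#define do_div_model(n, base) ({			\
	uint32_t __rem = (uint64_t)(n) % (base);	\
	(n) = (uint64_t)(n) / (base);			\
	__rem;						\
})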
Example #27
0
void
number(long num, int base, int size, int precision, int type)
{
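    /*
     * Emit 'num' in 'base' one character at a time via bing(),
     * honouring field width, precision and the ZEROPAD/SIGN/PLUS/
     * SPACE/LEFT/SPECIAL/LARGE flags, in the style of the classic
     * vsprintf number() helper.
     */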
    char c, sign, tmp[36];
    const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz";
    int i;

    if (type & LARGE)
	digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    if (type & LEFT)
	type &= ~ZEROPAD;
    if (base < 2 || base > 36)
	return;
    c = (type & ZEROPAD) ? '0' : ' ';
    sign = 0;
    if (type & SIGN) {
	if (num < 0) {
	    sign = '-';
	    num = -num;
	    size--;
	} else if (type & PLUS) {
	    sign = '+';
	    size--;
	} else if (type & SPACE) {
	    sign = ' ';
	    size--;
	}
    }
    if (type & SPECIAL) {
	if (base == 16)
	    size -= 2;
	else if (base == 8)
	    size--;
    }
    i = 0;
    if (num == 0)
	tmp[i++] = '0';
    else
	while (num != 0)
	    tmp[i++] = digits[do_div(num, base)];
    if (i > precision)
	precision = i;
    size -= precision;
    if (!(type & (ZEROPAD + LEFT)))
	while (size-- > 0)
	    bing(' ');
    if (sign)
	bing(sign);
    if (type & SPECIAL) {
	if (base == 8) {
	    bing('0');
	} else if (base == 16) {
	    bing('0');
	    bing(digits[33]);	/* 'x', or 'X' when LARGE */
	}
    }
    if (!(type & LEFT))
	while (size-- > 0)
	    bing(c);
    while (i < precision--)
	bing('0');
    while (i-- > 0)
	bing(tmp[i]);
    while (size-- > 0)
	bing(' ');
    return;
}
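As a usage sketch (hypothetical: it assumes the classic vsprintf flag values shown below and a bing() that emits one character, e.g. wrapping putchar), printing 255 as zero-padded hex with the 0x prefix would be:

/* Assumed flag values, as in the old lib/vsprintf.c helpers. */
#define ZEROPAD	1	/* pad with zero */
#define SIGN	2	/* signed number */
#define PLUS	4	/* show plus */
#define SPACE	8	/* space if plus */
#define LEFT	16	/* left justified */
#define SPECIAL	32	/* 0x / 0 prefix */
#define LARGE	64	/* uppercase digits */

static void number_demo(void)
{
    /* base 16, field width 6, zero padding, 0x prefix -> "0x00ff" */
    number(255, 16, 6, 0, ZEROPAD | SPECIAL);
}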
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that freeze_task() has flagged the task, don't
			 * perturb one in TASK_STOPPED or TASK_TRACED.  It is
			 * "frozen enough".  If the task does wake up, it will
			 * immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler
			 * lock, the TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with the task state test here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
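	/* Convert the 64-bit ns delta to ms with do_div(); plain 64/32
	 * division is not available on every 32-bit architecture. */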
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space" : "tasks");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
			       " (%d tasks refusing to freeze, wq_busy=%d):\n",
			       elapsed_msecs / 1000, elapsed_msecs % 1000,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
		printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
		       elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}
Example #29
0
static void lirc_lirc_irq_handler(void *blah)
{
	ktime_t kt, delkt;
	static ktime_t lastkt;
	static int init;
	long signal;
	int data;
	unsigned int level, newlevel;
	unsigned int timeout;

	if (!is_open)
		return;

	if (!is_claimed)
		return;

#if 0
	/* disable interrupt */
	disable_irq(irq);
	out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ) & (~LP_PINTEN));
#endif
	if (check_pselecd && (in(1) & LP_PSELECD))
		return;

#ifdef LIRC_TIMER
	if (init) {
		kt = ktime_get();

		delkt = ktime_sub(kt, lastkt);
		if (ktime_compare(delkt, ktime_set(15, 0)) > 0)
			/* really long time */
			data = PULSE_MASK;
		else
			data = (int)(ktime_to_us(delkt) + LIRC_SFH506_DELAY);

		rbuf_write(data); /* space */
	} else {
		if (timer == 0) {
			/*
			 * wake up; we'll lose this signal, but it will be
			 * garbage if the device is turned on anyway
			 */
			timer = init_lirc_timer();
			/* enable_irq(irq); */
			return;
		}
		init = 1;
	}

	timeout = timer / 10;	/* timeout after 1/10 sec. */
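	/* 'timer' holds the calibrated timer frequency in Hz, so counting
	 * timer/10 low->high transitions below bounds the wait at ~100 ms. */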
	signal = 1;
	level = lirc_get_timer();
	do {
		newlevel = lirc_get_timer();
		if (level == 0 && newlevel != 0)
			signal++;
		level = newlevel;

		/* giving up */
		if (signal > timeout
		    || (check_pselecd && (in(1) & LP_PSELECD))) {
			signal = 0;
			pr_notice("timeout\n");
			break;
		}
	} while (lirc_get_signal());

	if (signal != 0) {
		/* adjust value to usecs */
		__u64 helper;

		helper = ((__u64)signal) * 1000000;
		do_div(helper, timer);
		signal = (long)helper;

		if (signal > LIRC_SFH506_DELAY)
			data = signal - LIRC_SFH506_DELAY;
		else
			data = 1;
		rbuf_write(PULSE_BIT | data); /* pulse */
	}
	lastkt = ktime_get();
#else
	/* add your code here */
#endif

	wake_up_interruptible(&lirc_wait);

	/* enable interrupt */
	/*
	  enable_irq(irq);
	  out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN);
	*/
}
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		flags,
	xfs_bmbt_irec_t *ret_imap,
	int		*nmaps,
	int		found)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	int		error;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_filblks_t	qblocks, resblks;
	int		committed;
	int		resrtextents;

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	count_fsb = last_fsb - offset_fsb;
	if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
		xfs_fileoff_t	map_last_fsb;

		map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
		if (map_last_fsb < last_fsb) {
			last_fsb = map_last_fsb;
			count_fsb = last_fsb - offset_fsb;
		}
		ASSERT(count_fsb > 0);
	}

	/*
	 * Determine whether to reserve space on the data or the
	 * realtime partition.
	 */
	if ((rt = XFS_IS_REALTIME_INODE(ip))) {
		xfs_extlen_t	extsz;

		if (!(extsz = ip->i_d.di_extsize))
			extsz = mp->m_sb.sb_rextsize;
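		/*
		 * Round the block count up and convert it to realtime
		 * extents; do_div() divides the 64-bit count in place,
		 * leaving the extent count in resrtextents.
		 */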
		resrtextents = qblocks = (count_fsb + extsz - 1);
		do_div(resrtextents, mp->m_sb.sb_rextsize);
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);

	/*
	 * Check for running out of space; the ilock must be retaken
	 * before returning, whether or not the reservation failed.
	 */
	if (error)
		xfs_trans_cancel(tp, 0);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto error_out;

	if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) {
		error = EDQUOT;
		goto error1;
	}

	bmapi_flag = XFS_BMAPI_WRITE;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt))
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * Issue the bmapi() call to allocate the blocks
	 */
	XFS_BMAP_INIT(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
		bmapi_flag, &firstfsb, 0, &imap, &nimaps, &free_list);
	if (error)
		goto error0;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
	if (error)
		goto error0;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
	if (error)
		goto error_out;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = ENOSPC;
		goto error_out;
	}

	*ret_imap = imap;
	*nmaps = 1;
	if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
		cmn_err(CE_PANIC, "Access to block zero: fs <%s> inode: %lld "
			"start_block: %llx start_off: %llx blkcnt: %llx "
			"extent-state: %x\n",
			(ip->i_mount)->m_fsname,
			(long long)ip->i_ino,
			ret_imap->br_startblock, ret_imap->br_startoff,
			ret_imap->br_blockcount, ret_imap->br_state);
	}
	return 0;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	*nmaps = 0;	/* nothing set-up here */

error_out:
	return XFS_ERROR(error);
}
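The error0/error1/error_out labels above follow a common kernel unwind idiom: the deeper the failure, the more state the jump target releases. A minimal sketch of the same pattern (hypothetical acquire/release helpers) is:

int acquire_a(void);
int acquire_b(void);
void release_a(void);

/* Each later failure point jumps to a label that unwinds strictly
 * more of the previously acquired state. */
static int setup_pair(void)
{
	int error;

	error = acquire_a();
	if (error)
		goto out;
	error = acquire_b();
	if (error)
		goto undo_a;
	return 0;

undo_a:
	release_a();
out:
	return error;
}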