static void cvm_oct_adjust_link(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	if (priv->last_link != priv->phydev->link) {
		priv->last_link = priv->phydev->link;
		link_info.u64 = 0;
		link_info.s.link_up = priv->last_link ? 1 : 0;
		link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
		link_info.s.speed = priv->phydev->speed;
		cvmx_helper_link_set(priv->port, link_info);
		if (priv->last_link) {
			netif_carrier_on(dev);
			if (priv->queue != -1)
				printk_ratelimited("%s: %u Mbps %s duplex, "
						   "port %2d, queue %2d\n",
						   dev->name, priv->phydev->speed,
						   priv->phydev->duplex ?
						   "Full" : "Half",
						   priv->port, priv->queue);
			else
				printk_ratelimited("%s: %u Mbps %s duplex, "
						   "port %2d, POW\n",
						   dev->name, priv->phydev->speed,
						   priv->phydev->duplex ?
						   "Full" : "Half",
						   priv->port);
		} else {
			netif_carrier_off(dev);
			printk_ratelimited("%s: Link down\n", dev->name);
		}
	}
}
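
Every example on this page goes through printk_ratelimited(), which behaves like printk() but starts dropping messages once a per-callsite rate limit is hit. A simplified sketch of the definition (the real macro lives in include/linux/printk.h and is equivalent in spirit):

/* Simplified sketch of printk_ratelimited(): a static, per-callsite
 * ratelimit state (from <linux/ratelimit.h>) gates the printk so a burst
 * of identical events cannot flood the log. */
#define printk_ratelimited(fmt, ...)					\
({									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&_rs))						\
		printk(fmt, ##__VA_ARGS__);				\
})

The defaults allow a burst of 10 messages per 5-second interval; callers that need different limits typically declare their own ratelimit_state instead.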
Example #2
static void cvm_oct_sgmii_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	/* Tell Linux */
	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
		if (priv->queue != -1)
			printk_ratelimited("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
					   dev->name, link_info.s.speed,
					   (link_info.s.full_duplex) ? "Full" : "Half",
					   priv->port, priv->queue);
		else
			printk_ratelimited("%s: %u Mbps %s duplex, port %2d, POW\n",
					   dev->name, link_info.s.speed,
					   (link_info.s.full_duplex) ? "Full" : "Half",
					   priv->port);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
		printk_ratelimited("%s: Link down\n", dev->name);
	}
}
Example #3
static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
			    struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
Example #4
/*
 * ext4_fname_decrypt()
 *	This function decrypts the input filename, and returns
 *	the length of the plaintext.
 *	Errors are returned as negative numbers.
 *	We trust the caller to allocate sufficient memory for the oname string.
 */
static int ext4_fname_decrypt(struct inode *inode,
			      const struct ext4_str *iname,
			      struct ext4_str *oname)
{
	struct ext4_str tmp_in[2], tmp_out[1];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[EXT4_CRYPTO_BLOCK_SIZE];
	unsigned lim = max_name_len(inode);

	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	tmp_in[0].name = iname->name;
	tmp_in[0].len = iname->len;
	tmp_out[0].name = oname->name;

	/* Allocate request */
	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(
		    KERN_ERR "%s: crypto_request_alloc() failed\n",  __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_dir_crypt_complete, &ecr);

	/* Initialize IV */
	memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);

	/* Create encryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
	res = crypto_ablkcipher_decrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(
		    KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
		    __func__, res);
		return res;
	}

	oname->len = strnlen(oname->name, iname->len);
	return oname->len;
}
Example #5
static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
	} xts_tweak;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
	xts_tweak.index = cpu_to_le64(index);
	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
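
The -EINPROGRESS/-EBUSY check followed by wait_for_completion() in the two page-crypto examples above is the classic way to wait for an asynchronous crypto request. Later kernels fold the same logic into DECLARE_CRYPTO_WAIT()/crypto_wait_req(); a rough equivalent, assuming a kernel that provides those helpers (this is a sketch, not code from the drivers above):

#include <crypto/skcipher.h>	/* also pulls in the crypto_wait_req() helpers */

static int encrypt_page_and_wait(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/* crypto_wait_req() sleeps on &wait if the request completes
	 * asynchronously and returns the final status either way. */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}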
Example #6
File: fname.c Project: mdamt/linux
/**
 * fname_decrypt() - decrypt a filename
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_decrypt(struct inode *inode,
				const struct fscrypt_str *iname,
				struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[FS_CRYPTO_BLOCK_SIZE];
	unsigned lim;

	lim = inode->i_sb->s_cop->max_namelen(inode);
	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_request_alloc() failed\n",  __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fname_crypt_complete, &ecr);

	/* Initialize IV */
	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

	/* Create decryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
	res = crypto_skcipher_decrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
				"%s: Error (error code %d)\n", __func__, res);
		return res;
	}

	oname->len = strnlen(oname->name, iname->len);
	return 0;
}
Example #7
int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && bd == bd->bd_contains)
		return 0;

	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_GET_PCI:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
		return 0;
	case CDROM_GET_CAPABILITY:
		return -ENOIOCTLCMD;
	default:
		break;
	}
	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* In particular, rule out all resets and host-specific ioctls.  */
	printk_ratelimited(KERN_WARNING
			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

	return -ENOIOCTLCMD;
}
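
A block driver's ioctl handler is expected to run this check before forwarding the command to the SCSI layer; a minimal sketch of such a call site (the handler name is illustrative; the two helpers are the ones declared in <linux/blkdev.h> in kernels of this vintage):

static int my_disk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	int error;

	/* Refuse dangerous ioctls on partitions before doing any work. */
	error = scsi_verify_blk_ioctl(bdev, cmd);
	if (error < 0)
		return error;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)arg);
}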
Example #8
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	if (!sysctl_modify_ldt) {
		printk_ratelimited(KERN_INFO
			"Denied a call to modify_ldt() from %s[%d] (uid: %d)."
			" Adjust sysctl if this was not an exploit attempt.\n",
			current->comm, task_pid_nr(current),
			from_kuid_munged(current_user_ns(), current_uid()));
		return ret;
	}

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}
Example #9
static int usblp_check_status(struct usblp *usblp, int err)
{
	unsigned char status, newerr = 0;
	int error;

	mutex_lock(&usblp->mut);
	if ((error = usblp_read_status(usblp, usblp->statusbuf)) < 0) {
		mutex_unlock(&usblp->mut);
		printk_ratelimited(KERN_ERR
				"usblp%d: error %d reading printer status\n",
				usblp->minor, error);
		return 0;
	}
	status = *usblp->statusbuf;
	mutex_unlock(&usblp->mut);

	if (~status & LP_PERRORP)
		newerr = 3;
	if (status & LP_POUTPA)
		newerr = 1;
	if (~status & LP_PSELECD)
		newerr = 2;

	if (newerr != err) {
		printk(KERN_INFO "usblp%d: %s\n",
		   usblp->minor, usblp_messages[newerr]);
	}

	return newerr;
}
Example #10
/*
 * Print an buffer I/O error compatible with the fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}
Example #11
unsigned long __init rtas_get_boot_time(void)
{
	int ret[8];
	int error;
	unsigned int wait_time;
	u64 max_wait_tb;

	max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);

		wait_time = rtas_busy_delay_time(error);
		if (wait_time) {
			/*                               */
			udelay(wait_time*1000);
		}
	} while (wait_time && (get_tb() < max_wait_tb));

	if (error != 0) {
		printk_ratelimited(KERN_WARNING
				   "error: reading the clock failed (%d)\n",
				   error);
		return 0;
	}

	return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
Example #12
int rtas_set_rtc_time(struct rtc_time *tm)
{
	int error, wait_time;
	u64 max_wait_tb;

	max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
	        error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
				  tm->tm_year + 1900, tm->tm_mon + 1,
				  tm->tm_mday, tm->tm_hour, tm->tm_min,
				  tm->tm_sec, 0);

		wait_time = rtas_busy_delay_time(error);
		if (wait_time) {
			if (in_interrupt())
				return 1;	/*                      */
			msleep(wait_time);
		}
	} while (wait_time && (get_tb() < max_wait_tb));

	if (error != 0)
		printk_ratelimited(KERN_WARNING
				   "error: setting the clock failed (%d)\n",
				   error);

	return 0;
}
Example #13
static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;
	struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
	int status;

	disable_irq_nosync(host->hotplug.irq);
	tasklet_schedule(&enable_detection_tasklet);

	status = mmc_cd_get_status(host);
	if (unlikely(status < 0))
		goto out;

	if (status ^ cd->status) {
		printk_ratelimited(KERN_INFO "%s: slot status change detected (%d -> %d), GPIO_ACTIVE_%s\n",
				mmc_hostname(host), cd->status, status,
				(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) ?
				"HIGH" : "LOW");
		cd->status = status;
		
		host->caps |= host->caps_uhs;
		host->redetect_cnt = 0;
		
		mmc_detect_change(host, msecs_to_jiffies(100));
	}
out:
	return IRQ_HANDLED;
}
Example #14
/* Returns the inode number of the directory entry at offset pos. If bh is
   non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
   returned in bh.
   AV. Most often we do it item-by-item. Makes sense to optimize.
   AV. OK, there we go: if both bh and de are non-NULL we assume that we just
   AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
   AV. It's done in fat_get_entry() (inlined), here the slow case lives.
   AV. Additionally, when we return -1 (i.e. reached the end of directory)
   AV. we make bh NULL.
 */
static int fat__get_entry(struct inode *dir, loff_t *pos,
			  struct buffer_head **bh, struct msdos_dir_entry **de)
{
	struct super_block *sb = dir->i_sb;
	sector_t phys, iblock;
	unsigned long mapped_blocks;
	int err, offset;

next:
	if (*bh)
		brelse(*bh);

	*bh = NULL;
	iblock = *pos >> sb->s_blocksize_bits;
	err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
	if (err || !phys)
		return -1;	/* beyond EOF or error */

	fat_dir_readahead(dir, iblock, phys);

	*bh = sb_bread(sb, phys);
	if (*bh == NULL) {
		printk_ratelimited(KERN_ERR "FAT: Directory bread(block %llu) failed\n",
		       (llu)phys);
		/* skip this block */
		*pos = (iblock + 1) << sb->s_blocksize_bits;
		goto next;
	}

	offset = *pos & (sb->s_blocksize - 1);
	*pos += sizeof(struct msdos_dir_entry);
	*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

	return 0;
}
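
A sketch of the item-by-item walk the comment above describes; callers normally go through the fat_get_entry() inline wrapper, which falls back to this slow path only when a new block must be read (the locals and loop body are illustrative, and this only makes sense inside fs/fat/dir.c where that wrapper lives):

static void walk_fat_dir(struct inode *dir)
{
	struct buffer_head *bh = NULL;
	struct msdos_dir_entry *de = NULL;
	loff_t cpos = 0;

	while (fat_get_entry(dir, &cpos, &bh, &de) >= 0) {
		/* de points at the entry that was at cpos; cpos has
		 * already been advanced past it. */
	}
	/* On the -1 (end of directory) return, bh was set to NULL,
	 * so a final brelse() is safe. */
	brelse(bh);
}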
Example #15
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	}
	else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
#if DEBUG_SIG
	printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
	       regs, uc, &uc->uc_mcontext);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
Example #16
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}
Example #17
static inline void print_err_status(struct stk1160 *dev,
				     int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronuously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronuously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}

	if (packet < 0)
		printk_ratelimited(KERN_WARNING "URB status %d [%s].\n",
				status, errmsg);
	else
		printk_ratelimited(KERN_INFO "URB packet %d, status %d [%s].\n",
			       packet, status, errmsg);
}
Example #18
static int link_pm_runtime_get_active(struct link_pm_data *pm_data)
{
	int ret;
	struct usb_link_device *usb_ld = pm_data->usb_ld;
	struct device *dev = &usb_ld->usbdev->dev;

	if (!usb_ld->if_usb_connected || usb_ld->ld.com_state == COM_NONE)
		return -ENODEV;

	if (pm_data->dpm_suspending) {
		printk_ratelimited(KERN_DEBUG "[MIF] Kernel in suspending "
						"try get_active later\n");
		/* While dpm is suspending, wake up if the AP has TX data. */
		wake_lock(&pm_data->l2_wake);
		return -EAGAIN;
	}

	if (dev->power.runtime_status == RPM_ACTIVE) {
		pm_data->resume_retry_cnt = 0;
		return 0;
	}

	if (!pm_data->resume_requested) {
		mif_debug("QW PM\n");
		queue_delayed_work(pm_data->wq, &pm_data->link_pm_work, 0);
	}
	mif_debug("Wait pm\n");
	INIT_COMPLETION(pm_data->active_done);
	ret = wait_for_completion_timeout(&pm_data->active_done,
						msecs_to_jiffies(500));

	/* If the USB link was disconnected while waiting for the ACTIVE
	 * state, the USB device has been removed, so usb_ld->usbdev->dev is
	 * invalid and the dev->power.runtime_status access below would hit
	 * an invalid address.  This can happen on LPA L3 -> AP-initiated
	 * L0 -> disconnect -> link timeout.
	 */
	if (!usb_ld->if_usb_connected || usb_ld->ld.com_state == COM_NONE) {
		mif_info("link disconnected after timed-out\n");
		return -ENODEV;
	}

	if (dev->power.runtime_status != RPM_ACTIVE) {
		mif_info("link_active (%d) retry\n",
						dev->power.runtime_status);
		return -EAGAIN;
	}
	mif_debug("link_active success(%d)\n", ret);
	return 0;
}
Example #19
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
	int ret[8];
	int error;
	unsigned int wait_time;
	u64 max_wait_tb;

	max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);

		wait_time = rtas_busy_delay_time(error);
		if (wait_time) {
			if (in_interrupt()) {
				memset(rtc_tm, 0, sizeof(struct rtc_time));
				printk_ratelimited(KERN_WARNING
						   "error: reading clock "
						   "would delay interrupt\n");
				return;	/*                   */
			}
			msleep(wait_time);
		}
	} while (wait_time && (get_tb() < max_wait_tb));

	if (error != 0) {
		printk_ratelimited(KERN_WARNING
				   "error: reading the clock failed (%d)\n",
				   error);
		return;
	}

	rtc_tm->tm_sec = ret[5];
	rtc_tm->tm_min = ret[4];
	rtc_tm->tm_hour = ret[3];
	rtc_tm->tm_mday = ret[2];
	rtc_tm->tm_mon = ret[1] - 1;
	rtc_tm->tm_year = ret[0] - 1900;
}
Example #20
/**
 * do_lo_send_write - helper for writing data to a loop device
 *
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *page)
{
	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
			bvec->bv_offset, bvec->bv_len, pos >> 9);
	if (likely(!ret))
		return __do_lo_send_write(lo->lo_backing_file,
				page_address(page), bvec->bv_len,
				pos);
	printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
	if (ret > 0)
		ret = -EIO;
	return ret;
}
Example #21
/*
 * TODO(keescook): This check should just return -EPERM for all registers.
 * crosbug.com/38756
 */
static int msr_write_allowed(u32 reg)
{
	switch (reg) {
	case 0x19a:
	case 0x610:
	case 0x64c:
		/* Allowed: i915 thermal controls. */
		return 0;
	default:
		break;
	}

	/* Everything else: denied. */
	printk_ratelimited(KERN_ERR "msr: write denied: register 0x%x " \
			   "not whitelisted by driver.\n", reg);
	return -EPERM;
}
Example #22
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return ret;
}
static irqreturn_t cp_dump_irq_handler(int irq, void *data)
{
	/*
	struct idpram_link_pm_data *pm_data = data;
	int val = gpio_get_value(pm_data->mdata->gpio_cp_dump_int);

	pr_info(KERN_DEBUG "MIF: <%s> val = %d\n", __func__, val);

	if (val)
		irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
	else
		irq_set_irq_type(irq, IRQF_TRIGGER_RISING);
	*/

	printk_ratelimited(KERN_DEBUG "MIF: <%s>\n", __func__);
	return IRQ_HANDLED;
}
Example #24
/**
 * __do_lo_send_write - helper for writing data to a loop device
 *
 * This helper just factors out common code between do_lo_send_direct_write()
 * and do_lo_send_write().
 */
static int __do_lo_send_write(struct file *file,
		u8 *buf, const int len, loff_t pos)
{
	ssize_t bw;
	mm_segment_t old_fs = get_fs();

	file_start_write(file);
	set_fs(get_ds());
	bw = file->f_op->write(file, buf, len, &pos);
	set_fs(old_fs);
	file_end_write(file);
	if (likely(bw == len))
		return 0;
	printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
			(unsigned long long)pos, len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}
Example #25
static int compat_drm_addmap(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_map32_t __user *argp = (void __user *)arg;
	drm_map32_t m32;
	struct drm_map __user *map;
	int err;
	void *handle;

	if (copy_from_user(&m32, argp, sizeof(m32)))
		return -EFAULT;

	map = compat_alloc_user_space(sizeof(*map));
	if (!map)
		return -EFAULT;
	if (__put_user(m32.offset, &map->offset)
	    || __put_user(m32.size, &map->size)
	    || __put_user(m32.type, &map->type)
	    || __put_user(m32.flags, &map->flags))
		return -EFAULT;

	err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
	if (err)
		return err;

	if (__get_user(m32.offset, &map->offset)
	    || __get_user(m32.mtrr, &map->mtrr)
	    || __get_user(handle, &map->handle))
		return -EFAULT;

	m32.handle = (unsigned long)handle;
	if (m32.handle != (unsigned long)handle)
		printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
				   " %p for type %d offset %x\n",
				   handle, m32.type, m32.offset);

	if (copy_to_user(argp, &m32, sizeof(m32)))
		return -EFAULT;

	return 0;
}
Example #26
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos);
	file_end_write(file);

	if (likely(bw ==  bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}
Example #27
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);
	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	/* do_sigaltstack expects a __user pointer and won't modify
	 * what's in there anyway
	 */
	do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
#if DEBUG_SIG
	printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
	       regs, uc, &uc->uc_mcontext);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && bd == bd->bd_contains)
		return 0;

	/* Actually none of these is particularly useful on a partition,
	 * but they are safe.
	 */
	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_GET_PCI:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
		return 0;
	case CDROM_GET_CAPABILITY:
		/* Keep this until we remove the printk below.  udev sends it
		 * and we do not want to spam dmesg about it.   CD-ROMs do
		 * not have partitions, so we get here only for disks.
		 */
		return -ENOTTY;
	default:
		break;
	}

	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* In particular, rule out all resets and host-specific ioctls.  */
	printk_ratelimited(KERN_WARNING
			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

	return -ENOTTY;
}
Example #29
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent)) {
		/* BEGIN OUYA
		 * Rate limit these error messages.  The allocation failure
		 * that triggers this message is already rate limited, but
		 * under heavy cpu load, the number of times that usbnet
		 * attempts to defer the kevent becomes too excessive to
		 * print each time.  (ex: >5000 lines in the kernel log for a
		 * single "batch" of failures - this causes massive system
		 * slowdown.)
		 *
		 * Note: I'm keeping the general format that netdev_err
		 * translates into, but I can't seem to access the bus_id
		 * field, and so this message does not have it.  Everything
		 * else is the same as netdev_err.
		 */
		printk_ratelimited(KERN_ERR "%s: %s: kevent %d may have been dropped\n",
				   dev_driver_string((dev->net)->dev.parent),
				   netdev_name(dev->net),
				   work);
		/* netdev_err(dev->net, "kevent %d may have been dropped\n", work); */
		/* END OUYA */
	} else {
		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
	}
}
u32 mxr_irq_underrun_handle(struct mxr_device *mdev, u32 val)
{
	if (val & MXR_INT_STATUS_MX0_VIDEO) {
		printk_ratelimited("mixer0 video layer underrun occur\n");
		val |= MXR_INT_STATUS_MX0_VIDEO;
	} else if (val & MXR_INT_STATUS_MX0_GRP0) {
		printk_ratelimited("mixer0 graphic0 layer underrun occur\n");
		val |= MXR_INT_STATUS_MX0_GRP0;
	} else if (val & MXR_INT_STATUS_MX0_GRP1) {
		printk_ratelimited("mixer0 graphic1 layer underrun occur\n");
		val |= MXR_INT_STATUS_MX0_GRP1;
	} else if (val & MXR_INT_STATUS_MX1_VIDEO) {
		printk_ratelimited("mixer1 video layer underrun occur\n");
		val |= MXR_INT_STATUS_MX1_VIDEO;
	} else if (val & MXR_INT_STATUS_MX1_GRP0) {
		printk_ratelimited("mixer1 graphic0 layer underrun occur\n");
		val |= MXR_INT_STATUS_MX1_GRP0;
	} else if (val & MXR_INT_STATUS_MX1_GRP1) {
		printk_ratelimited("mixer1 graphic1 layer underrun occur\n");
		val |= MXR_INT_STATUS_MX1_GRP1;
	}

	return val;
}