Example #1
int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
                       sigset_t *set, struct pt_regs *regs)
{
    /* Handler is *really* a pointer to the function descriptor for
     * the signal routine.  The first entry in the function
     * descriptor is the entry address of the signal handler and the
     * second entry is the TOC value we need to use.
     */
    func_descr_t __user *funct_desc_ptr;
    struct rt_sigframe __user *frame;
    unsigned long newsp = 0;
    long err = 0;

    frame = get_sigframe(ka, regs, sizeof(*frame), 0);
    if (unlikely(frame == NULL))
        goto badframe;

    err |= __put_user(&frame->info, &frame->pinfo);
    err |= __put_user(&frame->uc, &frame->puc);
    err |= copy_siginfo_to_user(&frame->info, info);
    if (err)
        goto badframe;

    /* Create the ucontext.  */
    err |= __put_user(0, &frame->uc.uc_flags);
    err |= __put_user(0, &frame->uc.uc_link);
    err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
    err |= __put_user(sas_ss_flags(regs->gpr[1]),
                      &frame->uc.uc_stack.ss_flags);
    err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
    err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL,
                            (unsigned long)ka->sa.sa_handler, 1);
    err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
    if (err)
        goto badframe;

    /* Make sure signal handler doesn't get spurious FP exceptions */
    current->thread.fpscr.val = 0;

    /* Set up to return from userspace. */
    if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
        regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
    } else {
        err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
        if (err)
            goto badframe;
        regs->link = (unsigned long) &frame->tramp[0];
    }
    funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;

    /* Allocate a dummy caller frame for the signal handler. */
    newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
    err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

    /* Set up "regs" so we "return" to the signal handler. */
    err |= get_user(regs->nip, &funct_desc_ptr->entry);
    /* enter the signal handler in big-endian mode */
    regs->msr &= ~MSR_LE;
    regs->gpr[1] = newsp;
    err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
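    /* Per the 64-bit ELF ABI, r3 (and r4/r5 for SA_SIGINFO below)
     * carry the handler's arguments. */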
    regs->gpr[3] = signr;
    regs->result = 0;
    if (ka->sa.sa_flags & SA_SIGINFO) {
        err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
        err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
        regs->gpr[6] = (unsigned long) frame;
    } else {
        regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
    }
    if (err)
        goto badframe;

    return 1;

badframe:
#if DEBUG_SIG
    printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
           regs, frame, newsp);
#endif
    if (show_unhandled_signals && printk_ratelimit())
        printk(regs->msr & MSR_SF ? fmt64 : fmt32,
               current->comm, current->pid, "setup_rt_frame",
               (long)frame, regs->nip, regs->link);

    force_sigsegv(signr, current);
    return 0;
}
Example #2
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);

	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

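		/* Report the child's pid as seen from the current pid namespace */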
		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}

		p->se.islowprio = 0;	/* Nakatani: added */
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
Example #3
static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct usblp *usblp = file->private_data;
	int length, err, i;
	unsigned char newChannel;
	int status;
	int twoints[2];
	int retval = 0;

	mutex_lock(&usblp->mut);
	if (!usblp->present) {
		retval = -ENODEV;
		goto done;
	}

	dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
		_IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd));

	if (_IOC_TYPE(cmd) == 'P')	/* new-style ioctl number */

		switch (_IOC_NR(cmd)) {

		case IOCNR_GET_DEVICE_ID: /* get the DEVICE_ID string */
			if (_IOC_DIR(cmd) != _IOC_READ) {
				retval = -EINVAL;
				goto done;
			}

			length = usblp_cache_device_id_string(usblp);
			if (length < 0) {
				retval = length;
				goto done;
			}
			if (length > _IOC_SIZE(cmd))
				length = _IOC_SIZE(cmd); /* truncate */

			if (copy_to_user((void __user *) arg,
					usblp->device_id_string,
					(unsigned long) length)) {
				retval = -EFAULT;
				goto done;
			}

			break;

		case IOCNR_GET_PROTOCOLS:
			if (_IOC_DIR(cmd) != _IOC_READ ||
			    _IOC_SIZE(cmd) < sizeof(twoints)) {
				retval = -EINVAL;
				goto done;
			}

			twoints[0] = usblp->current_protocol;
			twoints[1] = 0;
			for (i = USBLP_FIRST_PROTOCOL;
			     i <= USBLP_LAST_PROTOCOL; i++) {
				if (usblp->protocol[i].alt_setting >= 0)
					twoints[1] |= (1<<i);
			}

			if (copy_to_user((void __user *)arg,
					(unsigned char *)twoints,
					sizeof(twoints))) {
				retval = -EFAULT;
				goto done;
			}

			break;

		case IOCNR_SET_PROTOCOL:
			if (_IOC_DIR(cmd) != _IOC_WRITE) {
				retval = -EINVAL;
				goto done;
			}

#ifdef DEBUG
			if (arg == -10) {
				usblp_dump(usblp);
				break;
			}
#endif

			usblp_unlink_urbs(usblp);
			retval = usblp_set_protocol(usblp, arg);
			if (retval < 0) {
				usblp_set_protocol(usblp,
					usblp->current_protocol);
			}
			break;

		case IOCNR_HP_SET_CHANNEL:
			if (_IOC_DIR(cmd) != _IOC_WRITE ||
			    le16_to_cpu(usblp->dev->descriptor.idVendor) != 0x03F0 ||
			    usblp->quirks & USBLP_QUIRK_BIDIR) {
				retval = -EINVAL;
				goto done;
			}

			err = usblp_hp_channel_change_request(usblp,
				arg, &newChannel);
			if (err < 0) {
				dev_err(&usblp->dev->dev,
					"usblp%d: error = %d setting "
					"HP channel\n",
					usblp->minor, err);
				retval = -EIO;
				goto done;
			}

			dbg("usblp%d requested/got HP channel %ld/%d",
				usblp->minor, arg, newChannel);
			break;

		case IOCNR_GET_BUS_ADDRESS:
			if (_IOC_DIR(cmd) != _IOC_READ ||
			    _IOC_SIZE(cmd) < sizeof(twoints)) {
				retval = -EINVAL;
				goto done;
			}

			twoints[0] = usblp->dev->bus->busnum;
			twoints[1] = usblp->dev->devnum;
			if (copy_to_user((void __user *)arg,
					(unsigned char *)twoints,
					sizeof(twoints))) {
				retval = -EFAULT;
				goto done;
			}

			dbg("usblp%d is bus=%d, device=%d",
				usblp->minor, twoints[0], twoints[1]);
			break;

		case IOCNR_GET_VID_PID:
			if (_IOC_DIR(cmd) != _IOC_READ ||
			    _IOC_SIZE(cmd) < sizeof(twoints)) {
				retval = -EINVAL;
				goto done;
			}

			twoints[0] = le16_to_cpu(usblp->dev->descriptor.idVendor);
			twoints[1] = le16_to_cpu(usblp->dev->descriptor.idProduct);
			if (copy_to_user((void __user *)arg,
					(unsigned char *)twoints,
					sizeof(twoints))) {
				retval = -EFAULT;
				goto done;
			}

			dbg("usblp%d is VID=0x%4.4X, PID=0x%4.4X",
				usblp->minor, twoints[0], twoints[1]);
			break;

		case IOCNR_SOFT_RESET:
			if (_IOC_DIR(cmd) != _IOC_NONE) {
				retval = -EINVAL;
				goto done;
			}
			retval = usblp_reset(usblp);
			break;
		default:
			retval = -ENOTTY;
		}
	else	/* old-style ioctl value */
		switch (cmd) {

		case LPGETSTATUS:
			if ((retval = usblp_read_status(usblp, usblp->statusbuf))) {
				if (printk_ratelimit())
					printk(KERN_ERR "usblp%d:"
					    "failed reading printer status (%d)\n",
					    usblp->minor, retval);
				retval = -EIO;
				goto done;
			}
			status = *usblp->statusbuf;
			if (copy_to_user((void __user *)arg, &status, sizeof(int)))
				retval = -EFAULT;
			break;

		case LPABORT:
			if (arg)
				usblp->flags |= LP_ABORT;
			else
				usblp->flags &= ~LP_ABORT;
			break;

		default:
			retval = -ENOTTY;
		}

done:
	mutex_unlock(&usblp->mut);
	return retval;
}
Example #4
/* Recv data */
static int h4_recv(struct hci_uart *hu, void *data, int count)
{
	struct h4_struct *h4 = hu->priv;
	register char *ptr;
	struct hci_event_hdr *eh;
	struct hci_acl_hdr   *ah;
	struct hci_sco_hdr   *sh;
	register int len, type, dlen;

	BT_DBG("hu %p count %d rx_state %ld rx_count %ld", 
			hu, count, h4->rx_state, h4->rx_count);

	ptr = data;
	while (count) {
		if (h4->rx_count) {
			len = min_t(unsigned int, h4->rx_count, count);
			memcpy(skb_put(h4->rx_skb, len), ptr, len);
			h4->rx_count -= len; count -= len; ptr += len;

			if (h4->rx_count)
				continue;
			switch (h4->rx_state) {
			case H4_W4_DATA:
				BT_DBG("Complete data");
				hci_recv_frame(h4->rx_skb);
				bluesleep_start_sleep_timer();
				h4->rx_state = H4_W4_PACKET_TYPE;
				h4->rx_skb = NULL;
				continue;
			case H4_W4_EVENT_HDR:
				eh = hci_event_hdr(h4->rx_skb);
				BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
				h4_check_data_len(h4, eh->plen);
				continue;
			case H4_W4_ACL_HDR:
				ah = hci_acl_hdr(h4->rx_skb);
				dlen = __le16_to_cpu(ah->dlen);
				BT_DBG("ACL header: dlen %d", dlen);
				h4_check_data_len(h4, dlen);
				continue;
			case H4_W4_SCO_HDR:
				sh = hci_sco_hdr(h4->rx_skb);
				BT_DBG("SCO header: dlen %d", sh->dlen);
				h4_check_data_len(h4, sh->dlen);
				continue;
			}
		}
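		/* No bytes pending: *ptr is the H4 packet-type indicator
		 * of a new frame */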
		switch (*ptr) {
		case HCI_EVENT_PKT:
			BT_DBG("Event packet");
			h4->rx_state = H4_W4_EVENT_HDR;
			h4->rx_count = HCI_EVENT_HDR_SIZE;
			type = HCI_EVENT_PKT;
			break;
		case HCI_ACLDATA_PKT:
			BT_DBG("ACL packet");
			h4->rx_state = H4_W4_ACL_HDR;
			h4->rx_count = HCI_ACL_HDR_SIZE;
			type = HCI_ACLDATA_PKT;
			break;
		case HCI_SCODATA_PKT:
			BT_DBG("SCO packet");
			h4->rx_state = H4_W4_SCO_HDR;
			h4->rx_count = HCI_SCO_HDR_SIZE;
			type = HCI_SCODATA_PKT;
			break;
		default:
			if (printk_ratelimit())
				BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
			hu->hdev->stat.err_rx++;
			ptr++; count--;
			continue;
		}
		ptr++; count--;
		h4->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
		if (!h4->rx_skb) {
			BT_ERR("Can't allocate mem for new packet");
			h4->rx_state = H4_W4_PACKET_TYPE;
			h4->rx_count = 0;
			return 0;
		}
		h4->rx_skb->dev = (void *) hu->hdev;
		bt_cb(h4->rx_skb)->pkt_type = type;
	}

	return count;
}
Example #5
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}
Example #6
int ipsec_sa_init(struct ipsec_sa *ipsp)
{
	int i;
	int error = 0;
	char sa[SATOT_BUF];
	size_t sa_len;
	char ipaddr_txt[ADDRTOA_BUF];
	char ipaddr2_txt[ADDRTOA_BUF];
#if defined (CONFIG_KLIPS_AUTH_HMAC_MD5) || defined (CONFIG_KLIPS_AUTH_HMAC_SHA1)
	unsigned char kb[AHMD596_BLKLEN];
#endif
	struct ipsec_alg_enc *ixt_e = NULL;
	struct ipsec_alg_auth *ixt_a = NULL;

	if(ipsp == NULL) {
		KLIPS_PRINT(debug_pfkey,
			    "ipsec_sa_init: "
			    "ipsp is NULL, fatal\n");
		SENDERR(EINVAL);
	}

	sa_len = KLIPS_SATOT(debug_pfkey, &ipsp->ips_said, 0, sa, sizeof(sa));

	KLIPS_PRINT(debug_pfkey,
		    "ipsec_sa_init: "
		    "(pfkey defined) called for SA:%s\n",
		    sa_len ? sa : " (error)");

	KLIPS_PRINT(debug_pfkey,
		    "ipsec_sa_init: "
		    "calling init routine of %s%s%s\n",
		    IPS_XFORM_NAME(ipsp));
	
	switch(ipsp->ips_said.proto) {
#ifdef CONFIG_KLIPS_IPIP
	case IPPROTO_IPIP: {
		ipsp->ips_xformfuncs = ipip_xform_funcs;
		addrtoa(((struct sockaddr_in*)(ipsp->ips_addr_s))->sin_addr,
			0,
			ipaddr_txt, sizeof(ipaddr_txt));
		addrtoa(((struct sockaddr_in*)(ipsp->ips_addr_d))->sin_addr,
			0,
			ipaddr2_txt, sizeof(ipaddr2_txt));
		KLIPS_PRINT(debug_pfkey,
			    "ipsec_sa_init: "
			    "(pfkey defined) IPIP ipsec_sa set for %s->%s.\n",
			    ipaddr_txt,
			    ipaddr2_txt);
	}
	break;
#endif /* CONFIG_KLIPS_IPIP */

#ifdef CONFIG_KLIPS_AH
	case IPPROTO_AH:
		ipsp->ips_xformfuncs = ah_xform_funcs;

		switch(ipsp->ips_authalg) {
# ifdef CONFIG_KLIPS_AUTH_HMAC_MD5
		case AH_MD5: {
			unsigned char *akp;
			unsigned int aks;
			MD5_CTX *ictx;
			MD5_CTX *octx;
			
			if(ipsp->ips_key_bits_a != (AHMD596_KLEN * 8)) {
				KLIPS_PRINT(debug_pfkey,
					    "ipsec_sa_init: "
					    "incorrect key size: %d bits -- must be %d bits\n"/*octets (bytes)\n"*/,
					    ipsp->ips_key_bits_a, AHMD596_KLEN * 8);
				SENDERR(EINVAL);
			}
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "hmac md5-96 key is 0x%08x %08x %08x %08x\n",
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+0)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+1)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+2)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+3)));
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			
			ipsp->ips_auth_bits = AHMD596_ALEN * 8;
			
			/* save the pointer to the key material */
			akp = ipsp->ips_key_a;
			aks = ipsp->ips_key_a_size;
			
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
			           "ipsec_sa_init: "
			           "allocating %lu bytes for md5_ctx.\n",
			           (unsigned long) sizeof(struct md5_ctx));
			if((ipsp->ips_key_a = (caddr_t)
			    kmalloc(sizeof(struct md5_ctx), GFP_ATOMIC)) == NULL) {
				ipsp->ips_key_a = akp;
				SENDERR(ENOMEM);
			}
			ipsp->ips_key_a_size = sizeof(struct md5_ctx);

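			/* Build the HMAC contexts per RFC 2104: hash the key
			 * XORed with ipad for the inner context, then XORed
			 * with opad for the outer one. */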
			for (i = 0; i < DIVUP(ipsp->ips_key_bits_a, 8); i++) {
				kb[i] = akp[i] ^ HMAC_IPAD;
			}
			for (; i < AHMD596_BLKLEN; i++) {
				kb[i] = HMAC_IPAD;
			}

			ictx = &(((struct md5_ctx*)(ipsp->ips_key_a))->ictx);
			osMD5Init(ictx);
			osMD5Update(ictx, kb, AHMD596_BLKLEN);

			for (i = 0; i < AHMD596_BLKLEN; i++) {
				kb[i] ^= (HMAC_IPAD ^ HMAC_OPAD);
			}

			octx = &(((struct md5_ctx*)(ipsp->ips_key_a))->octx);
			osMD5Init(octx);
			osMD5Update(octx, kb, AHMD596_BLKLEN);
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "MD5 ictx=0x%08x %08x %08x %08x octx=0x%08x %08x %08x %08x\n",
				    ((__u32*)ictx)[0],
				    ((__u32*)ictx)[1],
				    ((__u32*)ictx)[2],
				    ((__u32*)ictx)[3],
				    ((__u32*)octx)[0],
				    ((__u32*)octx)[1],
				    ((__u32*)octx)[2],
				    ((__u32*)octx)[3] );
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			
			/* zero key buffer -- paranoid */
			memset(akp, 0, aks);
			kfree(akp);
		}
		break;
# endif /* CONFIG_KLIPS_AUTH_HMAC_MD5 */
# ifdef CONFIG_KLIPS_AUTH_HMAC_SHA1
		case AH_SHA: {
			unsigned char *akp;
			unsigned int aks;
			SHA1_CTX *ictx;
			SHA1_CTX *octx;
			
			if(ipsp->ips_key_bits_a != (AHSHA196_KLEN * 8)) {
				KLIPS_PRINT(debug_pfkey,
					    "ipsec_sa_init: "
					    "incorrect key size: %d bits -- must be %d bits\n"/*octets (bytes)\n"*/,
					    ipsp->ips_key_bits_a, AHSHA196_KLEN * 8);
				SENDERR(EINVAL);
			}
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "hmac sha1-96 key is 0x%08x %08x %08x %08x\n",
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+0)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+1)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+2)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+3)));
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			
			ipsp->ips_auth_bits = AHSHA196_ALEN * 8;
			
			/* save the pointer to the key material */
			akp = ipsp->ips_key_a;
			aks = ipsp->ips_key_a_size;
			
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
			            "ipsec_sa_init: "
			            "allocating %lu bytes for sha1_ctx.\n",
			            (unsigned long) sizeof(struct sha1_ctx));
			if((ipsp->ips_key_a = (caddr_t)
			    kmalloc(sizeof(struct sha1_ctx), GFP_ATOMIC)) == NULL) {
				ipsp->ips_key_a = akp;
				SENDERR(ENOMEM);
			}
			ipsp->ips_key_a_size = sizeof(struct sha1_ctx);

			for (i = 0; i < DIVUP(ipsp->ips_key_bits_a, 8); i++) {
				kb[i] = akp[i] ^ HMAC_IPAD;
			}
			for (; i < AHSHA196_BLKLEN; i++) {
				kb[i] = HMAC_IPAD;
			}

			ictx = &(((struct sha1_ctx*)(ipsp->ips_key_a))->ictx);
			SHA1Init(ictx);
			SHA1Update(ictx, kb, AHSHA196_BLKLEN);

			for (i = 0; i < AHSHA196_BLKLEN; i++) {
				kb[i] ^= (HMAC_IPAD ^ HMAC_OPAD);
			}

			octx = &(((struct sha1_ctx*)(ipsp->ips_key_a))->octx);
			SHA1Init(octx);
			SHA1Update(octx, kb, AHSHA196_BLKLEN);
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "SHA1 ictx=0x%08x %08x %08x %08x octx=0x%08x %08x %08x %08x\n", 
				    ((__u32*)ictx)[0],
				    ((__u32*)ictx)[1],
				    ((__u32*)ictx)[2],
				    ((__u32*)ictx)[3],
				    ((__u32*)octx)[0],
				    ((__u32*)octx)[1],
				    ((__u32*)octx)[2],
				    ((__u32*)octx)[3] );
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			/* zero key buffer -- paranoid */
			memset(akp, 0, aks);
			kfree(akp);
		}
		break;
# endif /* CONFIG_KLIPS_AUTH_HMAC_SHA1 */
		default:
			KLIPS_PRINT(debug_pfkey,
				    "ipsec_sa_init: "
				    "authalg=%d support not available in the kernel",
				    ipsp->ips_authalg);
			SENDERR(EINVAL);
		}
	break;
#endif /* CONFIG_KLIPS_AH */

#ifdef CONFIG_KLIPS_ESP
	case IPPROTO_ESP:
		ipsp->ips_xformfuncs = esp_xform_funcs;
	{
#if defined (CONFIG_KLIPS_AUTH_HMAC_MD5) || defined (CONFIG_KLIPS_AUTH_HMAC_SHA1)
		unsigned char *akp;
		unsigned int aks;
#endif

		ipsec_alg_sa_init(ipsp);
		ixt_e=ipsp->ips_alg_enc;

		if (ixt_e == NULL) {
			if(printk_ratelimit()) {
				printk(KERN_ERR
				       "ipsec_sa_init: "
				       "encalg=%d support not available in the kernel",
				       ipsp->ips_encalg);
			}
			SENDERR(ENOENT);
		}

		ipsp->ips_iv_size = ixt_e->ixt_common.ixt_support.ias_ivlen/8;

		/* Create IV */
		if (ipsp->ips_iv_size) {
			if((ipsp->ips_iv = (caddr_t)
			    kmalloc(ipsp->ips_iv_size, GFP_ATOMIC)) == NULL) {
				SENDERR(ENOMEM);
			}
			prng_bytes(&ipsec_prng,
				   (char *)ipsp->ips_iv,
				   ipsp->ips_iv_size);
			ipsp->ips_iv_bits = ipsp->ips_iv_size * 8;
		}
		
		if ((error=ipsec_alg_enc_key_create(ipsp)) < 0)
			SENDERR(-error);

		if ((ixt_a=ipsp->ips_alg_auth)) {
			if ((error=ipsec_alg_auth_key_create(ipsp)) < 0)
				SENDERR(-error);
		} else switch (ipsp->ips_authalg) {
# ifdef CONFIG_KLIPS_AUTH_HMAC_MD5
		case AH_MD5: {
			MD5_CTX *ictx;
			MD5_CTX *octx;

			if(ipsp->ips_key_bits_a != (AHMD596_KLEN * 8)) {
				KLIPS_PRINT(debug_pfkey,
					    "ipsec_sa_init: "
					    "incorrect authorisation key size: %d bits -- must be %d bits\n"/*octets (bytes)\n"*/,
					    ipsp->ips_key_bits_a,
					    AHMD596_KLEN * 8);
				SENDERR(EINVAL);
			}
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "hmac md5-96 key is 0x%08x %08x %08x %08x\n",
				    ntohl(*(((__u32 *)(ipsp->ips_key_a))+0)),
				    ntohl(*(((__u32 *)(ipsp->ips_key_a))+1)),
				    ntohl(*(((__u32 *)(ipsp->ips_key_a))+2)),
				    ntohl(*(((__u32 *)(ipsp->ips_key_a))+3)));
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			ipsp->ips_auth_bits = AHMD596_ALEN * 8;
			
			/* save the pointer to the key material */
			akp = ipsp->ips_key_a;
			aks = ipsp->ips_key_a_size;
			
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
			            "ipsec_sa_init: "
			            "allocating %lu bytes for md5_ctx.\n",
			            (unsigned long) sizeof(struct md5_ctx));
			if((ipsp->ips_key_a = (caddr_t)
			    kmalloc(sizeof(struct md5_ctx), GFP_ATOMIC)) == NULL) {
				ipsp->ips_key_a = akp;
				SENDERR(ENOMEM);
			}
			ipsp->ips_key_a_size = sizeof(struct md5_ctx);

			for (i = 0; i < DIVUP(ipsp->ips_key_bits_a, 8); i++) {
				kb[i] = akp[i] ^ HMAC_IPAD;
			}
			for (; i < AHMD596_BLKLEN; i++) {
				kb[i] = HMAC_IPAD;
			}

			ictx = &(((struct md5_ctx*)(ipsp->ips_key_a))->ictx);
			osMD5Init(ictx);
			osMD5Update(ictx, kb, AHMD596_BLKLEN);

			for (i = 0; i < AHMD596_BLKLEN; i++) {
				kb[i] ^= (HMAC_IPAD ^ HMAC_OPAD);
			}

			octx = &(((struct md5_ctx*)(ipsp->ips_key_a))->octx);
			osMD5Init(octx);
			osMD5Update(octx, kb, AHMD596_BLKLEN);
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "MD5 ictx=0x%08x %08x %08x %08x octx=0x%08x %08x %08x %08x\n",
				    ((__u32*)ictx)[0],
				    ((__u32*)ictx)[1],
				    ((__u32*)ictx)[2],
				    ((__u32*)ictx)[3],
				    ((__u32*)octx)[0],
				    ((__u32*)octx)[1],
				    ((__u32*)octx)[2],
				    ((__u32*)octx)[3] );
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			/* paranoid */
			memset(akp, 0, aks);
			kfree(akp);
			break;
		}
# endif /* CONFIG_KLIPS_AUTH_HMAC_MD5 */
# ifdef CONFIG_KLIPS_AUTH_HMAC_SHA1
		case AH_SHA: {
			SHA1_CTX *ictx;
			SHA1_CTX *octx;

			if(ipsp->ips_key_bits_a != (AHSHA196_KLEN * 8)) {
				KLIPS_PRINT(debug_pfkey,
					    "ipsec_sa_init: "
					    "incorrect authorisation key size: %d bits -- must be %d bits\n"/*octets (bytes)\n"*/,
					    ipsp->ips_key_bits_a,
					    AHSHA196_KLEN * 8);
				SENDERR(EINVAL);
			}
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "hmac sha1-96 key is 0x%08x %08x %08x %08x\n",
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+0)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+1)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+2)),
				    ntohl(*(((__u32 *)ipsp->ips_key_a)+3)));
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			ipsp->ips_auth_bits = AHSHA196_ALEN * 8;
			
			/* save the pointer to the key material */
			akp = ipsp->ips_key_a;
			aks = ipsp->ips_key_a_size;

			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
			            "ipsec_sa_init: "
			            "allocating %lu bytes for sha1_ctx.\n",
			            (unsigned long) sizeof(struct sha1_ctx));
			if((ipsp->ips_key_a = (caddr_t)
			    kmalloc(sizeof(struct sha1_ctx), GFP_ATOMIC)) == NULL) {
				ipsp->ips_key_a = akp;
				SENDERR(ENOMEM);
			}
			ipsp->ips_key_a_size = sizeof(struct sha1_ctx);

			for (i = 0; i < DIVUP(ipsp->ips_key_bits_a, 8); i++) {
				kb[i] = akp[i] ^ HMAC_IPAD;
			}
			for (; i < AHSHA196_BLKLEN; i++) {
				kb[i] = HMAC_IPAD;
			}

			ictx = &(((struct sha1_ctx*)(ipsp->ips_key_a))->ictx);
			SHA1Init(ictx);
			SHA1Update(ictx, kb, AHSHA196_BLKLEN);

			for (i = 0; i < AHSHA196_BLKLEN; i++) {
				kb[i] ^= (HMAC_IPAD ^ HMAC_OPAD);
			}

			octx = &((struct sha1_ctx*)(ipsp->ips_key_a))->octx;
			SHA1Init(octx);
			SHA1Update(octx, kb, AHSHA196_BLKLEN);
			
#  if KLIPS_DIVULGE_HMAC_KEY
			KLIPS_PRINT(debug_pfkey && sysctl_ipsec_debug_verbose,
				    "ipsec_sa_init: "
				    "SHA1 ictx=0x%08x %08x %08x %08x octx=0x%08x %08x %08x %08x\n",
				    ((__u32*)ictx)[0],
				    ((__u32*)ictx)[1],
				    ((__u32*)ictx)[2],
				    ((__u32*)ictx)[3],
				    ((__u32*)octx)[0],
				    ((__u32*)octx)[1],
				    ((__u32*)octx)[2],
				    ((__u32*)octx)[3] );
#  endif /* KLIPS_DIVULGE_HMAC_KEY */
			memset(akp, 0, aks);
			kfree(akp);
			break;
		}
# endif /* CONFIG_KLIPS_AUTH_HMAC_SHA1 */
		case AH_NONE:
			break;
		default:
			KLIPS_PRINT(debug_pfkey,
				    "ipsec_sa_init: "
				    "authalg=%d support not available in the kernel.\n",
				    ipsp->ips_authalg);
			SENDERR(EINVAL);
		}
	}
	break;
#endif /* CONFIG_KLIPS_ESP */
#ifdef CONFIG_KLIPS_IPCOMP
	case IPPROTO_COMP:
		ipsp->ips_xformfuncs = ipcomp_xform_funcs;
		ipsp->ips_comp_adapt_tries = 0;
		ipsp->ips_comp_adapt_skip = 0;
		ipsp->ips_comp_ratio_cbytes = 0;
		ipsp->ips_comp_ratio_dbytes = 0;
		break;
#endif /* CONFIG_KLIPS_IPCOMP */
	default:
		printk(KERN_ERR "KLIPS sa initialization: "
		       "proto=%d unknown.\n",
		       ipsp->ips_said.proto);
		SENDERR(EINVAL);
	}
	
 errlab:
	return(error);
}
Example #7
static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction)
{
	u64 rc;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;

	if (TCE_PAGE_FACTOR == 0 && npages == 1)
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction);

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep)
			return tce_build_pSeriesLP(tbl, tcenum, npages,
						   uaddr, direction);
		__get_cpu_var(tce_page) = tcep;
	}

	tcenum <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

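	/* Real page number of the buffer, in TCE page units */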
	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
		printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%lx\n", (u64)npages);
		printk("\ttce[0] val = 0x%lx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
}
Example #8
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MULW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MULW(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MULW(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
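		/* SKF_AD_ALU_XOR_X is the legacy ancillary encoding of A ^= X */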
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.	Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /*	A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;

			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
Example #9
static int AMI306_ReadSensorData(char *buf, int bufsize)
{
	char cmd;
	int mode = 0;
	unsigned char databuf[10] = {0,};
	int res = 0;
	int mx, my, mz;
	mx = my = mz = 0;

	if (!buf || bufsize <= 80)
		return -1;

	if (!ami306_i2c_client) {
		*buf = 0;
		return -2;
	}

	read_lock(&ami306_data.lock);
	mode = ami306_data.mode;
	read_unlock(&ami306_data.lock);

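	/* Trigger a single forced measurement via the CTRL3 register */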
	databuf[0] = AMI306_REG_CTRL3;
	databuf[1] = AMI306_CTRL3_FORCE_BIT;
	res = i2c_master_send(ami306_i2c_client, databuf, 2);	
	if (res <= 0) 
		goto exit_AMI306_ReadSensorData;
	msleep(1);
	/* We can read all measured data at once */
	cmd = AMI306_REG_DATAXH;
	res = i2c_master_send(ami306_i2c_client, &cmd, 1);
	if (res <= 0)
		goto exit_AMI306_ReadSensorData;
	res = i2c_master_recv(ami306_i2c_client, &(databuf[0]), 6);
	if (res <= 0) 
		goto exit_AMI306_ReadSensorData;

	sprintf(buf, "%02x %02x %02x %02x %02x %02x", 
			databuf[0], databuf[1], databuf[2], 
			databuf[3], databuf[4], databuf[5]);

	//printk(KERN_INFO "[jaekyung83..lee]databuf : %s", buf);
	

	mx = (int)(databuf[0] | (databuf[1] << 8));
	my = (int)(databuf[2] | (databuf[3] << 8));
	mz = (int)(databuf[4] | (databuf[5] << 8));

	if (mx>32768)  mx = mx-65536;
	if (my>32768)  my = my-65536;
	if (mz>32768)  mz = mz-65536;

	//printk(KERN_INFO "[jaekyung83.lee]Magnetic Raw Data: X=%d, Y=%d, Z=%d\n", mx, my, mz);
	
	if (AMI306_DEBUG_DEV_STATUS & ami306_debug_mask) {
		int mx, my, mz;
		mx = my = mz = 0;

		mx = (int)(databuf[0] | (databuf[1] << 8));
		my = (int)(databuf[2] | (databuf[3] << 8));
		mz = (int)(databuf[4] | (databuf[5] << 8));

		if (mx>32768)  mx = mx-65536;
		if (my>32768)  my = my-65536;
		if (mz>32768)  mz = mz-65536;

		AMID("Magnetic Raw Data: X=%d, Y=%d, Z=%d\n", mx, my, mz);
	}

exit_AMI306_ReadSensorData:
	if (res <= 0) {
		if (printk_ratelimit()) {
			AMIE("I2C error: ret value=%d\n", res);
		}
		return -3;
	}
	return 0;
}
Example #10
/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (e.g., skb reallocation failure), we just
 * drop the message and hope the next invocation solves it.
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
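	/* Avoid an exact multiple of the USB max packet size: trim the
	 * request so the transfer terminates with a short packet rather
	 * than requiring a zero-length packet. */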
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking on our
		 * requests. Clear it and give it some time. If stalls
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16)) {	/* cap it */
			i2400mu->rx_size = rx_size;
		} else {		/* hit the cap: bail out */
			if (printk_ratelimit())
				dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) (skb_end_pointer(new_skb) - new_skb->head));
		goto retry;
	}
		/* In most cases, it happens due to the hardware scheduling a
		 * read when there was no data - unfortunately, we have no way
		 * to tell this timeout from a USB timeout. So we just ignore
		 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
Example #11
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	cause >>= 2;

	/* Restart the instruction */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
				"cause %ld\n", current->comm, SIGSEGV, address, cause);
			show_regs(regs);
		}
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		flush_tlb_one(address);
		return;
	}
}
Example #12
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

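	/* Mask of the low align_order bits; used below to round the
	 * search position up to the requested alignment */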
	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
Example #13
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		struct scatterlist *sglist, int nelems,
		unsigned long mask, enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
		npages >>= PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << PAGE_SHIFT;
		dma_addr |= s->offset;

		DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %lx\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & PAGE_MASK;
			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
				>> PAGE_SHIFT;
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	return 0;
}
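
The merge test in the mapping loop reduces to one comparison: a new entry may extend the current output segment only when its bus address lands exactly where the previous one ended. In isolation (struct and names are illustrative):

struct out_seg {
	unsigned long dma_address;
	unsigned long dma_length;
};

/* Returns 1 and extends 'out' when the new mapping is bus-contiguous
 * with the previous one; returns 0 when the caller must open a new
 * output segment. 'novmerge' force-disables merging. */
static int try_merge(struct out_seg *out, unsigned long dma_addr,
                     unsigned long len, unsigned long dma_next, int novmerge)
{
	if (novmerge || dma_addr != dma_next)
		return 0;
	out->dma_length += len;
	return 1;
}
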
Exemple #14
0
static int ti_hecc_error(struct net_device *ndev, int int_status,
	int err_status)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* propagate the error condition to the can stack */
	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_error: alloc_can_err_skb() failed\n");
		return -ENOMEM;
	}

	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
			++priv->can.can_stats.error_warning;
			cf->can_id |= CAN_ERR_CRTL;
			if (hecc_read(priv, HECC_CANTEC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			if (hecc_read(priv, HECC_CANREC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
		dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
			++priv->can.can_stats.error_passive;
			cf->can_id |= CAN_ERR_CRTL;
			if (hecc_read(priv, HECC_CANTEC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
			if (hecc_read(priv, HECC_CANREC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
		dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	/*
	 * Need to check busoff condition in error status register too to
	 * ensure warning interrupts don't hog the system
	 */
	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
		/* Disable all interrupts in bus-off to avoid int hog */
		hecc_write(priv, HECC_CANGIM, 0);
		can_bus_off(ndev);
	}

	if (err_status & HECC_BUS_ERROR) {
		++priv->can.can_stats.bus_error;
		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
		if (err_status & HECC_CANES_FE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
			cf->data[2] |= CAN_ERR_PROT_FORM;
		}
		if (err_status & HECC_CANES_BE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
			cf->data[2] |= CAN_ERR_PROT_BIT;
		}
		if (err_status & HECC_CANES_SE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		}
		if (err_status & HECC_CANES_CRCE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
					CAN_ERR_PROT_LOC_CRC_DEL;
		}
		if (err_status & HECC_CANES_ACKE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
			cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
					CAN_ERR_PROT_LOC_ACK_DEL;
		}
	}

	netif_receive_skb(skb);
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	return 0;
}
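
The 96 and 127 comparisons above are the standard CAN fault-confinement thresholds applied to the transmit/receive error counters. Stated on their own, as a sketch with names of my choosing:

enum can_err_state { ERR_ACTIVE, ERR_WARNING, ERR_PASSIVE, ERR_BUS_OFF };

/* Classify a node from its error counters, using the same cut-offs the
 * handler above reads from HECC_CANTEC/HECC_CANREC. */
static enum can_err_state can_classify(unsigned int tec, unsigned int rec)
{
	if (tec > 255)
		return ERR_BUS_OFF;	/* only TEC can push a node bus-off */
	if (tec > 127 || rec > 127)
		return ERR_PASSIVE;
	if (tec > 96 || rec > 96)
		return ERR_WARNING;
	return ERR_ACTIVE;
}
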
Exemple #15
0
/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct rds_ib_device *rds_ibdev;
	int ret;

	/* rds_ib_add_one creates a rds_ib_device object per IB device,
	 * and allocates a protection domain, memory range and FMR pool
	 * for each.  If that fails for any reason, it will not register
	 * the rds_ibdev at all.
	 */
	rds_ibdev = ib_get_client_data(dev, &rds_ib_client);
	if (rds_ibdev == NULL) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
					dev->name);
		return -EOPNOTSUPP;
	}

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;
	ic->i_mr = rds_ibdev->mr;

	ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
				     rds_ib_cq_event_handler, conn,
				     ic->i_send_ring.w_nr + 1, 0);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
				     rds_ib_cq_event_handler, conn,
				     ic->i_recv_ring.w_nr, 0);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (ic->i_send_hdrs == NULL) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (ic->i_recv_hdrs == NULL) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (ic->i_ack == NULL) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
	if (ic->i_sends == NULL) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}
	memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));

	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
	if (ic->i_recvs == NULL) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}
	memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	return ret;
}
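
The discipline the opening comment announces is visible at both ib_create_cq() calls: the errno is extracted from the IS_ERR pointer and the field is nulled before anything can jump to cleanup. Here is a self-contained imitation of that pattern; the ERR_PTR macros are re-derived so the sketch compiles outside the kernel:

#include <errno.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))

static int dummy_cq;

static void *create_cq(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : &dummy_cq;
}

static int setup(int fail)
{
	void *cq = create_cq(fail);
	int ret = 0;

	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		cq = NULL;	/* cleanup must only ever see NULL or valid */
		goto out;
	}
	/* ... use cq ... */
out:
	/* safe: cq is either NULL or a real object, never an IS_ERR value */
	return ret;
}
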
Exemple #16
0
/*
 * Receive a WLP frame from device
 *
 * @returns: 1 if the calling function should free the skb
 *           0 if it successfully handled the skb and freed it
 *           0 if an error occurred; the skb is freed in this case
 */
int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *src)
{
	unsigned len = skb->len;
	void *ptr = skb->data;
	struct wlp_frame_hdr *hdr;
	int result = 0;

	if (len < sizeof(*hdr)) {
		dev_err(dev, "Not enough data to parse WLP header.\n");
		result = -EINVAL;
		goto out;
	}
	hdr = ptr;
	if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) {
		dev_err(dev, "Not a WLP frame type.\n");
		result = -EINVAL;
		goto out;
	}
	switch (hdr->type) {
	case WLP_FRAME_STANDARD:
		if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) {
			dev_err(dev, "Not enough data to parse Standard "
				"WLP header.\n");
			goto out;
		}
		result = wlp_verify_prep_rx_frame(wlp, skb, src);
		if (result < 0) {
			if (printk_ratelimit())
				dev_err(dev, "WLP: Verification of frame "
					"from neighbor %02x:%02x failed.\n",
					src->data[1], src->data[0]);
			goto out;
		}
		result = 1;
		break;
	case WLP_FRAME_ABBREVIATED:
		dev_err(dev, "Abbreviated frame received. FIXME?\n");
		kfree_skb(skb);
		break;
	case WLP_FRAME_CONTROL:
		dev_err(dev, "Control frame received. FIXME?\n");
		kfree_skb(skb);
		break;
	case WLP_FRAME_ASSOCIATION:
		if (len < sizeof(struct wlp_frame_assoc)) {
			dev_err(dev, "Not enough data to parse Association "
				"WLP header.\n");
			goto out;
		}
		wlp_receive_assoc_frame(wlp, skb, src);
		break;
	default:
		dev_err(dev, "Invalid frame received.\n");
		result = -EINVAL;
		break;
	}
out:
	if (result < 0) {
		kfree_skb(skb);
		result = 0;
	}
	return result;
}
Exemple #17
0
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.	Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_RET_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LD_W_LEN: /*	A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/

			/* None of the BPF_S_ANC* codes appear to be passed by
			 * sk_chk_filter().  The interpreter and the x86 BPF
			 * compiler implement them so we do too -- they may be
			 * planted in future.
			 */
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
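
addrs[] is what makes the forward branches in the jump cases resolvable: a first code-generation pass only measures, recording each instruction's byte offset (and addrs[flen] for the epilogue), and a second pass re-emits with every target known. A stripped-down model of that two-pass layout follows; insn_words() is a hypothetical stand-in for the real per-opcode emitter:

struct cg_ctx {
	unsigned int idx;	/* 32-bit words emitted so far */
};

static unsigned int insn_words(int opcode)
{
	return (opcode & 1) ? 2 : 1;	/* illustrative sizing only */
}

/* Record the byte offset of each instruction. Running this once to
 * size, then again to emit, gives every branch in pass two a known
 * addrs[i + 1 + K] target. */
static void layout_pass(const int *code, int flen, unsigned int *addrs,
                        struct cg_ctx *ctx)
{
	int i;

	ctx->idx = 0;
	for (i = 0; i < flen; i++) {
		addrs[i] = ctx->idx * 4;
		ctx->idx += insn_words(code[i]);
	}
	addrs[flen] = ctx->idx * 4;	/* start of the epilogue */
}
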
Exemple #18
0
static void __kprobes
do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (v8086_mode(regs)) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode_novm(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, task_pid_nr(tsk), str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;

#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
		if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
			str = "PAX: suspicious stack segment fault";
#endif

		die(str, regs, error_code);
	}

#ifdef CONFIG_PAX_REFCOUNT
	if (trapnr == 4)
		pax_report_refcount_overflow(regs);
#endif

	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}
Exemple #19
0
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_iw_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_iw_stats_inc(s_iw_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_tx_cq_event);

		if (wc.status != IB_WC_SUCCESS) {
			printk(KERN_ERR "WC Error:  status = %d opcode = %d\n", wc.status, wc.opcode);
			break;
		}

		if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) {
			ic->i_fastreg_posted = 0;
			continue;
		}

		if (wc.opcode == IB_WC_FAST_REG_MR && wc.wr_id == RDS_IW_FAST_REG_WR_ID) {
			ic->i_fastreg_posted = 1;
			continue;
		}

		if (wc.wr_id == RDS_IW_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_iw_stats_inc(s_iw_tx_stalled);
			rds_iw_ack_send_complete(ic);
			continue;
		}

		oldest = rds_iw_ring_oldest(&ic->i_send_ring);

		completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_iw_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_FAST_REG_MR:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_READ_WITH_INV:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
						"RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
						__func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_iw_stats_inc(s_iw_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm)
					rds_iw_send_rdma_complete(rm, wc.status);
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_iw_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_iw_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
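
The oldest/completed arithmetic is plain modular distance on the ring: a completion for wr_id retires every entry from oldest up to and including wr_id. As a sketch:

/* How many ring entries does a completion for 'wr_id' retire, counting
 * from 'oldest'?  (+1 because the wr_id entry itself is done.) */
static unsigned int ring_completed(unsigned int wr_id, unsigned int oldest,
                                   unsigned int ring_size)
{
	return (wr_id + ring_size - oldest) % ring_size + 1;
}
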
Exemple #20
0
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (v8086_mode(regs))
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode_novm(regs))
		goto gp_in_kernel;

#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
	if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
		struct mm_struct *mm = tsk->mm;
		unsigned long limit;

		down_write(&mm->mmap_sem);
		limit = mm->context.user_cs_limit;
		if (limit < TASK_SIZE) {
			track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
			up_write(&mm->mmap_sem);
			return;
		}
		up_write(&mm->mmap_sem);
	}
#endif

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;

#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
	if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
		die("PAX: suspicious general protection fault", regs, error_code);
	else
#endif

	die("general protection fault", regs, error_code);
}
Exemple #21
0
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
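
Compared with Exemple #12, the win here is lock granularity: the table is split into nr_pools slices (nr_pools a power of two) plus one dedicated large pool, and a CPU picks its slice with a cheap mask instead of contending on one global lock. The selection step in isolation, with simplified structures:

struct mini_pool {
	unsigned long start, end, hint;
};

/* Large allocations always share one pool; small ones spread across
 * the hashed per-CPU pools, which is why nr_pools must be a power of
 * two. */
static struct mini_pool *pick_pool(struct mini_pool *pools,
                                   unsigned int nr_pools,
                                   struct mini_pool *large_pool,
                                   unsigned int cpu_hash, int largealloc)
{
	if (largealloc)
		return large_pool;
	return &pools[cpu_hash & (nr_pools - 1)];
}
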
Exemple #22
0
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
Exemple #23
0
static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
		     u8 *wdata, int wsize, u8 *rdata, int rsize)
{
	int ret, actlen;
	int offb, offd;
	const int stride = CMD_PACKET_SIZE - 4;
	u8 *wbuf = instance->snd_buf;
	u8 *rbuf = instance->rcv_buf;
	int wbuflen = ((wsize - 1) / stride + 1) * CMD_PACKET_SIZE;
	int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE;

	if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n",
				wbuflen, rbuflen);
		ret = -ENOMEM;
		goto err;
	}

	mutex_lock(&instance->cm_serialize);

	/* submit the read urb before sending the command */
	init_completion(&instance->rcv_done);
	ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "submit of read urb for cm %#x failed (%d)\n",
				cm, ret);
		goto fail;
	}

	memset(wbuf, 0, wbuflen);
	/* the command id is set even when there is no payload */
	wbuf[0] = cm;
	for (offb = offd = 0; offd < wsize; offd += stride, offb += CMD_PACKET_SIZE) {
		wbuf[offb] = cm;
		memcpy(wbuf + offb + 4, wdata + offd, min_t(int, stride, wsize - offd));
	}

	instance->snd_urb->transfer_buffer_length = wbuflen;
	init_completion(&instance->snd_done);
	ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "submit of write urb for cm %#x failed (%d)\n",
				cm, ret);
		goto fail;
	}

	ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "send of cm %#x failed (%d)\n", cm, ret);
		goto fail;
	}

	ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "receive of cm %#x failed (%d)\n", cm, ret);
		goto fail;
	}
	if (actlen % CMD_PACKET_SIZE || !actlen) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "invalid response length to cm %#x: %d\n",
				cm, actlen);
		ret = -EIO;
		goto fail;
	}

	/* copy the response payload out of the receive buffer */
	for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) {
		if (rbuf[offb] != cm) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "wrong cm %#x in response to cm %#x\n",
					rbuf[offb], cm);
			ret = -EIO;
			goto fail;
		}
		if (rbuf[offb + 1] != CM_STATUS_SUCCESS) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "response to cm %#x failed: %#x\n",
					cm, rbuf[offb + 1]);
			ret = -EIO;
			goto fail;
		}
		if (offd >= rsize)
			break;
		memcpy(rdata + offd, rbuf + offb + 4, min_t(int, stride, rsize - offd));
		offd += stride;
	}

	ret = offd;
	dbg("cm %#x", cm);
fail:
	mutex_unlock(&instance->cm_serialize);
err:
	return ret;
}
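
The wbuflen/rbuflen arithmetic packs the payload into fixed-size command packets, each carrying a 4-byte header and (CMD_PACKET_SIZE - 4) payload bytes. The rounding, spelled out as a sketch (the packet size here is an assumed value, standing in for CMD_PACKET_SIZE):

#define PKT_SIZE 64		/* assumed; stands in for CMD_PACKET_SIZE */

/* Buffer length for 'size' payload bytes: ceil(size / stride) whole
 * packets, each PKT_SIZE bytes on the wire. Matches the expression
 * ((size - 1) / stride + 1) * PKT_SIZE used above for size >= 1. */
static int cm_buf_len(int size)
{
	const int stride = PKT_SIZE - 4;
	int packets = (size - 1) / stride + 1;

	return packets * PKT_SIZE;
}
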
Exemple #24
0
static bool _sc92031_check_media(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmsr;

	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
	rmb();
	if (bmsr & BMSR_LSTATUS) {
		bool speed_100, duplex_full;
		u32 flow_ctrl_config = 0;
		u16 output_status = _sc92031_mii_read(port_base,
				MII_OutputStatus);
		_sc92031_mii_scan(port_base);

		speed_100 = output_status & 0x2;
		duplex_full = output_status & 0x4;

		/* Initial Tx/Rx configuration */
		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
		priv->tx_config = 0x48800000;

		/* NOTE: vendor driver had dead code here to enable tx padding */

		if (!speed_100)
			priv->tx_config |= 0x80000;

		// configure rx mode
		_sc92031_set_rx_config(dev);

		if (duplex_full) {
			priv->rx_config |= RxFullDx;
			priv->tx_config |= TxFullDx;
			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
		} else {
			priv->rx_config &= ~RxFullDx;
			priv->tx_config &= ~TxFullDx;
		}

		_sc92031_set_mar(dev);
		_sc92031_set_rx_config(dev);
		_sc92031_enable_tx_rx(dev);
		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

		netif_carrier_on(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);
		return false;
	}
}
Exemple #25
0
/*
 * isoc_complete
 *
 * This is an interrupt handler.
 * Called when we receive isochronous usb-data from the device
 */
static void isoc_complete(struct urb *urb)
{
	int i,rc;
	struct usb_somagic *somagic = urb->context;

	if (!(somagic->streaming_flags & SOMAGIC_STREAMING_STARTED)) {
		return;
	}

	if (urb->status == -ENOENT) {
		printk(KERN_INFO "somagic::%s: Recieved empty ISOC urb!\n", __func__);
		return;
	}

	somagic->received_urbs++;

	for (i = 0; i < urb->number_of_packets; i++) {
		unsigned char *data = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
		int packet_len = urb->iso_frame_desc[i].actual_length;
		int pos = 0;

		urb->iso_frame_desc[i].status = 0;
		urb->iso_frame_desc[i].actual_length = 0;

		if ((packet_len % 0x400) != 0) {
			printk(KERN_INFO "somagic::%s: Discard ISOC packet with "
			       "unknown size! Size is %d\n",
			       __func__, packet_len);
			continue;
		}

		while (pos < packet_len) {
			/*
			 * Within each packet of the transfer, the data is divided into
			 * blocks of 0x400 (1024) bytes beginning with [0xaa 0xaa 0x00 0x00]
			 * for video, or [0xaa 0xaa 0x00 0x01] for audio.
			 * Check for this signature, and pass each block to the scratch
			 * buffer for further processing.
			 */

			if (data[pos] == 0xaa && data[pos+1] == 0xaa &&
					data[pos+2] == 0x00 && data[pos+3] == 0x00) {
				// We have video data, put it on scratch-buffer

				somagic_video_put(somagic, &data[pos+4], 0x400 - 4);

			} else if (data[pos] == 0xaa && data[pos+1] == 0xaa &&
								 data[pos+2] == 0x00 && data[pos+3] == 0x01) {

				somagic_audio_put(somagic, &data[pos + 4], 0x400 - 4);

			} else if (printk_ratelimit()) {
				printk(KERN_INFO "somagic::%s: Unexpected block, "\
							 "expected [aa aa 00 00], found [%02x %02x %02x %02x]\n",
							 __func__, data[pos], data[pos+1], data[pos+2], data[pos+3]);
			}

			// Skip to next 0xaa 0xaa 0x00 0x00
			pos += 0x400;
		}
	}

	tasklet_hi_schedule(&(somagic->audio.process_audio));
	tasklet_hi_schedule(&(somagic->video.process_video));

	urb->status = 0;
	urb->dev = somagic->dev;
	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc) {
		dev_err(&somagic->dev->dev,
						"%s: usb_submit_urb failed: error %d\n",
						__func__, rc);
	}

	return;
}
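
The signature test in the loop above is a fixed 4-byte tag at the head of each 0x400-byte block. The same walk, reduced to its skeleton (the two callbacks are hypothetical stand-ins for somagic_video_put/somagic_audio_put):

#include <string.h>

#define BLOCK_SIZE 0x400

/* Walk one isoc packet block by block and dispatch on the leading tag:
 * [aa aa 00 00] = video payload, [aa aa 00 01] = audio payload. */
static void scan_packet(const unsigned char *data, int len,
                        void (*put_video)(const unsigned char *, int),
                        void (*put_audio)(const unsigned char *, int))
{
	int pos;

	for (pos = 0; pos + BLOCK_SIZE <= len; pos += BLOCK_SIZE) {
		if (!memcmp(data + pos, "\xaa\xaa\x00\x00", 4))
			put_video(data + pos + 4, BLOCK_SIZE - 4);
		else if (!memcmp(data + pos, "\xaa\xaa\x00\x01", 4))
			put_audio(data + pos + 4, BLOCK_SIZE - 4);
		/* any other tag is unexpected and simply skipped */
	}
}
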
Exemple #26
0
/*
 * This function is called when an URB transfer completes (isochronous pipe),
 * so the processing runs in interrupt context: it has to be fast, must not
 * crash, and must not stall. Neat.
 */
static void stk_isoc_handler(struct urb *urb)
{
	int i;
	int ret;
	int framelen;
	unsigned long flags;

	unsigned char *fill = NULL;
	unsigned char *iso_buf = NULL;

	struct stk_camera *dev;
	struct stk_sio_buffer *fb;

	dev = (struct stk_camera *) urb->context;

	if (dev == NULL) {
		STK_ERROR("isoc_handler called with NULL device !\n");
		return;
	}

	if (urb->status == -ENOENT || urb->status == -ECONNRESET
		|| urb->status == -ESHUTDOWN) {
		atomic_dec(&dev->urbs_used);
		return;
	}

	spin_lock_irqsave(&dev->spinlock, flags);

	if (urb->status != -EINPROGRESS && urb->status != 0) {
		STK_ERROR("isoc_handler: urb->status == %d\n", urb->status);
		goto resubmit;
	}

	if (list_empty(&dev->sio_avail)) {
		/*FIXME Stop streaming after a while */
		(void) (printk_ratelimit() &&
		STK_ERROR("isoc_handler without available buffer!\n"));
		goto resubmit;
	}
	fb = list_first_entry(&dev->sio_avail,
			struct stk_sio_buffer, list);
	fill = fb->buffer + fb->v4lbuf.bytesused;

	for (i = 0; i < urb->number_of_packets; i++) {
		if (urb->iso_frame_desc[i].status != 0) {
			if (urb->iso_frame_desc[i].status != -EXDEV)
				STK_ERROR("Frame %d has error %d\n", i,
					urb->iso_frame_desc[i].status);
			continue;
		}
		framelen = urb->iso_frame_desc[i].actual_length;
		iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		if (framelen <= 4)
			continue; /* no data */

		/*
		 * we found something informational from there
		 * the isoc frames have two types of headers
		 * type1: 00 xx 00 00 or 20 xx 00 00
		 * type2: 80 xx 00 00 00 00 00 00 or a0 xx 00 00 00 00 00 00
		 * xx is a sequencer which has never been seen over 0x3f
		 * imho data written down looks like bayer, i see similarities
		 * after every 640 bytes
		 */
		if (*iso_buf & 0x80) {
			framelen -= 8;
			iso_buf += 8;
			/* This marks a new frame */
			if (fb->v4lbuf.bytesused != 0
				&& fb->v4lbuf.bytesused != dev->frame_size) {
				(void) (printk_ratelimit() &&
				STK_ERROR("frame %d, "
					"bytesused=%d, skipping\n",
					i, fb->v4lbuf.bytesused));
				fb->v4lbuf.bytesused = 0;
				fill = fb->buffer;
			} else if (fb->v4lbuf.bytesused == dev->frame_size) {
				if (list_is_singular(&dev->sio_avail)) {
					/* Always reuse the last buffer */
					fb->v4lbuf.bytesused = 0;
					fill = fb->buffer;
				} else {
					list_move_tail(dev->sio_avail.next,
						&dev->sio_full);
					wake_up(&dev->wait_frame);
					fb = list_first_entry(&dev->sio_avail,
						struct stk_sio_buffer, list);
					fb->v4lbuf.bytesused = 0;
					fill = fb->buffer;
				}
			}
		} else {
Exemple #27
0
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(trace, regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

#ifdef CONFIG_KDI
		memset(&p->kdi_state,0,sizeof(struct kdi_task_state_s));  
#endif

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
Exemple #28
0
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* zero-length packet, just retry */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall: the device may be choking on our requests.
		 * Clear the halt and give it some time to recover; if
		 * stalls keep piling up, assume something deeper is
		 * wrong and reset the device.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device time to recover */
		goto retry;
	case -EINVAL:			/* invalid request, give up */
	case -ENODEV:			/* device disconnected */
	case -ENOENT:			/* urb killed, ignore */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* buffer too small, grow it */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* sanity cap on growth */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it */
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) (skb_end_pointer(new_skb) - new_skb->head));
		goto retry;
	}
		/* A timeout is not treated as fatal here: report it and
		 * return with whatever data has been read so far.
		 */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* any other error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
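
The rx_size adjustment before usb_bulk_msg() is a USB idiom: a bulk transfer whose length is an exact multiple of the endpoint's max packet size can only be terminated early by a zero-length packet, so the driver shaves a few bytes off to guarantee a short packet ends the read instead. Isolated as a sketch (the driver above subtracts 8; any amount that breaks the multiple would do):

/* Never request an exact multiple of the endpoint max packet size; a
 * slightly smaller read lets the device finish the transfer with a
 * short packet rather than a ZLP. */
static int adjust_rx_size(int rx_size, int max_pkt_size)
{
	if (rx_size % max_pkt_size == 0)
		rx_size -= 8;
	return rx_size;
}
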
Exemple #29
0
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}
Exemple #30
0
/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_iw_setup_qp(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct rds_iw_device *rds_iwdev;
	int ret;

	/* rds_iw_add_one creates a rds_iw_device object per IB device,
	 * and allocates a protection domain, memory range and MR pool
	 * for each.  If that fails for any reason, it will not register
	 * the rds_iwdev at all.
	 */
	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
	if (!rds_iwdev) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
					dev->name);
		return -EOPNOTSUPP;
	}

	/* Protection domain and memory range */
	ic->i_pd = rds_iwdev->pd;
	ic->i_mr = rds_iwdev->mr;

	ret = rds_iw_init_qp_attrs(&attr, rds_iwdev,
			&ic->i_send_ring, rds_iw_send_cq_comp_handler,
			&ic->i_recv_ring, rds_iw_recv_cq_comp_handler,
			conn);
	if (ret < 0)
		goto out;

	ic->i_send_cq = attr.send_cq;
	ic->i_recv_cq = attr.recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}
	rds_iw_send_init_ring(ic);

	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_iw_recv_init_ring(ic);
	rds_iw_recv_init_ack(ic);

	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);

	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	return ret;
}