Example #1
0
static void xen_resume_notifier(int _suspend_cancelled)
{
	int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING);
	BUG_ON(old_state != SHUTDOWN_SUSPEND);
	suspend_cancelled = _suspend_cancelled;
}
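Every snippet in this collection turns on the semantics of xchg(): atomically store a new value and return the old one in a single indivisible step. As a point of reference, here is a minimal user-space sketch of Example #1's state handoff, using C11 atomic_exchange() as a stand-in for the kernel primitive (the variable and state values are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

enum { SHUTDOWN_SUSPEND = 1, SHUTDOWN_RESUMING = 2 };

static atomic_int shutting_down = SHUTDOWN_SUSPEND;

int main(void)
{
	/* Swap in the new state and get the previous one back atomically. */
	int old_state = atomic_exchange(&shutting_down, SHUTDOWN_RESUMING);
	printf("old=%d new=%d\n", old_state, atomic_load(&shutting_down));
	return 0;
}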
Example #2
0
inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}
Example #3
0
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler)
{
	return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler, handler);
}
Example #4
0
void cv_signal(cond_t *cv){
  t_wakeup();
  xchg(&cv->cond, 1);
}
Example #5
0
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
Example #6
0
/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
int expand_fdset(struct files_struct *files, int nr)
{
    fd_set *new_openset = 0, *new_execset = 0;
    int error, nfds = 0;

    error = -EMFILE;
    if (files->max_fdset >= NR_OPEN || nr >= NR_OPEN)
        goto out;

    nfds = files->max_fdset;
    write_unlock(&files->file_lock);

    /* Expand to the max in easy steps */
    do {
        if (nfds < (PAGE_SIZE * 8))
            nfds = PAGE_SIZE * 8;
        else {
            nfds = nfds * 2;
            if (nfds > NR_OPEN)
                nfds = NR_OPEN;
        }
    } while (nfds <= nr);

    error = -ENOMEM;
    new_openset = alloc_fdset(nfds);
    new_execset = alloc_fdset(nfds);
    write_lock(&files->file_lock);
    if (!new_openset || !new_execset)
        goto out;

    error = 0;

    /* Copy the existing tables and install the new pointers */
    if (nfds > files->max_fdset) {
        int i = files->max_fdset / (sizeof(unsigned long) * 8);
        int count = (nfds - files->max_fdset) / 8;

        /*
         * Don't copy the entire array if the current fdset is
         * not yet initialised.
         */
        if (i) {
            memcpy (new_openset, files->open_fds, files->max_fdset/8);
            memcpy (new_execset, files->close_on_exec, files->max_fdset/8);
            memset (&new_openset->fds_bits[i], 0, count);
            memset (&new_execset->fds_bits[i], 0, count);
        }

        nfds = xchg(&files->max_fdset, nfds);
        new_openset = xchg(&files->open_fds, new_openset);
        new_execset = xchg(&files->close_on_exec, new_execset);
        write_unlock(&files->file_lock);
        free_fdset (new_openset, nfds);
        free_fdset (new_execset, nfds);
        write_lock(&files->file_lock);
        return 0;
    }
    /* Somebody expanded the array while we slept ... */

out:
    write_unlock(&files->file_lock);
    if (new_openset)
        free_fdset(new_openset, nfds);
    if (new_execset)
        free_fdset(new_execset, nfds);
    write_lock(&files->file_lock);
    return error;
}
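Examples #6 and #7 share one resize idiom: drop the spinlock while allocating (allocation may sleep), retake it, and, if nobody resized in the meantime, publish the new table with xchg() and free the displaced one. A hedged user-space sketch of the publish step, with a pthread mutex standing in for files->file_lock (the struct and names are invented, and copying the old contents is elided):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct table {
	_Atomic(int *) slots;
	atomic_int cap;
};

/* Grow t to at least want entries; called with *lk held, as in the kernel. */
static int table_grow(struct table *t, pthread_mutex_t *lk, int want)
{
	pthread_mutex_unlock(lk);		/* allocation may block */
	int *fresh = calloc(want, sizeof(*fresh));
	pthread_mutex_lock(lk);
	if (!fresh)
		return -1;
	if (want <= atomic_load(&t->cap)) {	/* somebody else grew it */
		free(fresh);
		return 0;
	}
	/* Publish the new array and capacity, then reclaim the old array
	 * (the kernel drops the lock again before freeing). */
	int *old = atomic_exchange(&t->slots, fresh);
	atomic_store(&t->cap, want);
	free(old);
	return 0;
}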
Example #7
0
int expand_fd_array(struct files_struct *files, int nr)
{
    struct file **new_fds;
    int error, nfds;


    error = -EMFILE;
    if (files->max_fds >= NR_OPEN || nr >= NR_OPEN)
        goto out;

    nfds = files->max_fds;
    write_unlock(&files->file_lock);

    /*
     * Expand to the max in easy steps, and keep expanding it until
     * we have enough for the requested fd array size.
     */

    do {
#if NR_OPEN_DEFAULT < 256
        if (nfds < 256)
            nfds = 256;
        else
#endif
            if (nfds < (PAGE_SIZE / sizeof(struct file *)))
                nfds = PAGE_SIZE / sizeof(struct file *);
            else {
                nfds = nfds * 2;
                if (nfds > NR_OPEN)
                    nfds = NR_OPEN;
            }
    } while (nfds <= nr);

    error = -ENOMEM;
    new_fds = alloc_fd_array(nfds);
    write_lock(&files->file_lock);
    if (!new_fds)
        goto out;

    /* Copy the existing array and install the new pointer */

    if (nfds > files->max_fds) {
        struct file **old_fds;
        int i;

        old_fds = xchg(&files->fd, new_fds);
        i = xchg(&files->max_fds, nfds);

        /* Don't copy/clear the array if we are creating a new
           fd array for fork() */
        if (i) {
            memcpy(new_fds, old_fds, i * sizeof(struct file *));
            /* clear the remainder of the array */
            memset(&new_fds[i], 0,
                   (nfds-i) * sizeof(struct file *));

            write_unlock(&files->file_lock);
            free_fd_array(old_fds, i);
            write_lock(&files->file_lock);
        }
    } else {
        /* Somebody expanded the array while we slept ... */
        write_unlock(&files->file_lock);
        free_fd_array(new_fds, nfds);
        write_lock(&files->file_lock);
    }
    error = 0;
out:
    return error;
}
Example #8
0
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
Example #9
0
void
runtime·notewakeup(Note *n)
{
	runtime·xchg(&n->state, 1);
	futexwakeup(&n->state, 1<<30);
}
Example #10
0
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch(optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val==0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_RCVTIMEO:
		lv=sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv=sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val=1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
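The SO_ERROR branch above is the read-and-clear idiom: xchg(&sk->sk_err_soft, 0) fetches the pending soft error and resets it in the same step, so a given error is reported to exactly one caller. A minimal sketch of the same idea in C11 (the variable is illustrative):

#include <stdatomic.h>

static atomic_int soft_err;

/* Return the pending error and clear it, atomically. */
static int take_soft_error(void)
{
	return atomic_exchange(&soft_err, 0);
}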
Example #11
0
/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
               int operation, unsigned long oid,
               void *senddata, int sendlen,
               struct islpci_mgmtframe **recvframe)
{
    islpci_private *priv = netdev_priv(ndev);
    const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
    long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
    int err;
    DEFINE_WAIT(wait);

    *recvframe = NULL;

    if (mutex_lock_interruptible(&priv->mgmt_lock))
        return -ERESTARTSYS;

    prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
    err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
    if (err)
        goto out;

    err = -ETIMEDOUT;
    while (timeout_left > 0) {
        int timeleft;
        struct islpci_mgmtframe *frame;

        timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
        frame = xchg(&priv->mgmt_received, NULL);
        if (frame) {
            if (frame->header->oid == oid) {
                *recvframe = frame;
                err = 0;
                goto out;
            } else {
                printk(KERN_DEBUG
                       "%s: expecting oid 0x%x, received 0x%x.\n",
                       ndev->name, (unsigned int) oid,
                       frame->header->oid);
                kfree(frame);
                frame = NULL;
            }
        }
        if (timeleft == 0) {
            printk(KERN_DEBUG
                "%s: timeout waiting for mgmt response %lu, "
                "triggering device\n",
                ndev->name, timeout_left);
            islpci_trigger(priv);
        }
        timeout_left += timeleft - wait_cycle_jiffies;
    }
    printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
           ndev->name);

    /* TODO: we should reset the device here */
 out:
    finish_wait(&priv->mgmt_wqueue, &wait);
    mutex_unlock(&priv->mgmt_lock);
    return err;
}
Example #12
0
/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
    islpci_private *priv = netdev_priv(ndev);
    isl38xx_control_block *cb =
        (isl38xx_control_block *) priv->control_block;
    u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
#endif

    /* Only once per interrupt, determine fragment range to
     * process.  This avoids an endless loop (i.e. lockup) if
     * frames come in faster than we can process them. */
    curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
    barrier();

    for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
        pimfor_header_t *header;
        u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
        struct islpci_membuf *buf = &priv->mgmt_rx[index];
        u16 frag_len;
        int size;
        struct islpci_mgmtframe *frame;

        /* I have no idea (and no documentation) if flags != 0
         * is possible.  Drop the frame, reuse the buffer. */
        if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
            printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
                   ndev->name,
                   le16_to_cpu(cb->rx_data_mgmt[index].flags));
            continue;
        }

        /* The device only returns the size of the header(s) here. */
        frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

        /*
         * We appear to have no way to tell the device the
         * size of a receive buffer.  Thus, if this check
         * triggers, we likely have kernel heap corruption. */
        if (frag_len > MGMT_FRAME_SIZE) {
            printk(KERN_WARNING
                "%s: Bogus packet size of %d (%#x).\n",
                ndev->name, frag_len, frag_len);
            frag_len = MGMT_FRAME_SIZE;
        }

        /* Ensure the results of device DMA are visible to the CPU. */
        pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
                        buf->size, PCI_DMA_FROMDEVICE);

        /* Perform endianness conversion for the PIMFOR header in-place. */
        header = pimfor_decode_header(buf->mem, frag_len);
        if (!header) {
            printk(KERN_WARNING "%s: no PIMFOR header found\n",
                   ndev->name);
            continue;
        }

        /* The device ID from the PIMFOR packet received from
         * the MVC is always 0.  We forward a sensible device_id.
         * Not that anyone upstream would care... */
        header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_PIMFOR_FRAMES,
              "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
              header->operation, header->oid, header->device_id,
              header->flags, header->length);

        /* display the buffer contents for debugging */
        display_buffer((char *) header, PIMFOR_HEADER_SIZE);
        display_buffer((char *) header + PIMFOR_HEADER_SIZE,
                   header->length);
#endif

        /* nobody sends these */
        if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
            printk(KERN_DEBUG
                   "%s: errant PIMFOR application frame\n",
                   ndev->name);
            continue;
        }

        /* Determine frame size, skipping OID_INL_TUNNEL headers. */
        size = PIMFOR_HEADER_SIZE + header->length;
        frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
                GFP_ATOMIC);
        if (!frame) {
            printk(KERN_WARNING
                   "%s: Out of memory, cannot handle oid 0x%08x\n",
                   ndev->name, header->oid);
            continue;
        }
        frame->ndev = ndev;
        memcpy(&frame->buf, header, size);
        frame->header = (pimfor_header_t *) frame->buf;
        frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_PIMFOR_FRAMES,
              "frame: header: %p, data: %p, size: %d\n",
              frame->header, frame->data, size);
#endif

        if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
            printk(KERN_DEBUG
                   "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
                   header->oid, header->device_id, header->flags,
                   header->length);
#endif

            /* Create work to handle trap out of interrupt
             * context. */
            INIT_WORK(&frame->ws, prism54_process_trap);
            schedule_work(&frame->ws);

        } else {
            /* Signal the one waiting process that a response
             * has been received. */
            if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
                printk(KERN_WARNING
                       "%s: mgmt response not collected\n",
                       ndev->name);
                kfree(frame);
            }
#if VERBOSE > SHOW_ERROR_MESSAGES
            DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
            wake_up(&priv->mgmt_wqueue);
        }

    }

    return 0;
}
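Examples #11 and #12 cooperate through a one-slot mailbox: the receive path publishes a response with xchg() (freeing any response nobody collected), and the waiting path takes ownership by swapping NULL in. Either way, exactly one side ends up holding a given frame. A hedged user-space sketch of the handoff (the item type and function names are invented):

#include <stdatomic.h>
#include <stdlib.h>

struct item {
	int oid;
};

static _Atomic(struct item *) mailbox;	/* one-slot mailbox, NULL = empty */

/* Producer: publish a new item, reclaiming one nobody collected. */
static void mailbox_post(struct item *it)
{
	struct item *stale = atomic_exchange(&mailbox, it);
	free(stale);			/* free(NULL) is a no-op */
}

/* Consumer: take sole ownership of the current item, if any. */
static struct item *mailbox_take(void)
{
	return atomic_exchange(&mailbox, NULL);
}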
Example #13
0
void slock_acquire(slock_t* lock)
{
	while(xchg(&lock->val, 1) == 1);
}
Example #14
0
static int install_os_hooks(void)
{
	int ret;

	/* register module state change notifier */
	nb_init.notifier_call = module_init_notifier;

	ret = register_module_notifier(&nb_init);

	if (ret != 0)
	{
		printk(KERN_ERR "[CPA] register_module_notifier() fails\n");
		return -EFAULT;
	}

#ifdef CONFIG_PROFILING
	ret = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        
	if (ret != 0)
	{
		printk(KERN_ERR "[CPA] profile_event_register() fails\n");
		return -EFAULT;
	}
#endif

	/* hook necessary system call table */
	px_original_sys_fork       = (sys_fork_t) xchg(&system_call_table[__NR_fork - __NR_SYSCALL_BASE], px_sys_fork);
	px_original_sys_vfork      = (sys_vfork_t) xchg(&system_call_table[__NR_vfork - __NR_SYSCALL_BASE], px_sys_vfork);
	px_original_sys_clone      = (sys_clone_t) xchg(&system_call_table[__NR_clone - __NR_SYSCALL_BASE], px_sys_clone);
	px_original_sys_execve     = (sys_execve_t) xchg(&system_call_table[__NR_execve - __NR_SYSCALL_BASE], px_sys_execve);
	px_original_sys_mmap       = (sys_mmap_t) xchg(&system_call_table[__NR_mmap - __NR_SYSCALL_BASE], px_sys_mmap);
	px_original_sys_mmap2      = (sys_mmap2_t) xchg(&system_call_table[__NR_mmap2 - __NR_SYSCALL_BASE], px_sys_mmap2);
	px_original_sys_exit       = (sys_exit_t) xchg(&system_call_table[__NR_exit - __NR_SYSCALL_BASE], px_sys_exit);
	px_original_sys_exit_group = (sys_exit_group_t) xchg(&system_call_table[__NR_exit_group - __NR_SYSCALL_BASE], px_sys_exit_group);
	px_original_sys_kill       = (sys_kill_t) xchg(&system_call_table[__NR_kill - __NR_SYSCALL_BASE], px_sys_kill);
	px_original_sys_tkill      = (sys_tkill_t) xchg(&system_call_table[__NR_tkill - __NR_SYSCALL_BASE], px_sys_tkill);
	px_original_sys_tgkill     = (sys_tgkill_t) xchg(&system_call_table[__NR_tgkill - __NR_SYSCALL_BASE], px_sys_tgkill);
	px_original_sys_prctl      = (sys_prctl_t) xchg(&system_call_table[__NR_prctl - __NR_SYSCALL_BASE], px_sys_prctl);

	gb_enable_os_hooks = true;

	return 0;
}
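Example #14 splices each hook into the system call table with xchg(), so installing the hook and capturing the original handler happen in one atomic step and the slot is never left unset. A hedged sketch of the same swap-in-a-hook idiom with an atomic function pointer (the types and handlers are invented; note that a hook which must chain to the original has to cope with the window before the saved pointer is written back):

#include <stdatomic.h>

typedef long (*handler_t)(long);

static long real_handler(long x) { return x; }
static long hook_handler(long x) { return -x; }	/* deliberately independent */

static _Atomic(handler_t) slot = real_handler;
static handler_t original;

static void install_hook(void)
{
	/* Install the hook and remember the displaced handler atomically. */
	original = atomic_exchange(&slot, hook_handler);
}

static void remove_hook(void)
{
	atomic_exchange(&slot, original);	/* restore the saved handler */
}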
Example #15
0
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (optval == NULL)
		val=0;
	else {
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else
			val = 0;
	}

	valbool = (val!=0);

	if (ip6_mroute_opt(optname))
		return ip6_mroute_setsockopt(sk, optname, optval, optlen);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_type == SOCK_RAW)
				break;

			if (sk->sk_protocol == IPPROTO_UDP ||
			    sk->sk_protocol == IPPROTO_UDPLITE) {
				struct udp_sock *up = udp_sk(sk);
				if (up->pending == AF_INET6) {
					retv = -EBUSY;
					break;
				}
			} else if (sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !ipv6_addr_v4mapped(&np->daddr)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			/*
			 * Sock is moving from IPv6 to IPv4 (sk_prot), so
			 * remove it from the refcnt debug socks count in the
			 * original family...
			 */
			sk_refcnt_debug_dec(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct inet_connection_sock *icsk = inet_csk(sk);
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, &tcp_prot, 1);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				icsk->icsk_af_ops = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
			} else {
				struct proto *prot = &udp_prot;

				if (sk->sk_protocol == IPPROTO_UDPLITE)
					prot = &udplite_prot;
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, prot, 1);
				local_bh_enable();
				sk->sk_prot = prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
			/*
			 * ... and add it to the refcnt debug socks count
			 * in the new family. -acme
			 */
			sk_refcnt_debug_inc(sk);
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (optlen < sizeof(int) ||
		    inet_sk(sk)->inet_num)
			goto e_inval;
		np->ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_RECVPKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_2292PKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxoinfo = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxohlim = valbool;
		retv = 0;
		break;

	case IPV6_RECVRTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.srcrt = valbool;
		retv = 0;
		break;

	case IPV6_2292RTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.osrcrt = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.ohopopts = valbool;
		retv = 0;
		break;

	case IPV6_RECVDSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_2292DSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.odstopts = valbool;
		retv = 0;
		break;

	case IPV6_TCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < -1 || val > 0xff)
			goto e_inval;
		/* RFC 3542, 6.5: default traffic class of 0x0 */
		if (val == -1)
			val = 0;
		np->tclass = val;
		retv = 0;
		break;

	case IPV6_RECVTCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxtclass = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_RECVPATHMTU:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxpmtu = valbool;
		retv = 0;
		break;

	case IPV6_TRANSPARENT:
		if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) {
			retv = -EPERM;
			break;
		}
		if (optlen < sizeof(int))
			goto e_inval;
		/* we don't have a separate transparent bit for IPv6; we use the one in the IPv4 socket */
		inet_sk(sk)->transparent = valbool;
		retv = 0;
		break;

	case IPV6_RECVORIGDSTADDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxorigdstaddr = valbool;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
	case IPV6_RTHDRDSTOPTS:
	case IPV6_RTHDR:
	case IPV6_DSTOPTS:
	{
		struct ipv6_txoptions *opt;

		/* remove any sticky options header with a zero option
		 * length, per RFC3542.
		 */
		if (optlen == 0)
			optval = NULL;
		else if (optval == NULL)
			goto e_inval;
		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
			 optlen & 0x7 || optlen > 8 * 255)
			goto e_inval;

		/* hop-by-hop / destination options are privileged option */
		retv = -EPERM;
		if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW))
			break;

		opt = ipv6_renew_options(sk, np->opt, optname,
					 (struct ipv6_opt_hdr __user *)optval,
					 optlen);
		if (IS_ERR(opt)) {
			retv = PTR_ERR(opt);
			break;
		}

		/* routing header option needs extra check */
		retv = -EINVAL;
		if (optname == IPV6_RTHDR && opt && opt->srcrt) {
			struct ipv6_rt_hdr *rthdr = opt->srcrt;
			switch (rthdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
			case IPV6_SRCRT_TYPE_2:
				if (rthdr->hdrlen != 2 ||
				    rthdr->segments_left != 1)
					goto sticky_done;

				break;
#endif
			default:
				goto sticky_done;
			}
		}

		retv = 0;
		opt = ipv6_update_options(sk, opt);
sticky_done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}

	case IPV6_PKTINFO:
	{
		struct in6_pktinfo pkt;

		if (optlen == 0)
			goto e_inval;
		else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
			goto e_inval;

		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
				retv = -EFAULT;
				break;
		}
		if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
			goto e_inval;

		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
		ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
		retv = 0;
		break;
	}

	case IPV6_2292PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi6 fl6;
		int junk;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.flowi6_mark = sk->sk_mark;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void*)(opt+1);

		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
					 &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		opt = ipv6_update_options(sk, opt);
done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val != valbool)
			goto e_inval;
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;

		if (val) {
			struct net_device *dev;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
				goto e_inval;

			dev = dev_get_by_index(net, val);
			if (!dev) {
				retv = -ENODEV;
				break;
			}
			dev_put(dev);
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		if (optlen < sizeof(struct group_req))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
				&psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
				&psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen < sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
				&psin6->sin6_addr);
			/* prior join w/ different source is ok */
			if (retv && retv != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		extern int sysctl_mld_max_msf;
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen,GFP_KERNEL);
		if (!gsf) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);

		break;
	}
	case IPV6_ROUTER_ALERT:
		if (optlen < sizeof(int))
			goto e_inval;
		retv = ip6_ra_control(sk, val);
		break;
	case IPV6_MTU_DISCOVER:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		if (optlen < sizeof(int))
			goto e_inval;
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IPV6_ADDR_PREFERENCES:
	    {
		unsigned int pref = 0;
		unsigned int prefmask = ~0;

		if (optlen < sizeof(int))
			goto e_inval;

		retv = -EINVAL;

		/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
		switch (val & (IPV6_PREFER_SRC_PUBLIC|
			       IPV6_PREFER_SRC_TMP|
			       IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
		case IPV6_PREFER_SRC_PUBLIC:
			pref |= IPV6_PREFER_SRC_PUBLIC;
			break;
		case IPV6_PREFER_SRC_TMP:
			pref |= IPV6_PREFER_SRC_TMP;
			break;
		case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
			break;
		case 0:
			goto pref_skip_pubtmp;
		default:
			goto e_inval;
		}

		prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
			      IPV6_PREFER_SRC_TMP);
pref_skip_pubtmp:

		/* check HOME/COA conflicts */
		switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
		case IPV6_PREFER_SRC_HOME:
			break;
		case IPV6_PREFER_SRC_COA:
			pref |= IPV6_PREFER_SRC_COA;
		case 0:
			goto pref_skip_coa;
		default:
			goto e_inval;
		}

		prefmask &= ~IPV6_PREFER_SRC_COA;
pref_skip_coa:

		/* check CGA/NONCGA conflicts */
		switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
		case IPV6_PREFER_SRC_CGA:
		case IPV6_PREFER_SRC_NONCGA:
		case 0:
			break;
		default:
			goto e_inval;
		}

		np->srcprefs = (np->srcprefs & prefmask) | pref;
		retv = 0;

		break;
	    }
	case IPV6_MINHOPCOUNT:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		np->min_hopcount = val;
		break;
	case IPV6_DONTFRAG:
		np->dontfrag = valbool;
		retv = 0;
		break;
	}

	release_sock(sk);

	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
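In the IPV6_ADDRFORM branch above, xchg(&np->opt, NULL) and xchg(&np->pktoptions, NULL) detach each pointer before freeing it, so no path can fetch a pointer to memory that is about to be released (the socket lock, held throughout, is still what makes this safe against concurrent readers). A minimal sketch of detach-then-free (the pointer is illustrative):

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(char *) options;

/* Swap the options out atomically, then free whatever was there. */
static void drop_options(void)
{
	char *opt = atomic_exchange(&options, NULL);
	free(opt);
}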
Example #16
0
// Releases a spinlock
void
ulock_release(ulock_t * lock)
{
	xchg(&lock->locked, 0);
}
Example #17
0
int rtlx_open(int index, int can_sleep)
{
	struct rtlx_info **p;
	struct rtlx_channel *chan;
	enum rtlx_state state;
	int ret = 0;

	if (index >= RTLX_CHANNELS) {
		printk(KERN_DEBUG "rtlx_open index out of range\n");
		return -ENOSYS;
	}

	if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
		printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
		       index);
		ret = -EBUSY;
		goto out_fail;
	}

	if (rtlx == NULL) {
		if( (p = vpe_get_shared(tclimit)) == NULL) {
		    if (can_sleep) {
			__wait_event_interruptible(channel_wqs[index].lx_queue,
				(p = vpe_get_shared(tclimit)), ret);
			if (ret)
				goto out_fail;
		    } else {
			printk(KERN_DEBUG "No SP program loaded, and device "
					"opened with O_NONBLOCK\n");
			ret = -ENOSYS;
			goto out_fail;
		    }
		}

		smp_rmb();
		if (*p == NULL) {
			if (can_sleep) {
				DEFINE_WAIT(wait);

				for (;;) {
					prepare_to_wait(
						&channel_wqs[index].lx_queue,
						&wait, TASK_INTERRUPTIBLE);
					smp_rmb();
					if (*p != NULL)
						break;
					if (!signal_pending(current)) {
						schedule();
						continue;
					}
					ret = -ERESTARTSYS;
					goto out_fail;
				}
				finish_wait(&channel_wqs[index].lx_queue, &wait);
			} else {
				pr_err(" *vpe_get_shared is NULL. "
				       "Has an SP program been loaded?\n");
				ret = -ENOSYS;
				goto out_fail;
			}
		}

		if ((unsigned int)*p < KSEG0) {
			printk(KERN_WARNING "vpe_get_shared returned an "
			       "invalid pointer, maybe an error code %d\n",
			       (int)*p);
			ret = -ENOSYS;
			goto out_fail;
		}

		if ((ret = rtlx_init(*p)) < 0)
			goto out_ret;
	}

	chan = &rtlx->channel[index];

	state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
	if (state == RTLX_STATE_OPENED) {
		ret = -EBUSY;
		goto out_fail;
	}

out_fail:
	smp_mb();
	atomic_dec(&channel_wqs[index].in_open);
	smp_mb();

out_ret:
	return ret;
}
Example #18
0
// Initializes a spinlock
void
ulock_init(ulock_t * lock)
{
	xchg(&lock->locked, 0);
}
Example #19
0
// Acquires a spinlock: returns immediately if no one holds the lock, and
// spins until it is released if the lock is already held.
void
ulock_acquire(ulock_t * lock)
{
	while(xchg(&lock->locked, 1) == 1);
}
Example #20
0
// Acquire the lock.
void lock_acquire(lock_t* lk)
{

  while(xchg(&lk->locked, 1) != 0)
    ;
}
Example #21
0
// Release the lock.
void lock_release(lock_t* lk){
   xchg(&lk->locked, 0);
}
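Examples #13, #16, and #18 through #21 are all the same test-and-set spinlock: acquire spins while the swap keeps returning the locked value, and release swaps zero back in. A runnable user-space sketch with C11 atomics (atomic_exchange's default sequentially consistent ordering subsumes the acquire/release ordering a lock needs):

#include <stdatomic.h>

typedef struct {
	atomic_int locked;	/* 0 = free, 1 = held */
} spinlock_t;

static void spin_acquire(spinlock_t *lk)
{
	/* If the old value was already 1, someone else holds it: spin. */
	while (atomic_exchange(&lk->locked, 1) == 1)
		;
}

static void spin_release(spinlock_t *lk)
{
	atomic_exchange(&lk->locked, 0);	/* an atomic store also works */
}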
Example #22
0
static int clip_start_xmit(struct sk_buff *skb, knet_netdev_t *dev)
{
    struct clip_priv *clip_priv = PRIV(dev);
    struct atmarp_entry *entry;
    struct atm_vcc *vcc;
    int old;
    unsigned long flags;

    DPRINTK("clip_start_xmit (skb %p)\n", skb);
    if (!skb->dst)
    {
	printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
	dev_kfree_skb(skb);
	clip_priv->stats.tx_dropped++;
	return 0;
    }
    if (!skb->dst->neighbour)
    {
	printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR!\n");
	dev_kfree_skb(skb);
	clip_priv->stats.tx_dropped++;
	return 0;
    }
    entry = NEIGH2ENTRY(skb->dst->neighbour);
    if (!entry->vccs)
    {
	if (time_after(jiffies, entry->expires))
	{
	    /* should be resolved */
	    entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
	} 
	if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
	    skb_queue_tail(&entry->neigh->arp_queue, skb);
	else
	{
	    dev_kfree_skb(skb);
	    clip_priv->stats.tx_dropped++;
	}

	/* If a vcc was not resolved for a long time, it sends an InARP
	 * packet every 5 minutes. But if the other side connected now
	 * we do not want to wait.
	 */
	all_clip_vccs_start_resolving();
	return 0;
    }
    DPRINTK("neigh %p, vccs %p\n", entry, entry->vccs);
    ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
    DPRINTK("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
    if (entry->vccs->encap)
    {
	void *here;

	here = skb_push(skb, RFC1483LLC_LEN);
	memcpy(here, llc_oui, sizeof(llc_oui));
	((u16 *) here)[3] = skb->protocol;
    }
    atomic_add(skb->truesize, &vcc->tx_inuse);
    ATM_SKB(skb)->iovcnt = 0;
    ATM_SKB(skb)->atm_options = vcc->atm_options;
    entry->vccs->last_use = jiffies;
    DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc,vcc->dev);
    old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
    if (old)
    {
	printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
	return 0;
    }
    clip_priv->stats.tx_packets++;
    clip_priv->stats.tx_bytes += skb->len;
    vcc->dev->ops->send(vcc, skb);
    if (atm_may_send(vcc, 0))
    {
	entry->vccs->xoff = 0;
	return 0;
    }
    spin_lock_irqsave(&clip_priv->xoff_lock, flags);
    knet_netdev_stop_queue(dev); /* XOFF -> throttle immediately */
    barrier();
    if (!entry->vccs->xoff)
	knet_netdev_start_queue(dev);
    /* Oh, we just raced with clip_pop. netif_start_queue should be
       good enough, because nothing should really be asleep because
       of the brief netif_stop_queue. If this isn't true or if it
       changes, use netif_wake_queue instead. */
    spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
    return 0;
}
Example #23
0
void
trap(struct Trapframe *tf)
{
	// cprintf("Entered trap\n");
	// The environment may have set DF and some versions
	// of GCC rely on DF being clear
	asm volatile("cld" ::: "cc");

	// Halt the CPU if some other CPU has called panic()
	extern char *panicstr;
	if (panicstr)
		asm volatile("hlt");

	// Re-acquire the big kernel lock if we were halted in
	// sched_yield()
	if (xchg(&thiscpu->cpu_status, CPU_STARTED) == CPU_HALTED)
		lock_kernel();
	// Check that interrupts are disabled.  If this assertion
	// fails, DO NOT be tempted to fix it by inserting a "cli" in
	// the interrupt path.
	assert(!(read_eflags() & FL_IF));
	if ((tf->tf_cs & 3) == 3) {
		// Trapped from user mode.
		// Acquire the big kernel lock before doing any
		// serious kernel work.
		// LAB 4: Your code here.
		lock_kernel();
		int i;

		assert(curenv);

		// Garbage collect if the current environment is a zombie
		if (curenv->env_status == ENV_DYING) {
			env_free(curenv);
			curenv = NULL;
			sched_yield();
		}

		// Copy trap frame (which is currently on the stack)
		// into 'curenv->env_tf', so that running the environment
		// will restart at the trap point.
		curenv->env_tf = *tf;
		// The trapframe on the stack should be ignored from here on.
		tf = &curenv->env_tf;
	}

	// Record that tf is the last real trapframe so
	// print_trapframe can print some additional information.
	last_tf = tf;

	// Dispatch based on what type of trap occurred
	trap_dispatch(tf);

	// If we made it to this point, then no other environment was
	// scheduled, so we should return to the current environment
	// if doing so makes sense.
	if (curenv && curenv->env_status == ENV_RUNNING) {
		env_run(curenv);
	}
	else {
		sched_yield();
	}
}
Example #24
0
/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
void *kmalloc(size_t size, int priority)
{
	unsigned long flags;
	unsigned long type;
	int order, dma;
	struct block_header *p;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket = sizes;

	/* Get order */
	order = 0;
	{
		unsigned int realsize = size + sizeof(struct block_header);
		for (;;) {
			int ordersize = BLOCKSIZE(order);
			if (realsize <= ordersize)
				break;
			order++;
			bucket++;
			if (ordersize)
				continue;
			printk("kmalloc of too large a block (%d bytes).\n", (int) size);
			return NULL;
		}
	}

	dma = 0;
	type = MF_USED;
	pg = &bucket->firstfree;
	if (priority & GFP_DMA) {
		dma = 1;
		type = MF_DMA;
		pg = &bucket->dmafree;
	}

	priority &= GFP_LEVEL_MASK;

/* Sanity check... */
	if (intr_count && priority != GFP_ATOMIC) {
		static int count = 0;
		if (++count < 5) {
			printk("kmalloc called nonatomically from interrupt %p\n",
			       __builtin_return_address(0));
			priority = GFP_ATOMIC;
		}
	}

	save_flags(flags);
	cli();
	page = *pg;
	if (!page)
		goto no_bucket_page;

	p = page->firstfree;
	if (p->bh_flags != MF_FREE)
		goto not_free_on_freelist;

found_it:
	page->firstfree = p->bh_next;
	page->nfree--;
	if (!page->nfree)
		*pg = page->next;
	restore_flags(flags);
	bucket->nmallocs++;
	bucket->nbytesmalloced += size;
	p->bh_flags = type;	/* As of now this block is officially in use */
	p->bh_length = size;
#ifdef SADISTIC_KMALLOC
	memset(p+1, 0xf0, size);
#endif
	return p + 1;		/* Pointer arithmetic: increments past header */


no_bucket_page:
	/*
	 * If we didn't find a page already allocated for this
	 * bucket size, we need to get one..
	 *
	 * This can be done with ints on: it is private to this invocation
	 */
	restore_flags(flags);

	{
		int i, realpages;
		
		if (BLOCKSIZE(order) < PAGE_SIZE)
			realpages = 1;
		else
			realpages = NUM_PAGES(size + sizeof(struct block_header) +
					sizeof(struct page_descriptor));
			
		page = get_kmalloc_pages(priority, realpages, dma);
		if (!page)
			goto no_free_page;
found_cached_page:

		bucket->npages++;

		page->order = order | (realpages * PAGE_SIZE);
		/* Loop for all but last block: */
		i = (page->nfree = bucket->nblocks) - 1;
		p = BH(page + 1);
		while (i > 0) { /* doesn't happen except for small ^2 mallocs */
			i--;
			p->bh_flags = MF_FREE;
			p->bh_next = BH(((long) p) + BLOCKSIZE(order));
			p = p->bh_next;
		}
		/* Last block: */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;

		p = BH(page+1);
	}

	/*
	 * Now we're going to muck with the "global" freelist
	 * for this size: this should be uninterruptible
	 */
	cli();
	page->next = *pg;
	*pg = page;
	goto found_it;


no_free_page:
	/*
	 * No free pages, check the kmalloc cache of
	 * pages to see if maybe we have something available
	 */
	if (!dma && order < MAX_CACHE_ORDER) {
		page = xchg(kmalloc_cache+order, page);
		if (page)
			goto found_cached_page;
	}
	{
		static unsigned long last = 0;
		if (priority != GFP_BUFFER && priority != GFP_IO &&
		    (last + 10 * HZ < jiffies)) {
			last = jiffies;
			printk("Couldn't get a free page.....\n");
		}
		return NULL;
	}

not_free_on_freelist:
	restore_flags(flags);
	printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
	return NULL;
}
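The no_free_page path above is the consumer half of the one-slot mailbox from Examples #11/#12: page is NULL when control reaches it, so xchg(kmalloc_cache+order, page) empties the per-order cache slot and returns whatever page was stashed there. A hedged sketch of that cache slot (sizes and names are illustrative):

#include <stdatomic.h>
#include <stddef.h>

#define MAX_CACHE_ORDER 8

static _Atomic(void *) kmalloc_cache[MAX_CACHE_ORDER];

/* Take the cached page for this order, leaving the slot empty. */
static void *cache_take(int order)
{
	return atomic_exchange(&kmalloc_cache[order], NULL);
}

/* Stash a page in the slot, returning whatever page it displaces. */
static void *cache_put(int order, void *page)
{
	return atomic_exchange(&kmalloc_cache[order], page);
}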
Example #25
0
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);

	/* Unblock all signals */
	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	sigemptyset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Install the credentials */
	commit_creds(sub_info->cred);
	sub_info->cred = NULL;

	/* Install input pipe when needed */
	if (sub_info->stdin) {
		struct files_struct *f = current->files;
		struct fdtable *fdt;
		/* no races because files should be private here */
		sys_close(0);
		fd_install(0, sub_info->stdin);
		spin_lock(&f->file_lock);
		fdt = files_fdtable(f);
		FD_SET(0, fdt->open_fds);
		FD_CLR(0, fdt->close_on_exec);
		spin_unlock(&f->file_lock);

		/* and disallow core files too */
		current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
	}

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	if (sub_info->init) {
		retval = sub_info->init(sub_info);
		if (retval)
			goto fail;
	}

	retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	return 0;
}

void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	if (info->cred)
		put_cred(info->cred);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* Install a handler: if SIGCLD isn't handled sys_wait4 won't
	 * populate the status, but will return -ECHILD. */
	allow_signal(SIGCHLD);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret;

		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	if (sub_info->wait == UMH_NO_WAIT)
		call_usermodehelper_freeinfo(sub_info);
	else
		umh_complete(sub_info);
	return 0;
}

/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done.  */
	if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		sub_info->retval = pid;
		/* FALLTHROUGH */

	case UMH_WAIT_EXEC:
		umh_complete(sub_info);
	}
}

#ifdef CONFIG_PM_SLEEP
/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static int usermodehelper_disabled;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_pm_callback() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_pm_callback() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

void read_lock_usermodehelper(void)
{
	down_read(&umhelper_sem);
}
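The pivotal swap in Example #25 is umh_complete(): the helper and a killed UMH_KILLABLE caller both race to xchg(&sub_info->complete, NULL), and whoever gets the non-NULL pointer signals the completion, while whoever gets NULL knows the other side is gone and owns the cleanup. A hedged sketch of that claim-once protocol (types simplified; the struct is assumed heap-allocated):

#include <stdatomic.h>
#include <stdlib.h>

struct work {
	_Atomic(int *) done_flag;	/* non-NULL until one side claims it */
};

/* Returns 1 if this caller won the race and signalled completion,
 * 0 if the other side already gave up and we now own the cleanup. */
static int claim(struct work *w)
{
	int *flag = atomic_exchange(&w->done_flag, NULL);
	if (flag) {
		*flag = 1;		/* stands in for complete(comp) */
		return 1;
	}
	free(w);			/* stands in for the freeinfo path */
	return 0;
}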
Example #26
0
static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
{
	struct clip_priv *clip_priv = PRIV(dev);
	struct atmarp_entry *entry;
	struct atm_vcc *vcc;
	int old;
	unsigned long flags;

	DPRINTK("clip_start_xmit (skb %p)\n",skb);
	if (!skb->dst) {
		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->dst->neighbour) {
#if 0
		skb->dst->neighbour = clip_find_neighbour(skb->dst,1);
		if (!skb->dst->neighbour) {
			dev_kfree_skb(skb); /* lost that one */
			clip_priv->stats.tx_dropped++;
			return 0;
		}
#endif
		printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	entry = NEIGH2ENTRY(skb->dst->neighbour);
	if (!entry->vccs) {
		if (time_after(jiffies, entry->expires)) {
			/* should be resolved */
			entry->expires = jiffies+ATMARP_RETRY_DELAY*HZ;
			to_atmarpd(act_need,PRIV(dev)->number,entry->ip);
		}
		if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
			skb_queue_tail(&entry->neigh->arp_queue,skb);
		else {
			dev_kfree_skb(skb);
			clip_priv->stats.tx_dropped++;
		}
		return 0;
	}
	DPRINTK("neigh %p, vccs %p\n",entry,entry->vccs);
	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
	DPRINTK("using neighbour %p, vcc %p\n",skb->dst->neighbour,vcc);
	if (entry->vccs->encap) {
		void *here;

		here = skb_push(skb,RFC1483LLC_LEN);
		memcpy(here,llc_oui,sizeof(llc_oui));
		((u16 *) here)[3] = skb->protocol;
	}
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = vcc->atm_options;
	entry->vccs->last_use = jiffies;
	DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n",skb,vcc,vcc->dev);
	old = xchg(&entry->vccs->xoff,1); /* assume XOFF ... */
	if (old) {
		printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
		return 0;
	}
	clip_priv->stats.tx_packets++;
	clip_priv->stats.tx_bytes += skb->len;
	(void) vcc->send(vcc,skb);
	if (atm_may_send(vcc,0)) {
		entry->vccs->xoff = 0;
		return 0;
	}
	spin_lock_irqsave(&clip_priv->xoff_lock,flags);
	netif_stop_queue(dev); /* XOFF -> throttle immediately */
	barrier();
	if (!entry->vccs->xoff)
		netif_start_queue(dev);
		/* Oh, we just raced with clip_pop. netif_start_queue should be
		   good enough, because nothing should really be asleep because
		   of the brief netif_stop_queue. If this isn't true or if it
		   changes, use netif_wake_queue instead. */
	spin_unlock_irqrestore(&clip_priv->xoff_lock,flags);
	return 0;
}
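Examples #22 and #26 gate transmission with old = xchg(&entry->vccs->xoff, 1): setting the XOFF flag and learning whether it was already set happen in one step, so exactly one caller proceeds to send. A minimal sketch of the set-and-test-previous flag (the flag is illustrative):

#include <stdatomic.h>

static atomic_int xoff;

/* Returns 1 if this caller set the flag (and may proceed),
 * 0 if it was already set by somebody else. */
static int xoff_try_set(void)
{
	return atomic_exchange(&xoff, 1) == 0;
}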
Example #27
0
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
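Examples #27 and #28 press xchg() into service as a poor man's compare-and-swap: install the new trace unconditionally, and if the slot turns out to have been occupied, swap the old pointer straight back and fail with -EBUSY. A real compare-and-swap closes the brief window in which the old pointer is swapped out and back. A hedged sketch of both variants (the slot is illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) slot;

/* xchg-based install with rollback, as in Examples #27/#28. */
static int install_xchg(void *new_obj)
{
	void *old = atomic_exchange(&slot, new_obj);
	if (old) {
		atomic_exchange(&slot, old);	/* occupied: roll back */
		return -EBUSY;
	}
	return 0;
}

/* The race-free equivalent: install only if the slot is empty. */
static int install_cmpxchg(void *new_obj)
{
	void *expected = NULL;
	if (!atomic_compare_exchange_strong(&slot, &expected, new_obj))
		return -EBUSY;
	return 0;
}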
Example #28
0
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
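
For illustration, blk_trace_str2mask("read,write") returns BLK_TC_READ | BLK_TC_WRITE, while any unrecognized token aborts the whole parse with -EINVAL instead of being skipped; because the valid BLK_TC_* bits always form a non-negative mask, callers can distinguish success from failure by the sign of the return value.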

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
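
blk_trace_mask2str() is the inverse: it joins the names of the set bits with commas and always terminates with a newline, so a zero mask prints as an empty line. The returned length includes that newline, which is what a sysfs show routine is expected to hand back.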

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}
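
The unwind labels mirror the acquisition order exactly (bd_mutex, then the bdev reference, then the big kernel lock), so every early exit releases precisely what it took. The enable attribute is special-cased before the q->blk_trace == NULL check because it must report "0" rather than "disabled" when tracing is off.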

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}
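
Together with the "trace" attribute group above, these handlers back per-partition sysfs files such as .../trace/enable and .../trace/act_mask: writing a non-zero value to enable calls blk_trace_setup_queue(), writing 0 tears tracing down via blk_trace_remove_queue(), and act_mask accepts either a hex number or a comma-separated category list like "read,write". Writing any other attribute while tracing is disabled sets the queue up implicitly first.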

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

static int remove_os_hooks(void)
{
	void *orgFn;

	/* only remove the os hooks when install_os_hooks() has been called */
	if (!gb_enable_os_hooks)
		return 0;

	if ((orgFn = xchg(&px_original_sys_fork, 0)))
		orgFn = xchg(&system_call_table[__NR_fork - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_vfork, 0)))
		orgFn = xchg(&system_call_table[__NR_vfork - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_clone, 0)))
		orgFn = xchg(&system_call_table[__NR_clone - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_execve, 0)))
		orgFn = xchg(&system_call_table[__NR_execve - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_mmap, 0)))
		orgFn = xchg(&system_call_table[__NR_mmap - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_mmap2, 0)))
		orgFn = xchg(&system_call_table[__NR_mmap2 - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_exit, 0)))
		orgFn = xchg(&system_call_table[__NR_exit - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_exit_group, 0)))
		orgFn = xchg(&system_call_table[__NR_exit_group - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_kill, 0)))
		orgFn = xchg(&system_call_table[__NR_kill - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_tkill, 0)))
		orgFn = xchg(&system_call_table[__NR_tkill - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_tgkill, 0)))
		orgFn = xchg(&system_call_table[__NR_tgkill - __NR_SYSCALL_BASE], orgFn);

	if ((orgFn = xchg(&px_original_sys_prctl, 0)))
		orgFn = xchg(&system_call_table[__NR_prctl - __NR_SYSCALL_BASE], orgFn);

#ifdef CONFIG_PROFILING
	// unregister task exit notifier
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
#endif

	// unregister module state change notifier
	unregister_module_notifier(&nb_init);

	gb_enable_os_hooks = false;

	return 0;
}
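
The twelve near-identical restore pairs invite a local helper. A sketch under the same assumptions as the code above (system_call_table, __NR_SYSCALL_BASE, and the px_original_sys_* save slots are this module's own, not a stock kernel API):

/* Hypothetical condensation of the pattern above: atomically take the
 * saved original handler and, if one was recorded, swap it back into
 * the syscall-table slot it was taken from. */
#define RESTORE_SYSCALL(nr, saved)					\
	do {								\
		void *fn = xchg(&(saved), 0);				\
		if (fn)							\
			(void)xchg(&system_call_table[(nr) - __NR_SYSCALL_BASE], fn); \
	} while (0)

/* e.g. RESTORE_SYSCALL(__NR_fork, px_original_sys_fork); */
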
Example #30
void __fastcall TForm1::Button1Click(TObject *Sender)
{
        memset(_b,0,sizeof(_b));
        memset(_a,0,sizeof(_a));
        int i;
        int n=4;
        int e2=Edit2->Text.Length();
        int e3=Edit3->Text.Length();
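        // Parse each edit box as a big hex number: 8 hex digits (32 bits)
        // per limb, most-significant limb at the lower index. Operand A
        // fills _a[0..n-1], operand B fills _a[n..2n-1]; short leading
        // chunks are padded via the "0x0" prefix.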
        for(i=1; i<=n; i++)
                _a[n-i]= (AnsiString("0x0")+Edit2->Text.SubString(e2>=8*i?e2-8*i+1:1,e2>=8*i?8:(e2-8*(i-1)>0?e2-8*(i-1):0))).ToInt();
        for(i=1; i<=n; i++)
                _a[n-i+n]= (AnsiString("0x0")+Edit3->Text.SubString(e3>=8*i?e3-8*i+1:1,e3>=8*i?8:(e3-8*(i-1)>0?e3-8*(i-1):0))).ToInt();
        // n=2 variant: one Karatsuba level, three mul calls (kept for reference)

        /*
        equ(4, 1, 1);
        sub(4, 0, 1);
        equ(5, 2, 1);
        sub(5, 3, 1);
        xchg(1, 2, 1);
        mul(0, 1, 1);
        mul(2, 3, 1);
        mul(4, 5, 1);
        add(4, 0, 2);
        add(4, 2, 2);
        add(1, 4, 2);
        */


        // n=4: two Karatsuba levels, nine mul calls instead of sixteen

        equ(12, 2, 2);
        sub(12, 0, 2);
        equ(14, 4, 2);
        sub(14, 6, 2);
        xchg(2, 4, 2);
        equ(8, 1, 1);
        sub(8, 0, 1);
        equ(9, 2, 1);
        sub(9, 3, 1);
        xchg(1, 2, 1);
        equ(10, 5, 1);
        sub(10, 4, 1);
        equ(11, 6, 1);
        sub(11, 7, 1);
        xchg(5, 6, 1);
        equ(16, 13, 1);
        sub(16, 12, 1);
        equ(17, 14, 1);
        sub(17, 15, 1);
        xchg(13, 14, 1);
        mul(0, 1, 1);
        mul(2, 3, 1);
        mul(8, 9, 1);
        mul(4, 5, 1);
        mul(6, 7, 1);
        mul(10, 11, 1);
        mul(12, 13, 1);
        mul(14, 15, 1);
        mul(16, 17, 1);
        add(8, 0, 2);
        add(8, 2, 2);
        add(1, 8, 2);
        add(10, 4, 2);
        add(10, 6, 2);
        add(5, 10, 2);
        add(16, 12, 2);
        add(16, 14, 2);
        add(13, 16, 2);
        add(12, 0, 4);
        add(12, 4, 4);
        add(2, 12, 4);

        Edit4->Text="";

        for(i=0; i<n+n; i++)
                Edit4->Text=Edit4->Text+IntToHex((int)_a[i],8);
}
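
For reference, the schedule above is the subtractive Karatsuba recursion: writing x = x1*B + x0 and y = y1*B + y0, we get x*y = x1*y1*B^2 + (x1*y1 + x0*y0 - (x1 - x0)*(y1 - y0))*B + x0*y0, so doubling the limb count costs three half-size multiplications instead of four. The commented n=2 block performs one level (3 mul calls); the active n=4 block applies the recursion twice (9 mul calls against the 16 a schoolbook product would need), and the final loop prints the 2n result limbs most-significant first. The exact semantics of the equ/sub/add/mul/xchg helpers are defined elsewhere in this unit and are only assumed here.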