Code example #1
int iterateFilesystems(struct vfsmount* root, int (*callback) (struct vfsmount* mnt, unsigned long flags, bool fromMount))
{
    talpa_mount_struct *mnt, *nextmnt, *prevmnt;
    struct list_head *nexthead = NULL;
    int ret;
    unsigned m_seq = 1;

    mnt = real_mount(root);
    talpa_mntget(mnt); /* Take extra reference count for the loop */
    do
    {
        struct vfsmount* vfsmnt = vfs_mount(mnt);
        dbg("VFSMNT: 0x%p (at 0x%p), sb: 0x%p, dev: %s, flags: 0x%lx, fs: %s", mnt, mnt->mnt_parent,
                vfsmnt->mnt_sb, mnt->mnt_devname, vfsmnt->mnt_sb->s_flags, vfsmnt->mnt_sb->s_type->name);

        ret = callback(vfsmnt, vfsmnt->mnt_sb->s_flags, false);
        if (ret)
        {
            break;
        }

        talpa_vfsmount_lock(&m_seq); /* locks dcache_lock on 2.4 */

        /* Go down the tree for a child if there is one */
        if ( !list_empty(&mnt->mnt_mounts) )
        {
            nextmnt = list_entry(mnt->mnt_mounts.next, talpa_mount_struct, mnt_child);
        }
        else
        {
            nextmnt = mnt;
            /* If no children, go up until we find some. Abort on root. */
            while ( nextmnt != nextmnt->mnt_parent )
            {
                nexthead = nextmnt->mnt_child.next;
                /* Take next child if available */
                if ( nexthead != &nextmnt->mnt_parent->mnt_mounts )
                {
                    break;
                }
                /* Otherwise go up the tree */
                nextmnt = nextmnt->mnt_parent;
            }

            /* Abort if we are at the root */
            if ( nextmnt == nextmnt->mnt_parent )
            {
                talpa_vfsmount_unlock(&m_seq); /* unlocks dcache_lock on 2.4 */
                talpa_mntput(mnt);
                break;
            }

            /* Take next mount from the list */
            nextmnt = list_entry(nexthead, talpa_mount_struct, mnt_child);
        }

        talpa_mntget(nextmnt);
        prevmnt = mnt;
        mnt = nextmnt;
        talpa_vfsmount_unlock(&m_seq); /* unlocks dcache_lock on 2.4 */
        talpa_mntput(prevmnt);
    } while (mnt);

    /* Don't mntput root as we didn't take a reference for ourselves */

    return ret;
}
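
The walk above is a non-recursive depth-first traversal of the mount tree: visit the current mount, descend to its first child if one exists, otherwise step to the next sibling, and climb towards the root when a parent's child list is exhausted (the root is recognised by being its own parent). The following is a minimal, hypothetical userspace sketch of that same child/sibling/parent iteration over a generic tree; the node layout and names are invented for illustration and are not Talpa code.

/* Hypothetical userspace sketch of the non-recursive walk used above:
 * visit a node, go down to its first child if any, otherwise go sideways
 * to the next sibling, climbing to the parent when a child list runs out. */
#include <stdio.h>

struct node {
    const char  *name;
    struct node *parent;        /* the root points to itself, like mnt_parent */
    struct node *first_child;
    struct node *next_sibling;  /* NULL terminates a child list */
};

static void walk(struct node *root, int (*cb)(struct node *n))
{
    struct node *n = root;

    for (;;) {
        if (cb(n))
            break;                      /* callback asked us to abort */

        if (n->first_child) {           /* go down the tree */
            n = n->first_child;
            continue;
        }
        while (n != n->parent && !n->next_sibling)
            n = n->parent;              /* go up until a sibling exists */
        if (n == n->parent)
            break;                      /* back at the root: done */
        n = n->next_sibling;            /* take the next child of the parent */
    }
}

static int print_name(struct node *n)
{
    printf("%s\n", n->name);
    return 0;                           /* non-zero would stop the walk */
}

int main(void)
{
    struct node root = { "/",    &root, NULL, NULL };
    struct node a    = { "/a",   &root, NULL, NULL };
    struct node b    = { "/b",   &root, NULL, NULL };
    struct node a1   = { "/a/1", &a,    NULL, NULL };

    root.first_child = &a;
    a.next_sibling   = &b;
    a.first_child    = &a1;

    walk(&root, print_name);            /* prints /, /a, /a/1, /b */
    return 0;
}
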
Code example #2
static inline void 
mpls_list_del_init (struct list_head *entry)
{
	if (!list_empty(entry))
		list_del_init(entry);
}
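
The guard only matters because of how the kernel list primitives behave: list_del_init() re-initialises the removed entry to point at itself, so a later list_empty() on that entry returns true and a second removal can be skipped safely. Below is a hypothetical userspace re-implementation of just the primitives used here, handy for seeing the idiom outside the kernel; the bodies mirror the usual kernel semantics but are written from scratch for this sketch.

/* Hypothetical userspace re-implementation of the list primitives used in
 * mpls_list_del_init(): an entry removed with list_del_init() points at
 * itself afterwards, so list_empty(entry) becomes true. */
#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
    entry->prev = head->prev;
    entry->next = head;
    head->prev->next = entry;
    head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    INIT_LIST_HEAD(entry);              /* self-linked again */
}

int main(void)
{
    struct list_head queue, item;

    INIT_LIST_HEAD(&queue);
    INIT_LIST_HEAD(&item);

    list_add_tail(&item, &queue);
    assert(!list_empty(&queue));
    assert(!list_empty(&item));         /* a linked entry is not "empty" */

    if (!list_empty(&item))             /* the guard from mpls_list_del_init() */
        list_del_init(&item);

    assert(list_empty(&queue));
    assert(list_empty(&item));          /* deleted entry points at itself */
    return 0;
}
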
Code example #3
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
#ifdef CONFIG_DEBUG_MUTEXES
	unsigned char __mutex_contended = 0;
#endif

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);
#ifdef CONFIG_DEBUG_MUTEXES
	trace_mutex_contended(lock, ip);
	__mutex_contended = 1;	/* to pair mutex_contended & mutex_acquired */
#endif

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
#ifdef CONFIG_DEBUG_MUTEXES
	if (unlikely(__mutex_contended > 0))
		trace_mutex_acquired(lock, ip);
#endif
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
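
The slowpath above relies on the counting convention in lock->count: 1 means unlocked, 0 means locked with no waiters, and -1 means locked with possible waiters, which tells the unlock path whether anyone needs waking. The fast path is a cmpxchg from 1 to 0; the slowpath repeatedly exchanges the count to -1 and only owns the lock when the old value was 1. The fragment below is a deliberately stripped-down, hypothetical userspace illustration of that protocol using C11 atomics: it spins instead of sleeping on a wait list and omits the final "reset to 0 when no waiters remain" step, so it shows the counting scheme rather than the kernel implementation.

/* Hypothetical userspace sketch of the 1/0/-1 counting protocol used by the
 * kernel mutex above, with busy-waiting standing in for the wait list.
 *    1 -> unlocked
 *    0 -> locked, no waiters
 *   -1 -> locked, waiters may exist (unlock must wake someone)
 */
#include <sched.h>
#include <stdatomic.h>

struct toy_mutex { atomic_int count; };     /* initialise count to 1 */

static void toy_lock(struct toy_mutex *m)
{
    int unlocked = 1;

    /* fast path: 1 -> 0 when nobody holds the lock */
    if (atomic_compare_exchange_strong(&m->count, &unlocked, 0))
        return;

    /* slow path: stamp the lock as contended (-1) and retry until the value
     * we displaced was 1, i.e. we grabbed it the instant it was released */
    while (atomic_exchange(&m->count, -1) != 1)
        sched_yield();                      /* the real mutex sleeps here */
}

static void toy_unlock(struct toy_mutex *m)
{
    /* old value 0: nobody marked contention; old value -1: a waiter may be
     * waiting, which is where the kernel wakes the first queued waiter */
    atomic_exchange(&m->count, 1);
}

int main(void)
{
    struct toy_mutex m = { 1 };

    toy_lock(&m);
    /* ... critical section ... */
    toy_unlock(&m);
    return 0;
}
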
Code example #4
static int
rx_queue_empty(int mindex)
{
	return list_empty(&mux_device[mindex].rxq.list);
}
Code example #5
File: mutex.c  Project: HobbesOSR/kitten
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int
__mutex_lock_common(struct mutex *lock, long state)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
#if 0
		if (unlikely((state == TASK_INTERRUPTIBLE &&
					signal_pending(task)) ||
			      (state == TASK_KILLABLE &&
					fatal_signal_pending(task)))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			spin_unlock_mutex(&lock->wait_lock, flags);

			return -EINTR;
		}
#endif
		set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);


	return 0;
}
Code example #6
// LAB2: below code is used to check the first fit allocation algorithm (your EXERCISE 1) 
// NOTICE: You SHOULD NOT CHANGE basic_check, default_check functions!
static void
default_check(void) {
    int count = 0, total = 0;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        assert(PageProperty(p));
        count ++, total += p->property;
    }
    assert(total == nr_free_pages());

    basic_check();

    struct Page *p0 = alloc_pages(5), *p1, *p2;
    assert(p0 != NULL);
    assert(!PageProperty(p0));

    list_entry_t free_list_store = free_list;
    list_init(&free_list);
    assert(list_empty(&free_list));
    assert(alloc_page() == NULL);

    unsigned int nr_free_store = nr_free;
    nr_free = 0;

    free_pages(p0 + 2, 3);
    assert(alloc_pages(4) == NULL);
    assert(PageProperty(p0 + 2) && p0[2].property == 3);
    assert((p1 = alloc_pages(3)) != NULL);
    assert(alloc_page() == NULL);
    assert(p0 + 2 == p1);

    p2 = p0 + 1;
    free_page(p0);
    free_pages(p1, 3);
    assert(PageProperty(p0) && p0->property == 1);
    assert(PageProperty(p1) && p1->property == 3);

    assert((p0 = alloc_page()) == p2 - 1);
    free_page(p0);
    assert((p0 = alloc_pages(2)) == p2 + 1);

    free_pages(p0, 2);
    free_page(p2);

    assert((p0 = alloc_pages(5)) != NULL);
    assert(alloc_page() == NULL);

    assert(nr_free == 0);
    nr_free = nr_free_store;

    free_list = free_list_store;
    free_pages(p0, 5);

    le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        count --, total -= p->property;
    }
    assert(count == 0);
    assert(total == 0);
}
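
default_check() exercises a first-fit allocator: free blocks sit on free_list in address order, the head page of each block records the block length in p->property, and an allocation scans from the front for the first block that is large enough, splitting off the unused tail. As a rough illustration of that idea only, the sketch below uses hypothetical structures of its own (and an assumed 4096-byte page size) rather than the ucore lab's Page/free_area types.

/* Rough, hypothetical sketch of a first-fit scan over an address-ordered
 * free list of page blocks; not the ucore lab implementation. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_BYTES 4096                    /* assumed page size for the sketch */

struct free_block {
    size_t pages;                          /* block length, like p->property */
    struct free_block *next;               /* singly linked, address ordered */
};

static struct free_block *free_list;

/* Return the first block of at least n pages, splitting it when larger. */
static struct free_block *first_fit_alloc(size_t n)
{
    struct free_block **prev = &free_list;
    struct free_block *blk;

    for (blk = free_list; blk; prev = &blk->next, blk = blk->next) {
        if (blk->pages < n)
            continue;                      /* too small: keep scanning */

        if (blk->pages == n) {
            *prev = blk->next;             /* exact fit: unlink whole block */
        } else {
            /* split: the tail beyond the first n pages stays free */
            struct free_block *rest =
                (struct free_block *)((char *)blk + n * PAGE_BYTES);

            rest->pages = blk->pages - n;
            rest->next  = blk->next;
            *prev = rest;
            blk->pages = n;
        }
        return blk;
    }
    return NULL;                           /* nothing big enough: fail */
}

int main(void)
{
    /* carve a 16-page arena out of the heap and treat it as one free block */
    struct free_block *whole = malloc(16 * PAGE_BYTES);

    if (!whole)
        return 1;
    whole->pages = 16;
    whole->next  = NULL;
    free_list    = whole;

    printf("5 pages at %p\n", (void *)first_fit_alloc(5));
    printf("5 pages at %p\n", (void *)first_fit_alloc(5));
    printf("7 pages -> %p (expect NULL)\n", (void *)first_fit_alloc(7));

    free(whole);
    return 0;
}
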
Code example #7
File: ipath_qp.c  Project: johnny/CobraDroidBeta
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR)
		goto bail;

	qp->state = IB_QPS_ERR;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		ipath_schedule_send(qp);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
Code example #8
void nilfs_page_debug(const char *fname, int line, struct page *page,
		      const char *m, ...)
{
	struct address_space *mapping;
	struct inode *inode;
	va_list args;
	int len;
	char b[MSIZ];

	/* The page should be locked */
	len = snprintf(b, MSIZ, "PAGE %p ", page);
	va_start(args, m);
	len += vsnprintf(b + len, MSIZ - len, m, args);
	va_end(args);

	if (page == NULL) {
		printk(KERN_DEBUG "%s: page=NULL %s at %d\n", b, fname, line);
		return;
	}
	mapping = page->mapping;
	len += snprintf(b + len, MSIZ - len,
			": cnt=%d index#=%llu mapping=%d lru=%d",
			atomic_read(&page->_count),
			(unsigned long long)page->index, !!mapping,
			!list_empty(&page->lru));
	len += snprintf(b + len, MSIZ - len, " %s(%d) flags=", fname, line);
	len += snprint_page_flags(b + len, MSIZ - len, page);
	if (mapping) {
		if (buffer_nilfs_node(page_buffers(page)))
			inode = NILFS_BTNC_I(mapping);
		else
			inode = NILFS_AS_I(mapping);
		if (inode != NULL)
			len += snprintf(b + len, MSIZ - len, " ino=%lu",
					inode->i_ino);
	}
	printk(KERN_DEBUG "%s\n", b);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		if (!bh) {
			printk(KERN_DEBUG "PAGE %p: invalid page buffers\n",
			       page);
			return;
		}
		do {
			len = snprintf(b, MSIZ,
				       "  BH[%d] %p: cnt=%d blk#=%llu state=",
				       i, bh, atomic_read(&bh->b_count),
				       (unsigned long long)bh->b_blocknr);
			len += snprint_bh_state(b + len, MSIZ - len, bh);
			printk(KERN_DEBUG "%s\n", b);
			bh = bh->b_this_page;  i++;
			if (unlikely(!bh)) {
				printk(KERN_DEBUG
				       "PAGE %p: unexpected buffers end\n",
				       page);
				break;
			}
		} while (bh != head);
	}
}
Code example #9
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb;
	struct eth_dev	*dev;
	struct net_device *net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	if (!ep->driver_data) {
		usb_ep_free_request(ep, req);
		return;
	}

	dev = ep->driver_data;
	net = dev->net;

	if (!dev->port_usb) {
		usb_ep_free_request(ep, req);
		return;
	}

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length-1;
		else
			dev->net->stats.tx_bytes += req->length;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);

	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					net->trans_start = jiffies;
				}
			} else {
				spin_lock(&dev->req_lock);
				/*
				 * Put the idle request at the back of the
				 * queue. The xmit function will put the
				 * unfinished request at the beginning of the
				 * queue.
				 */
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
Code example #10
File: write.c  Project: BackupTheBerlios/wl530g-svn
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_inode;
	return !list_empty(&req->wb_list) && req->wb_list_head == &inode->u.nfs_i.dirty;
}
Code example #11
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
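
The comment block above spells out the contract: the thread is created stopped, wake_up_process() starts it, threadfn() should loop until kthread_should_stop() becomes true, and kthread_stop() reaps it and returns threadfn()'s result. A minimal module-style usage sketch follows; the thread name, sleep interval and module boilerplate are placeholders chosen for the example.

/* Minimal, hypothetical usage sketch for the API documented above. */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	/* Loop until kthread_stop() asks us to exit. */
	while (!kthread_should_stop())
		msleep_interruptible(1000);	/* placeholder for real work */
	return 0;				/* value returned by kthread_stop() */
}

static int __init worker_init(void)
{
	worker = kthread_create_on_node(worker_fn, NULL, -1, "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	wake_up_process(worker);		/* the thread is created stopped */
	return 0;
}

static void __exit worker_exit(void)
{
	kthread_stop(worker);			/* sets should_stop, waits for exit */
}

module_init(worker_init);
module_exit(worker_exit);
MODULE_LICENSE("GPL");
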

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
Code example #12
static int
ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
{
	__u8				ifc_num;
	struct usb_host_interface	*ifc_desc;
	struct usb_endpoint_descriptor	*ep_desc;
	int				i;
	struct ks_bridge		*ksb;
	struct miscdevice		*mdev, *fbdev;
	struct usb_device		*udev;
	unsigned int			bus_id;
	unsigned long			flags;
	struct data_pkt			*pkt;

	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;

	udev = interface_to_usbdev(ifc);
	fbdev = mdev = (struct miscdevice *)id->driver_info;

	bus_id = str_to_busid(udev->bus->bus_name);
	if (bus_id == BUS_UNDEF) {
		dev_err(&udev->dev, "unknown usb bus %s, probe failed\n",
				udev->bus->bus_name);
		return -ENODEV;
	}

	switch (id->idProduct) {
	case 0x9008:
		if (ifc_num != 0)
			return -ENODEV;
		ksb = __ksb[bus_id];
		mdev = &fbdev[bus_id];
		break;
	case 0x9048:
	case 0x904C:
	case 0x9075:
	case 0x908A:
		if (ifc_num != 2)
			return -ENODEV;
		ksb = __ksb[EFS_HSIC_BRIDGE_INDEX];
#ifdef CONFIG_USB_G_LGE_ANDROID
		ifc->needs_remote_wakeup = 1;
#endif
		break;
	case 0x9079:
		if (ifc_num != 2)
			return -ENODEV;
		ksb = __ksb[EFS_USB_BRIDGE_INDEX];
		break;
	default:
		return -ENODEV;
	}

	if (!ksb) {
		pr_err("ksb is not initialized");
		return -ENODEV;
	}

	ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
	ksb->ifc = ifc;
	ifc_desc = ifc->cur_altsetting;

	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
		ep_desc = &ifc_desc->endpoint[i].desc;

		if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
			ksb->in_epAddr = ep_desc->bEndpointAddress;

		if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
			ksb->out_epAddr = ep_desc->bEndpointAddress;
	}

	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
		dev_err(&udev->dev,
			"could not find bulk in and bulk out endpoints");
		usb_put_dev(ksb->udev);
		ksb->ifc = NULL;
		return -ENODEV;
	}

	ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
	ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);

	usb_set_intfdata(ifc, ksb);
	set_bit(USB_DEV_CONNECTED, &ksb->flags);
	atomic_set(&ksb->tx_pending_cnt, 0);
	atomic_set(&ksb->rx_pending_cnt, 0);

	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);

	/*free up stale buffers if any from previous disconnect*/
	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	ksb->fs_dev = *mdev;
	misc_register(&ksb->fs_dev);

#ifdef CONFIG_USB_G_LGE_ANDROID
	usb_enable_autosuspend(ksb->udev);
#else
	if (device_can_wakeup(&ksb->udev->dev)) {
		ifc->needs_remote_wakeup = 1;
		usb_enable_autosuspend(ksb->udev);
	}
#endif

	dev_dbg(&udev->dev, "usb dev connected");

	return 0;
}
Code example #13
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			dev_err(ksb->fs_dev.this_device,
					"copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}
		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	dev_dbg(ksb->fs_dev.this_device, "count:%zu space:%zu copied:%zu", count,
			space, copied);

	return copied;
}
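
ksb_fs_read() is the consumer half of a very common kernel pattern: sleep on a wait queue until list_empty() turns false, then detach entries one at a time under a spinlock. The skeleton below is a hypothetical, boiled-down version of that producer/consumer handshake using the standard list, spinlock and wait-queue APIs, with the USB-bridge specifics (packets, partial reads, disconnect handling) left out.

/* Hypothetical skeleton of the wait_event + list_empty consumer pattern
 * used in ksb_fs_read(), stripped of the USB-bridge details. */
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct item {
	struct list_head list;
	int payload;
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_wq);

/* Producer: queue one item and wake any sleeping reader. */
static void produce(struct item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	list_add_tail(&it->list, &pending);
	spin_unlock_irqrestore(&pending_lock, flags);
	wake_up(&pending_wq);
}

/* Consumer: sleep until the list is non-empty, then detach the first item. */
static struct item *consume(void)
{
	struct item *it = NULL;
	unsigned long flags;

	if (wait_event_interruptible(pending_wq, !list_empty(&pending)))
		return NULL;			/* woken by a signal, as in ksb_fs_read() */

	spin_lock_irqsave(&pending_lock, flags);
	if (!list_empty(&pending)) {		/* recheck under the lock */
		it = list_first_entry(&pending, struct item, list);
		list_del_init(&it->list);
	}
	spin_unlock_irqrestore(&pending_lock, flags);
	return it;
}
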
Code example #14
/**
 * We iterate all the possible parents of our mount point,
 * and see if they also have a mount on the same mount point.
 */
int countPropagationPoints(struct vfsmount* vmnt)
{
#ifdef TALPA_SHARED_MOUNTS

    talpa_mount_struct *mnt = real_mount(vmnt);
    talpa_mount_struct *parent = mnt->mnt_parent;
    talpa_mount_struct *child = NULL;
    talpa_mount_struct *m;
    int ret = 1;

    unsigned m_seq = 1;

#ifdef DEBUG_PROPAGATION_POINTS
    talpa_mnt_namespace_t* ns;
    size_t path_size = 0;
    char* path = talpa_alloc_path_atomic(&path_size);
    const char* p = absolutePath(mnt->mnt_mountpoint,vfs_mount(parent), path, path_size);

    if (unlikely( path == NULL ))
    {
        warn("talpa_alloc_path failed");
        return 0;
    }

    ns = mnt->mnt_ns;
    dbg("PATH START: %s ns=%p ns.ns.inum=%u",p,ns,PROC_INUM_FROM_MNT_NAMESPACE(ns));

    ns = parent->mnt_ns;
    p = absolutePath(parent->mnt_mountpoint,vfs_mount(parent->mnt_parent), path, path_size);
    dbg("PARENT: %s ns=%p ns.ns.inum=%u",p,ns,PROC_INUM_FROM_MNT_NAMESPACE(ns));
#endif /* DEBUG_PROPAGATION_POINTS */

    talpa_vfsmount_lock(&m_seq); /* locks dcache_lock on 2.4 */

    /**
     * Iterate all possible shared/slave destination parents for copies of vmnt
     */
    for (m = propagation_next(parent, parent); m;
            m = propagation_next(m, parent))
    {
        child = talpa_lookup_mnt_last(vfs_mount(m), mnt->mnt_mountpoint);
        if (child)
        {
#ifdef DEBUG_PROPAGATION_POINTS
            /* absolutePath() locks up in d_path() if vfsmount_lock is already held */
            p = child->mnt_mountpoint->d_name.name;
            ns = child->mnt_ns;
            dbg("CHILD: %s ns=%p ns.ns.inum=%u",p,ns,PROC_INUM_FROM_MNT_NAMESPACE(ns));
#endif /* DEBUG_PROPAGATION_POINTS */
            if (list_empty(&child->mnt_mounts))
            {
                ret += 1;
            }
        }
    }
    talpa_vfsmount_unlock(&m_seq); /* unlocks dcache_lock on 2.4 */

#ifdef DEBUG_PROPAGATION_POINTS
    talpa_free_path(path);
#endif
    return ret;
#else /* ! TALPA_SHARED_MOUNTS */
    return 1;
#endif /* TALPA_SHARED_MOUNTS */
}
Code example #15
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
Code example #16
File: masq.c  Project: dwrudis/clustermatic
void masq_remove_proc(struct task_struct *tsk, int update_nlchild) {
    struct bproc_masq_master_t *m;

    /* update the child counts on the parent process(es) but only if
     * we're going away in a silent exit.  (i.e. tsk == current and
     * we're running, not from wait->release->unmasq in which case tsk
     * is some other process and a zombie.) */
    if (update_nlchild) {
	if (BPROC_ISMASQ(tsk->real_parent)) {
	    tsk->real_parent->bproc.nlchild++;
	    wake_up_interruptible(&tsk->real_parent->wait_chldexit);
	}
	if (tsk->parent != tsk->real_parent && BPROC_ISMASQ(tsk->parent)) {
	    tsk->parent->bproc.nlchild++;
	    wake_up_interruptible(&tsk->parent->wait_chldexit);
	}
    }

    /* Remove this process from the list of processes for this master.
     * If this was the last reference to it, free the master structure
     * as well. */
    m = tsk->bproc.master;
    list_del(&tsk->bproc.list);
    tsk->bproc.master = 0;
    if (atomic_dec_and_test(&m->count))
	kfree(m);

    /*ptrace_disable ? */
    if (tsk->state < TASK_ZOMBIE) {
	/* Since we're trying to disappear silently, we should
	 * reparent ourselves to init which will do the wait() on
	 * us. */
	ptrace_unlink(tsk);
	tsk->exit_signal = SIGCHLD;
	set_parents(tsk, child_reaper, child_reaper);
    }

#if 0
    /* Shed child processes - we just have them re-select parents.  If
     * this is being called from release() we shouldn't have any
     * children...  */
    while (!list_empty(&tsk->children)) {
	struct task_struct *child;
	child = list_entry(tsk->children.next, struct task_struct, sibling);

	if (!BPROC_ISMASQ(child) || child->bproc.master != m)
	    printk(KERN_ERR "bproc: masq_remove_proc: child isn't in my"
		   " process space!\n");

	masq_select_parents(child->bproc.master, child);

	if (child->parent == tsk || child->real_parent == tsk) {
	    printk(KERN_CRIT "bproc: masq: child is still mine! me=%d child=%d\n",
		   tsk->pid, child->pid);
	}
    }
    while (!list_empty(&tsk->ptrace_children)) {
	struct task_struct *child;
	child = list_entry(tsk->ptrace_children.next, struct task_struct,
			   ptrace_list);

	if (!BPROC_ISMASQ(child) || child->bproc.master != m)
	    printk(KERN_ERR "bproc: masq_remove_proc: child isn't in my"
		   " process space!\n");

	masq_select_parents(child->bproc.master, child);

	if (child->parent == tsk || child->real_parent == tsk) {
	    printk(KERN_CRIT "bproc: masq: child is still mine! me=%d child=%d\n",
		   tsk->pid, child->pid);
	}
    }
#endif
}
Code example #17
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct cx25821_buffer *buf =
	    container_of(vb, struct cx25821_buffer, vb);
	struct cx25821_buffer *prev;
	struct cx25821_fh *fh = vq->priv_data;
	struct cx25821_dev *dev = fh->dev;
	struct cx25821_dmaqueue *q = &dev->vidq[SRAM_CH11];

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0);	/* bits 63-32 */

	dprintk(2, "jmp to stopper (0x%x)\n", buf->risc.jmp[1]);

	if (!list_empty(&q->queued)) {
		list_add_tail(&buf->vb.queue, &q->queued);
		buf->vb.state = VIDEOBUF_QUEUED;
		dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf,
			buf->vb.i);

	} else if (list_empty(&q->active)) {
		list_add_tail(&buf->vb.queue, &q->active);
		cx25821_start_video_dma(dev, q, buf,
					&dev->sram_channels[SRAM_CH11]);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = q->count++;
		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(2,
			"[%p/%d] buffer_queue - first active, buf cnt = %d, q->count = %d\n",
			buf, buf->vb.i, buf->count, q->count);
	} else {
		prev =
		    list_entry(q->active.prev, struct cx25821_buffer, vb.queue);
		if (prev->vb.width == buf->vb.width
		    && prev->vb.height == buf->vb.height
		    && prev->fmt == buf->fmt) {
			list_add_tail(&buf->vb.queue, &q->active);
			buf->vb.state = VIDEOBUF_ACTIVE;
			buf->count = q->count++;
			prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);

			/* 64 bit bits 63-32 */
			prev->risc.jmp[2] = cpu_to_le32(0);
			dprintk(2,
				"[%p/%d] buffer_queue - append to active, buf->count=%d\n",
				buf, buf->vb.i, buf->count);

		} else {
			list_add_tail(&buf->vb.queue, &q->queued);
			buf->vb.state = VIDEOBUF_QUEUED;
			dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf,
				buf->vb.i);
		}
	}

	if (list_empty(&q->active)) {
		dprintk(2, "active queue empty!\n");
	}
}
Code example #18
File: readpage.c  Project: mdamt/linux
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}
Code example #19
File: device.c  Project: DeltaYang/wine
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}
Code example #20
File: inode.c  Project: flwh/Alcatel_OT_985_kernel
void ocfs2_clear_inode(struct inode *inode)
{
	int status;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry_void();

	if (!inode)
		goto bail;

	mlog(0, "Clearing inode: %llu, nlink = %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink);

	mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
			"Inode=%lu\n", inode->i_ino);

	dquot_drop(inode);

	/* To prevent remote deletes we hold the open lock beforehand; now it
	 * is time to unlock the PR and EX open locks. */
	ocfs2_open_unlock(inode);

	/* Do these before all the other work so that we don't bounce
	 * the downconvert thread while waiting to destroy the locks. */
	ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);

	ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
			   &oi->ip_la_data_resv);
	ocfs2_resv_init_once(&oi->ip_la_data_resv);

	/* We very well may get a clear_inode before all of an inode's
	 * metadata has hit disk. Of course, we can't drop any cluster
	 * locks until the journal has finished with it. The only
	 * exception here are successfully wiped inodes - their
	 * metadata can now be considered to be part of the system
	 * inodes from which it came. */
	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
		ocfs2_checkpoint_inode(inode);

	mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
			"Clear inode of %llu, inode has io markers\n",
			(unsigned long long)oi->ip_blkno);

	ocfs2_extent_map_trunc(inode, 0);

	status = ocfs2_drop_inode_locks(inode);
	if (status < 0)
		mlog_errno(status);

	ocfs2_lock_res_free(&oi->ip_rw_lockres);
	ocfs2_lock_res_free(&oi->ip_inode_lockres);
	ocfs2_lock_res_free(&oi->ip_open_lockres);

	ocfs2_metadata_cache_exit(INODE_CACHE(inode));

	mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached,
			"Clear inode of %llu, inode has %u cache items\n",
			(unsigned long long)oi->ip_blkno,
			INODE_CACHE(inode)->ci_num_cached);

	mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Clear inode of %llu, inode has a bad flag\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
			"Clear inode of %llu, inode is locked\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
			"Clear inode of %llu, io_mutex is locked\n",
			(unsigned long long)oi->ip_blkno);
	mutex_unlock(&oi->ip_io_mutex);

	/*
	 * down_trylock() returns 0, down_write_trylock() returns 1
	 * kernel 1, world 0
	 */
	mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
			"Clear inode of %llu, alloc_sem is locked\n",
			(unsigned long long)oi->ip_blkno);
	up_write(&oi->ip_alloc_sem);

	mlog_bug_on_msg(oi->ip_open_count,
			"Clear inode of %llu has open count %d\n",
			(unsigned long long)oi->ip_blkno, oi->ip_open_count);

	/* Clear all other flags. */
	oi->ip_flags = 0;
	oi->ip_dir_start_lookup = 0;
	oi->ip_blkno = 0ULL;

	/*
	 * ip_jinode is used to track txns against this inode. We ensure that
	 * the journal is flushed before journal shutdown. Thus it is safe to
	 * have inodes get cleaned up after journal shutdown.
	 */
	jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
				       &oi->ip_jinode);

bail:
	mlog_exit_void();
}
Code example #21
File: ipath_qp.c  Project: johnny/CobraDroidBeta
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * don't allow invalid Path MTU values or greater than 2048
	 * unless we are configured for a 4KB MTU
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
		(ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
		(attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}
Code example #22
File: commctrl.c  Project: CSCLOG/beaglebone
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
Code example #23
File: crtools.c  Project: GolovanovSrg/criu
int main(int argc, char *argv[], char *envp[])
{
	pid_t pid = 0, tree_id = 0;
	int ret = -1;
	bool usage_error = true;
	bool has_exec_cmd = false;
	int opt, idx;
	int log_level = LOG_UNSET;
	char *imgs_dir = ".";
	char *work_dir = NULL;
	static const char short_opts[] = "dSsRf:F:t:p:hcD:o:n:v::x::Vr:jlW:L:M:";
	static struct option long_opts[] = {
		{ "tree",			required_argument,	0, 't'	},
		{ "pid",			required_argument,	0, 'p'	},
		{ "leave-stopped",		no_argument,		0, 's'	},
		{ "leave-running",		no_argument,		0, 'R'	},
		{ "restore-detached",		no_argument,		0, 'd'	},
		{ "restore-sibling",		no_argument,		0, 'S'	},
		{ "daemon",			no_argument,		0, 'd'	},
		{ "contents",			no_argument,		0, 'c'	},
		{ "file",			required_argument,	0, 'f'	},
		{ "fields",			required_argument,	0, 'F'	},
		{ "images-dir",			required_argument,	0, 'D'	},
		{ "work-dir",			required_argument,	0, 'W'	},
		{ "log-file",			required_argument,	0, 'o'	},
		{ "namespaces",			required_argument,	0, 'n'	},
		{ "root",			required_argument,	0, 'r'	},
		{ USK_EXT_PARAM,		optional_argument,	0, 'x'	},
		{ "help",			no_argument,		0, 'h'	},
		{ SK_EST_PARAM,			no_argument,		0, 1042	},
		{ "close",			required_argument,	0, 1043	},
		{ "log-pid",			no_argument,		0, 1044	},
		{ "version",			no_argument,		0, 'V'	},
		{ "evasive-devices",		no_argument,		0, 1045	},
		{ "pidfile",			required_argument,	0, 1046	},
		{ "veth-pair",			required_argument,	0, 1047	},
		{ "action-script",		required_argument,	0, 1049	},
		{ LREMAP_PARAM,			no_argument,		0, 1041	},
		{ OPT_SHELL_JOB,		no_argument,		0, 'j'	},
		{ OPT_FILE_LOCKS,		no_argument,		0, 'l'	},
		{ "page-server",		no_argument,		0, 1050	},
		{ "address",			required_argument,	0, 1051	},
		{ "port",			required_argument,	0, 1052	},
		{ "prev-images-dir",		required_argument,	0, 1053	},
		{ "ms",				no_argument,		0, 1054	},
		{ "track-mem",			no_argument,		0, 1055	},
		{ "auto-dedup",			no_argument,		0, 1056	},
		{ "libdir",			required_argument,	0, 'L'	},
		{ "cpu-cap",			optional_argument,	0, 1057	},
		{ "force-irmap",		no_argument,		0, 1058	},
		{ "ext-mount-map",		required_argument,	0, 'M'	},
		{ "exec-cmd",			no_argument,		0, 1059	},
		{ "manage-cgroups",		optional_argument,	0, 1060	},
		{ "cgroup-root",		required_argument,	0, 1061	},
		{ "inherit-fd",			required_argument,	0, 1062	},
		{ "feature",			required_argument,	0, 1063	},
		{ "skip-mnt",			required_argument,	0, 1064 },
		{ "enable-fs",			required_argument,	0, 1065 },
		{ "enable-external-sharing", 	no_argument, 		0, 1066 },
		{ "enable-external-masters", 	no_argument, 		0, 1067 },
		{ "freeze-cgroup",		required_argument,	0, 1068 },
		{ "ghost-limit",		required_argument,	0, 1069 },
		{ },
	};

	BUILD_BUG_ON(PAGE_SIZE != PAGE_IMAGE_SIZE);

	cr_pb_init();
	if (restrict_uid(getuid(), getgid()))
		return 1;

	setproctitle_init(argc, argv, envp);

	if (argc < 2)
		goto usage;

	init_opts();

	if (init_service_fd())
		return 1;

	if (!strcmp(argv[1], "swrk")) {
		if (argc < 3)
			goto usage;
		/*
		 * This is to start criu service worker from libcriu calls.
		 * The usage is "criu swrk <fd>" and is not for CLI/scripts.
		 * The argument semantics can change at any time with the
		 * corresponding lib call change.
		 */
		opts.swrk_restore = true;
		return cr_service_work(atoi(argv[2]));
	}

	while (1) {
		idx = -1;
		opt = getopt_long(argc, argv, short_opts, long_opts, &idx);
		if (opt == -1)
			break;

		switch (opt) {
		case 's':
			opts.final_state = TASK_STOPPED;
			break;
		case 'R':
			opts.final_state = TASK_ALIVE;
			break;
		case 'x':
			if (optarg && unix_sk_ids_parse(optarg) < 0)
				return 1;
			opts.ext_unix_sk = true;
			break;
		case 'p':
			pid = atoi(optarg);
			if (pid <= 0)
				goto bad_arg;
			break;
		case 't':
			tree_id = atoi(optarg);
			if (tree_id <= 0)
				goto bad_arg;
			break;
		case 'c':
			opts.show_pages_content	= true;
			break;
		case 'f':
			opts.show_dump_file = optarg;
			break;
		case 'F':
			opts.show_fmt = optarg;
			break;
		case 'r':
			opts.root = optarg;
			break;
		case 'd':
			opts.restore_detach = true;
			break;
		case 'S':
			opts.restore_sibling = true;
			break;
		case 'D':
			imgs_dir = optarg;
			break;
		case 'W':
			work_dir = optarg;
			break;
		case 'o':
			opts.output = optarg;
			break;
		case 'n':
			if (parse_ns_string(optarg))
				goto bad_arg;
			break;
		case 'v':
			if (log_level == LOG_UNSET)
				log_level = 0;
			if (optarg) {
				if (optarg[0] == 'v')
					/* handle -vvvvv */
					log_level += strlen(optarg) + 1;
				else
					log_level = atoi(optarg);
			} else
				log_level++;
			break;
		case 1041:
			pr_info("Will allow link remaps on FS\n");
			opts.link_remap_ok = true;
			break;
		case 1042:
			pr_info("Will dump TCP connections\n");
			opts.tcp_established_ok = true;
			break;
		case 1043: {
			int fd;

			fd = atoi(optarg);
			pr_info("Closing fd %d\n", fd);
			close(fd);
			break;
		}
		case 1044:
			opts.log_file_per_pid = 1;
			break;
		case 1045:
			opts.evasive_devices = true;
			break;
		case 1046:
			opts.pidfile = optarg;
			break;
		case 1047:
			{
				char *aux;

				aux = strchr(optarg, '=');
				if (aux == NULL)
					goto bad_arg;

				*aux = '\0';
				if (veth_pair_add(optarg, aux + 1))
					return 1;
			}
			break;
		case 1049:
			if (add_script(optarg, 0))
				return 1;

			break;
		case 1050:
			opts.use_page_server = true;
			break;
		case 1051:
			opts.addr = optarg;
			break;
		case 1052:
			opts.ps_port = htons(atoi(optarg));
			if (!opts.ps_port)
				goto bad_arg;
			break;
		case 'j':
			opts.shell_job = true;
			break;
		case 'l':
			opts.handle_file_locks = true;
			break;
		case 1053:
			opts.img_parent = optarg;
			break;
		case 1055:
			opts.track_mem = true;
			break;
		case 1056:
			opts.auto_dedup = true;
			break;
		case 1057:
			if (parse_cpu_cap(&opts, optarg))
				goto usage;
			break;
		case 1058:
			opts.force_irmap = true;
			break;
		case 1054:
			opts.check_ms_kernel = true;
			break;
		case 'L':
			opts.libdir = optarg;
			break;
		case 1059:
			has_exec_cmd = true;
			break;
		case 1060:
			if (parse_manage_cgroups(&opts, optarg))
				goto usage;
			break;
		case 1061:
			{
				char *path, *ctl;

				path = strchr(optarg, ':');
				if (path) {
					*path = '\0';
					path++;
					ctl = optarg;
				} else {
					path = optarg;
					ctl = NULL;
				}

				if (new_cg_root_add(ctl, path))
					return -1;
			}
			break;
		case 1062:
			if (inherit_fd_parse(optarg) < 0)
				return 1;
			break;
		case 1063:
			if (check_add_feature(optarg) < 0)
				return 1;
			break;
		case 1064:
			if (!add_skip_mount(optarg))
				return 1;
			break;
		case 1065:
			if (!add_fsname_auto(optarg))
				return 1;
			break;
		case 1066:
			opts.enable_external_sharing = true;
			break;
		case 1067:
			opts.enable_external_masters = true;
			break;
		case 1068:
			opts.freeze_cgroup = optarg;
			break;
		case 1069:
			opts.ghost_limit = parse_size(optarg);
			break;
		case 'M':
			{
				char *aux;

				if (strcmp(optarg, "auto") == 0) {
					opts.autodetect_ext_mounts = true;
					break;
				}

				aux = strchr(optarg, ':');
				if (aux == NULL)
					goto bad_arg;

				*aux = '\0';
				if (ext_mount_add(optarg, aux + 1))
					return 1;
			}
			break;
		case 'V':
			pr_msg("Version: %s\n", CRIU_VERSION);
			if (strcmp(CRIU_GITID, "0"))
				pr_msg("GitID: %s\n", CRIU_GITID);
			return 0;
		case 'h':
			usage_error = false;
			goto usage;
		default:
			goto usage;
		}
	}

	if (!opts.restore_detach && opts.restore_sibling) {
		pr_msg("--restore-sibling only makes sense with --restore-detach\n");
		return 1;
	}

	if (!opts.autodetect_ext_mounts && (opts.enable_external_masters || opts.enable_external_sharing)) {
		pr_msg("must specify --ext-mount-map auto with --enable-external-{sharing|masters}");
		return 1;
	}

	if (work_dir == NULL)
		work_dir = imgs_dir;

	if (optind >= argc) {
		pr_msg("Error: command is required\n");
		goto usage;
	}

	if (has_exec_cmd) {
		if (argc - optind <= 1) {
			pr_msg("Error: --exec-cmd requires a command\n");
			goto usage;
		}

		if (strcmp(argv[optind], "restore")) {
			pr_msg("Error: --exec-cmd is available for the restore command only\n");
			goto usage;
		}

		if (opts.restore_detach) {
			pr_msg("Error: --restore-detached and --exec-cmd cannot be used together\n");
			goto usage;
		}

		opts.exec_cmd = xmalloc((argc - optind) * sizeof(char *));
		if (!opts.exec_cmd)
			return 1;
		memcpy(opts.exec_cmd, &argv[optind + 1], (argc - optind - 1) * sizeof(char *));
		opts.exec_cmd[argc - optind - 1] = NULL;
	} else if (optind + 1 != argc) {
		pr_err("Unable to handle more than one command\n");
		goto usage;
	}

	/* We must not open imgs dir, if service is called */
	if (strcmp(argv[optind], "service")) {
		ret = open_image_dir(imgs_dir);
		if (ret < 0)
			return 1;
	}

	if (chdir(work_dir)) {
		pr_perror("Can't change directory to %s", work_dir);
		return 1;
	}

	log_set_loglevel(log_level);

	if (log_init(opts.output))
		return 1;

	if (!list_empty(&opts.inherit_fds)) {
		if (strcmp(argv[optind], "restore")) {
			pr_err("--inherit-fd is restore-only option\n");
			return 1;
		}
		/* now that log file is set up, print inherit fd list */
		inherit_fd_log();
	}

	if (opts.img_parent)
		pr_info("Will do snapshot from %s\n", opts.img_parent);

	if (!strcmp(argv[optind], "dump")) {
		preload_socket_modules();

		if (!tree_id)
			goto opt_pid_missing;
		return cr_dump_tasks(tree_id);
	}

	if (!strcmp(argv[optind], "pre-dump")) {
		if (!tree_id)
			goto opt_pid_missing;

		return cr_pre_dump_tasks(tree_id) != 0;
	}

	if (!strcmp(argv[optind], "restore")) {
		if (tree_id)
			pr_warn("Using -t with criu restore is obsoleted\n");

		ret = cr_restore_tasks();
		if (ret == 0 && opts.exec_cmd) {
			close_pid_proc();
			execvp(opts.exec_cmd[0], opts.exec_cmd);
			pr_perror("Failed to exec command %s", opts.exec_cmd[0]);
			ret = 1;
		}

		return ret != 0;
	}

	if (!strcmp(argv[optind], "show"))
		return cr_show(pid) != 0;

	if (!strcmp(argv[optind], "check"))
		return cr_check() != 0;

	if (!strcmp(argv[optind], "exec")) {
		if (!pid)
			pid = tree_id; /* old usage */
		if (!pid)
			goto opt_pid_missing;
		return cr_exec(pid, argv + optind + 1) != 0;
	}

	if (!strcmp(argv[optind], "page-server"))
		return cr_page_server(opts.daemon_mode, -1) > 0 ? 0 : 1;

	if (!strcmp(argv[optind], "service"))
		return cr_service(opts.daemon_mode);

	if (!strcmp(argv[optind], "dedup"))
		return cr_dedup() != 0;

	if (!strcmp(argv[optind], "cpuinfo")) {
		if (!argv[optind + 1])
			goto usage;
		if (!strcmp(argv[optind + 1], "dump"))
			return cpuinfo_dump();
		else if (!strcmp(argv[optind + 1], "check"))
			return cpuinfo_check();
	}

	pr_msg("Error: unknown command: %s\n", argv[optind]);
usage:
	pr_msg("\n"
"Usage:\n"
"  criu dump|pre-dump -t PID [<options>]\n"
"  criu restore [<options>]\n"
"  criu check [--ms]\n"
"  criu exec -p PID <syscall-string>\n"
"  criu page-server\n"
"  criu service [<options>]\n"
"  criu dedup\n"
"\n"
"Commands:\n"
"  dump           checkpoint a process/tree identified by pid\n"
"  pre-dump       pre-dump task(s) minimizing their frozen time\n"
"  restore        restore a process/tree\n"
"  check          checks whether the kernel support is up-to-date\n"
"  exec           execute a system call by other task\n"
"  page-server    launch page server\n"
"  service        launch service\n"
"  dedup          remove duplicates in memory dump\n"
"  cpuinfo dump   writes cpu information into image file\n"
"  cpuinfo check  validates cpu information read from image file\n"
	);

	if (usage_error) {
		pr_msg("\nTry -h|--help for more info\n");
		return 1;
	}

	pr_msg("\n"
"Dump/Restore options:\n"
"\n"
"* Generic:\n"
"  -t|--tree PID         checkpoint a process tree identified by PID\n"
"  -d|--restore-detached detach after restore\n"
"  -S|--restore-sibling  restore root task as sibling\n"
"  -s|--leave-stopped    leave tasks in stopped state after checkpoint\n"
"  -R|--leave-running    leave tasks in running state after checkpoint\n"
"  -D|--images-dir DIR   directory for image files\n"
"     --pidfile FILE     write root task, service or page-server pid to FILE\n"
"  -W|--work-dir DIR     directory to cd and write logs/pidfiles/stats to\n"
"                        (if not specified, value of --images-dir is used)\n"
"     --cpu-cap [CAP]    require certain cpu capability. CAP: may be one of:\n"
"                        'cpu','fpu','all','ins','none'. To disable capability, prefix it with '^'.\n"
"     --exec-cmd         execute the command specified after '--' on successful\n"
"                        restore making it the parent of the restored process\n"
"  --freeze-cgroup\n"
"                        use cgroup freezer to collect processes\n"
"\n"
"* Special resources support:\n"
"  -x|--" USK_EXT_PARAM "inode,.." "      allow external unix connections (optionally can be assign socket's inode that allows one-sided dump)\n"
"     --" SK_EST_PARAM "  checkpoint/restore established TCP connections\n"
"  -r|--root PATH        change the root filesystem (when run in mount namespace)\n"
"  --evasive-devices     use any path to a device file if the original one\n"
"                        is inaccessible\n"
"  --veth-pair IN=OUT    map inside veth device name to outside one\n"
"                        can optionally append @<bridge-name> to OUT for moving\n"
"                        the outside veth to the named bridge\n"
"  --link-remap          allow one to link unlinked files back when possible\n"
"  --ghost-limit size    specify maximum size of deleted file contents to be carried inside an image file\n"
"  --action-script FILE  add an external action script\n"
"  -j|--" OPT_SHELL_JOB "        allow one to dump and restore shell jobs\n"
"  -l|--" OPT_FILE_LOCKS "       handle file locks, for safety, only used for container\n"
"  -L|--libdir           path to a plugin directory (by default " CR_PLUGIN_DEFAULT ")\n"
"  --force-irmap         force resolving names for inotify/fsnotify watches\n"
"  -M|--ext-mount-map KEY:VALUE\n"
"                        add external mount mapping\n"
"  -M|--ext-mount-map auto\n"
"                        attempt to autodetect external mount mapings\n"
"  --enable-external-sharing\n"
"                        allow autoresolving mounts with external sharing\n"
"  --enable-external-masters\n"
"                        allow autoresolving mounts with external masters\n"
"  --manage-cgroups [m]  dump or restore cgroups the process is in usig mode:\n"
"                        'none', 'props', 'soft' (default), 'full' and 'strict'.\n"
"  --cgroup-root [controller:]/newroot\n"
"                        change the root cgroup the controller will be\n"
"                        installed into. No controller means that root is the\n"
"                        default for all controllers not specified.\n"
"  --skip-mnt PATH       ignore this mountpoint when dumping the mount namespace.\n"
"  --enable-fs FSNAMES   a comma separated list of filesystem names or \"all\".\n"
"                        force criu to (try to) dump/restore these filesystem's\n"
"                        mountpoints even if fs is not supported.\n"
"\n"
"* Logging:\n"
"  -o|--log-file FILE    log file name\n"
"     --log-pid          enable per-process logging to separate FILE.pid files\n"
"  -v[NUM]               set logging level (higher level means more output):\n"
"                          -v1|-v    - only errors and messages\n"
"                          -v2|-vv   - also warnings (default level)\n"
"                          -v3|-vvv  - also information messages and timestamps\n"
"                          -v4|-vvvv - lots of debug\n"
"\n"
"* Memory dumping options:\n"
"  --track-mem           turn on memory changes tracker in kernel\n"
"  --prev-images-dir DIR path to images from previous dump (relative to -D)\n"
"  --page-server         send pages to page server (see options below as well)\n"
"  --auto-dedup          when used on dump it will deduplicate \"old\" data in\n"
"                        pages images of previous dump\n"
"                        when used on restore, as soon as page is restored, it\n"
"                        will be punched from the image.\n"
"\n"
"Page/Service server options:\n"
"  --address ADDR        address of server or service\n"
"  --port PORT           port of page server\n"
"  -d|--daemon           run in the background after creating socket\n"
"\n"
"Other options:\n"
"  -h|--help             show this text\n"
"  -V|--version          show version\n"
"     --ms               don't check not yet merged kernel features\n"
	);

	return 0;

opt_pid_missing:
	pr_msg("Error: pid not specified\n");
	return 1;

bad_arg:
	if (idx < 0) /* short option */
		pr_msg("Error: invalid argument for -%c: %s\n",
				opt, optarg);
	else /* long option */
		pr_msg("Error: invalid argument for --%s: %s\n",
				long_opts[idx].name, optarg);
	return 1;
}
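
The option-parsing loop above follows the standard getopt_long pattern: numeric case values for long-only options, goto bad_arg for invalid argument values and goto usage for unknown options. Below is a minimal, self-contained sketch of that pattern with hypothetical option names (not CRIU's actual option table), intended only to illustrate the structure:

/* Minimal sketch of the getopt_long pattern used above (hypothetical options). */
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	int opt, idx, pid = 0;
	static struct option long_opts[] = {
		{ "tree",       required_argument, NULL, 't'  },
		{ "link-remap", no_argument,       NULL, 1041 }, /* long-only option */
		{ NULL, 0, NULL, 0 },
	};

	while (1) {
		idx = -1;
		opt = getopt_long(argc, argv, "t:", long_opts, &idx);
		if (opt == -1)
			break;

		switch (opt) {
		case 't':
			pid = atoi(optarg);
			if (pid <= 0)
				goto bad_arg;
			break;
		case 1041:
			printf("Will allow link remaps\n");
			break;
		default:
			fprintf(stderr, "Usage: %s [-t PID] [--link-remap]\n", argv[0]);
			return 1;
		}
	}

	printf("tree pid: %d\n", pid);
	return 0;

bad_arg:
	if (idx < 0) /* short option */
		fprintf(stderr, "invalid argument for -%c: %s\n", opt, optarg);
	else /* long option */
		fprintf(stderr, "invalid argument for --%s: %s\n", long_opts[idx].name, optarg);
	return 1;
}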
コード例 #24
0
ファイル: ispvideo.c プロジェクト: borkmann/kasan
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
    struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
    struct isp_video_queue *queue = video->queue;
    enum isp_pipeline_state state;
    struct isp_video_buffer *buf;
    unsigned long flags;
    struct timespec ts;

    spin_lock_irqsave(&queue->irqlock, flags);
    if (WARN_ON(list_empty(&video->dmaqueue))) {
        spin_unlock_irqrestore(&queue->irqlock, flags);
        return NULL;
    }

    buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
                           irqlist);
    list_del(&buf->irqlist);
    spin_unlock_irqrestore(&queue->irqlock, flags);

    ktime_get_ts(&ts);
    buf->vbuf.timestamp.tv_sec = ts.tv_sec;
    buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

    /* Do frame number propagation only if this is the output video node.
     * Frame number either comes from the CSI receivers or it gets
     * incremented here if H3A is not active.
     * Note: There is no guarantee that the output buffer will finish
     * first, so the input number might lag behind by 1 in some cases.
     */
    if (video == pipe->output && !pipe->do_propagation)
        buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
    else
        buf->vbuf.sequence = atomic_read(&pipe->frame_number);

    /* Report pipeline errors to userspace on the capture device side. */
    if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
        buf->state = ISP_BUF_STATE_ERROR;
        pipe->error = false;
    } else {
        buf->state = ISP_BUF_STATE_DONE;
    }

    wake_up(&buf->wait);

    if (list_empty(&video->dmaqueue)) {
        if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
            state = ISP_PIPELINE_QUEUE_OUTPUT
                    | ISP_PIPELINE_STREAM;
        else
            state = ISP_PIPELINE_QUEUE_INPUT
                    | ISP_PIPELINE_STREAM;

        spin_lock_irqsave(&pipe->lock, flags);
        pipe->state &= ~state;
        if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
            video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
        spin_unlock_irqrestore(&pipe->lock, flags);
        return NULL;
    }

    if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
        spin_lock_irqsave(&pipe->lock, flags);
        pipe->state &= ~ISP_PIPELINE_STREAM;
        spin_unlock_irqrestore(&pipe->lock, flags);
    }

    buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
                           irqlist);
    buf->state = ISP_BUF_STATE_ACTIVE;
    return to_isp_buffer(buf);
}
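
The doc comment above notes that the completed buffer's timestamp is filled in before its waiter is woken; the kernel code splits a monotonic ktime_get_ts() value into the tv_sec/tv_usec pair stored in the v4l2 buffer. A small user-space analogue of just that conversion, using clock_gettime as a stand-in for the kernel API (illustration only):

/* User-space sketch of the timestamp split done above (illustrative only). */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000L

int main(void)
{
	struct timespec ts;
	long tv_sec, tv_usec;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* kernel code uses ktime_get_ts() */
	tv_sec  = ts.tv_sec;
	tv_usec = ts.tv_nsec / NSEC_PER_USEC;	/* nanoseconds -> microseconds */

	printf("buffer timestamp: %ld.%06ld\n", tv_sec, tv_usec);
	return 0;
}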
コード例 #25
0
ファイル: buddy_allocator.c プロジェクト: agustingianni/ArgOS
page_t *
buddy_page_alloc(uint32_t order, pgflags_t flags)
{
	/*
	 * TODO: A lock should be acquired here to protect the list
	 * handling. Linux uses an interrupt-safe spinlock for this.
	 * Ideally the access would only be locked when absolutely
	 * necessary, which would allow multiple allocations to be
	 * satisfied at the same time.
	 */

	if (order >= MAX_ORDER)
		return NULL;

	uint32_t current_order = order;

	buddy_list_t *free_list = &buddy.free_lists[order];

	struct list_head *head;
	struct list_head *curr;

	page_t *page;

	do
	{
		/* List of free page blocks of the current order */
		head = &free_list->head;

		/* The first free block of pages */
		curr = head->next;

		/* If the list is not empty, there are pages to take */
		if (!list_empty(head))
		{
			/* Index of the page within mem_map */
			unsigned int index;

			/* Get the page structure (head here is the struct member, not the variable) */
			page = list_entry(curr, page_t, head);

			/* Remove the current block of pages from the list */
			list_del(curr);

			/* Compute the mem_map index of the first page of the block */
			index = page - mem_map;

			#define MARK_USED(index, order, free_list) \
				bit_complement((index) >> (1+(order)), (free_list)->page_bitmap)

			/* For the top order (MAX_ORDER-1) there is no buddy */
			if (current_order != MAX_ORDER-1)
				MARK_USED(index, current_order, free_list);

			/* Size of the current block */
			uint32_t size = 1 << current_order;

			/*
			 * If current_order > order, there was no block of
			 * exactly 2**order pages, so a larger order was
			 * picked to be split.
			 */
			while (current_order > order)
			{
				/* Move to the free list of the order below */
				free_list--;

				/* Decrement the current order */
				current_order--;

				/* Compute the block size, 2**current_order */
				size >>= 1;

				/* Add buddy1 to the list of free blocks */
				list_add(&(page)->head, &free_list->head);

				/*
				 * Mark that one of the two buddies is in
				 * use, without recording which one.
				 */
				MARK_USED(index, current_order, free_list);

				/* Move on to the next block */
				index += size;

				/*
				 * Next page: size is actually the number of
				 * pages to skip, i.e. the size of buddy1.
				 */
				page += size;
			}

			/* current_order == order -> we have our block of pages */
			//set_page_count(page, 1);

			return page;
		}

		current_order++;
		free_list++;
	} while (current_order < MAX_ORDER);

	/* No free block of a sufficient order was found */
	return NULL;
}
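
The MARK_USED macro above toggles one bitmap bit shared by a buddy pair: the blocks starting at index and at index ^ (1 << order) both map to bit index >> (1 + order). A stand-alone sketch of that index arithmetic with made-up numbers (not the allocator's real bitmap):

/* Stand-alone sketch of buddy index arithmetic (illustrative, not the allocator above). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t order = 3;	/* block of 2**3 = 8 pages */
	uint32_t index = 40;	/* first page of the block in mem_map */

	uint32_t buddy = index ^ (1u << order);	/* the paired block */
	uint32_t bit   = index >> (1 + order);	/* bit toggled by MARK_USED */

	printf("block at %u, buddy at %u, shared bitmap bit %u\n", index, buddy, bit);
	printf("buddy's bit: %u (same bit)\n", buddy >> (1 + order));

	/* Splitting: an order-3 block at 40 yields two order-2 buddies at 40 and 44. */
	printf("split -> %u and %u\n", index, index + (1u << (order - 1)));
	return 0;
}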
コード例 #26
0
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);
	unsigned long bv_page_flags = 0;

	if (rq->bio && rq->bio->bi_io_vec && rq->bio->bi_io_vec->bv_page)
		bv_page_flags = rq->bio->bi_io_vec->bv_page->flags;

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}

		if ((bv_page_flags & (1L << PG_readahead)) ||
		    (diff_ms < rd->rd_idle_data.freq_ms)) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
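
The idling decision above compares the time since the last insertion on the queue against freq_ms and enables idling only when requests arrive faster than that threshold (or carry a readahead page). A user-space analogue of the delta computation and the decision, with an assumed threshold and CLOCK_MONOTONIC standing in for ktime:

/* Sketch of the "insert frequency" idling decision used above (hypothetical threshold). */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long delta_ms(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000LL + (b.tv_nsec - a.tv_nsec) / 1000000LL;
}

int main(void)
{
	const long long freq_ms = 5;	/* assumed idling frequency threshold */
	struct timespec last, now;
	bool begin_idling;

	clock_gettime(CLOCK_MONOTONIC, &last);	/* "last_insert_time" */
	usleep(2000);				/* next request arrives 2 ms later */
	clock_gettime(CLOCK_MONOTONIC, &now);

	/* Idle only if requests arrive faster than the threshold. */
	begin_idling = delta_ms(last, now) < freq_ms;
	printf("diff=%lldms -> %s idling\n", delta_ms(last, now),
	       begin_idling ? "enable" : "disable");
	return 0;
}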
コード例 #27
0
ファイル: lib_ccx.c プロジェクト: Abhinav95/ccextractor
struct encoder_ctx *update_encoder_list_cinfo(struct lib_ccx_ctx *ctx, struct cap_info* cinfo)
{
	struct encoder_ctx *enc_ctx;
	unsigned int pn = 0;
	unsigned char in_format = 1;
	char *extension;


	if (ctx->write_format == CCX_OF_NULL)
		return NULL;

	if(cinfo)
	{
		pn = cinfo->program_number;
		if (cinfo->codec == CCX_CODEC_ISDB_CC)
			in_format = 3;
		else if (cinfo->codec == CCX_CODEC_TELETEXT)
			in_format = 2;
		else
			in_format = 1;
	}
	list_for_each_entry(enc_ctx, &ctx->enc_ctx_head, list, struct encoder_ctx)
	{
		if ( ctx->multiprogram == CCX_FALSE)
			return enc_ctx;

		if (enc_ctx->program_number == pn)
			return enc_ctx;
	}

	extension = get_file_extension(ccx_options.enc_cfg.write_format);
	if (!extension && ccx_options.enc_cfg.write_format != CCX_OF_CURL)
		return NULL;

	if(ctx->multiprogram == CCX_FALSE)
	{
		if(ctx->out_interval != -1)
		{
			int len;

			len = strlen(ctx->basefilename) + 10 + strlen(extension);

			freep(&ccx_options.enc_cfg.output_filename);
			ccx_options.enc_cfg.output_filename = malloc(len);

			sprintf(ccx_options.enc_cfg.output_filename, "%s_%06d%s", ctx->basefilename, ctx->segment_counter+1, extension);
		}
		if (list_empty(&ctx->enc_ctx_head))
		{
			ccx_options.enc_cfg.program_number = pn;
			ccx_options.enc_cfg.in_format = in_format;
			enc_ctx = init_encoder(&ccx_options.enc_cfg);
			if (!enc_ctx)
				return NULL;
			list_add_tail( &(enc_ctx->list), &(ctx->enc_ctx_head) );
		}
	}
	else
	{
		int len;

		len = strlen(ctx->basefilename) + 10 + strlen(extension);

		ccx_options.enc_cfg.program_number = pn;
		ccx_options.enc_cfg.output_filename = malloc(len);
		if (!ccx_options.enc_cfg.output_filename)
		{
			freep(&extension);
			return NULL;
		}

		sprintf(ccx_options.enc_cfg.output_filename, "%s_%d%s", ctx->basefilename, pn, extension);
		enc_ctx = init_encoder(&ccx_options.enc_cfg);
		if (!enc_ctx)
		{
			freep(&extension);
			freep(&ccx_options.enc_cfg.output_filename);
			return NULL;
		}

		list_add_tail( &(enc_ctx->list), &(ctx->enc_ctx_head) );
		freep(&extension);
		freep(&ccx_options.enc_cfg.output_filename);
	}
	// DVB related
	enc_ctx->prev = NULL;
	if (cinfo)
		if (cinfo->codec == CCX_CODEC_DVB)
			enc_ctx->write_previous = 0;
	freep(&extension);
	return enc_ctx;
}
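
The len = strlen(basefilename) + 10 + strlen(extension) computation above reserves space for the "_%06d" / "_%d" suffix plus the terminating NUL before the sprintf. A sketch of the same construction using snprintf, which enforces the size assumption instead of relying on it; the names here are made up and not the ccextractor API:

/* Sketch of the output-filename construction above, with snprintf (hypothetical inputs). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *basefilename = "capture";	/* assumed base name */
	const char *extension = ".srt";		/* assumed extension */
	int segment = 1;

	/* "_" + up to 6 digits + NUL fits comfortably in the extra 10 bytes. */
	size_t len = strlen(basefilename) + 10 + strlen(extension);
	char *name = malloc(len);
	if (!name)
		return 1;

	snprintf(name, len, "%s_%06d%s", basefilename, segment + 1, extension);
	printf("%s\n", name);	/* capture_000002.srt */

	free(name);
	return 0;
}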
コード例 #28
0
/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *				      class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating if forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in scheduler or if we're idling on some queue
 * IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				if (hrtimer_try_to_cancel(
					&rd->rd_idle_data.hr_timer) >= 0) {
					row_log(rd->dispatch_queue,
					"Canceling delayed work on %d. RT pending",
					     rd->rd_idle_data.idling_queue_idx);
					rd->rd_idle_data.idling_queue_idx =
						ROWQ_MAX_PRIO;
				}
			}

			if (row_regular_req_pending(rd) &&
			    (rd->reg_prio_starvation.starvation_counter >=
			     rd->reg_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_BE;
			else if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_RT;

			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}
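
The doc comment above describes the selection policy: high-priority (RT) queues are served first, but once a regular or low-priority starvation counter reaches its limit the corresponding class wins instead. A simplified, self-contained sketch of that starvation-limited selection with hypothetical counters and limits (not the driver's data structures):

/* Simplified sketch of starvation-limited class selection (hypothetical numbers). */
#include <stdio.h>

enum { CLASS_NONE, CLASS_RT, CLASS_BE, CLASS_IDLE };

static int pick_class(int rt_pending, int be_pending, int idle_pending,
		      int be_starved, int be_limit,
		      int idle_starved, int idle_limit)
{
	if (!rt_pending && !be_pending && !idle_pending)
		return CLASS_NONE;

	if (rt_pending) {
		/* RT wins unless a lower class has been starved past its limit. */
		if (be_pending && be_starved >= be_limit)
			return CLASS_BE;
		if (idle_pending && idle_starved >= idle_limit)
			return CLASS_IDLE;
		return CLASS_RT;
	}

	if (be_pending)
		return (idle_pending && idle_starved >= idle_limit) ? CLASS_IDLE : CLASS_BE;

	return CLASS_IDLE;
}

int main(void)
{
	printf("%d\n", pick_class(1, 1, 0, 3, 5, 0, 10));	/* 1 = RT: BE not starved yet */
	printf("%d\n", pick_class(1, 1, 0, 7, 5, 0, 10));	/* 2 = BE: starvation limit hit */
	return 0;
}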
コード例 #29
0
ファイル: soc_common.c プロジェクト: 325116067/semc-qsd8x50
int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
				struct skt_dev_info *sinfo)
{
	struct soc_pcmcia_socket *skt;
	int ret, i;

	mutex_lock(&soc_pcmcia_sockets_lock);

	/*
	 * Initialise the per-socket structure.
	 */
	for (i = 0; i < sinfo->nskt; i++) {
		skt = &sinfo->skt[i];

		skt->socket.ops = &soc_common_pcmcia_operations;
		skt->socket.owner = ops->owner;
		skt->socket.dev.parent = dev;

		init_timer(&skt->poll_timer);
		skt->poll_timer.function = soc_common_pcmcia_poll_event;
		skt->poll_timer.data = (unsigned long)skt;
		skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;

		skt->dev	= dev;
		skt->ops	= ops;

		ret = request_resource(&iomem_resource, &skt->res_skt);
		if (ret)
			goto out_err_1;

		ret = request_resource(&skt->res_skt, &skt->res_io);
		if (ret)
			goto out_err_2;

		ret = request_resource(&skt->res_skt, &skt->res_mem);
		if (ret)
			goto out_err_3;

		ret = request_resource(&skt->res_skt, &skt->res_attr);
		if (ret)
			goto out_err_4;

		skt->virt_io = ioremap(skt->res_io.start, 0x10000);
		if (skt->virt_io == NULL) {
			ret = -ENOMEM;
			goto out_err_5;
		}

		if (list_empty(&soc_pcmcia_sockets))
			soc_pcmcia_cpufreq_register();

		list_add(&skt->node, &soc_pcmcia_sockets);

		/*
		 * We initialize default socket timing here, because
		 * we are not guaranteed to see a SetIOMap operation at
		 * runtime.
		 */
		ops->set_timing(skt);

		ret = ops->hw_init(skt);
		if (ret)
			goto out_err_6;

		skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
		skt->socket.resource_ops = &pccard_static_ops;
		skt->socket.irq_mask = 0;
		skt->socket.map_size = PAGE_SIZE;
		skt->socket.pci_irq = skt->irq;
		skt->socket.io_offset = (unsigned long)skt->virt_io;

		skt->status = soc_common_pcmcia_skt_state(skt);

		ret = pcmcia_register_socket(&skt->socket);
		if (ret)
			goto out_err_7;

		WARN_ON(skt->socket.sock != i);

		add_timer(&skt->poll_timer);

		ret = device_create_file(&skt->socket.dev, &dev_attr_status);
		if (ret)
			goto out_err_8;
	}

	dev_set_drvdata(dev, sinfo);
	ret = 0;
	goto out;

	do {
		skt = &sinfo->skt[i];

		device_remove_file(&skt->socket.dev, &dev_attr_status);
 out_err_8:
		del_timer_sync(&skt->poll_timer);
		pcmcia_unregister_socket(&skt->socket);

 out_err_7:
		flush_scheduled_work();

		ops->hw_shutdown(skt);
 out_err_6:
 		list_del(&skt->node);
		iounmap(skt->virt_io);
 out_err_5:
		release_resource(&skt->res_attr);
 out_err_4:
		release_resource(&skt->res_mem);
 out_err_3:
		release_resource(&skt->res_io);
 out_err_2:
		release_resource(&skt->res_skt);
 out_err_1:
		i--;
	} while (i > 0);

	kfree(sinfo);

 out:
	mutex_unlock(&soc_pcmcia_sockets_lock);
	return ret;
}
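
The error path above is the staged goto-unwind pattern: each failed acquisition jumps to a label that releases, in reverse order, everything acquired before it. A minimal stand-alone sketch of the pattern with hypothetical resources (plain allocations instead of the PCMCIA resources):

/* Minimal sketch of the staged goto-unwind error handling used above (hypothetical resources). */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	int ret = -1;
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto out;

	b = malloc(32);
	if (!b)
		goto out_free_a;

	c = malloc(64);
	if (!c)
		goto out_free_b;

	printf("all resources acquired\n");
	free(c);
	free(b);
	free(a);
	return 0;

	/* Unwind in reverse order of acquisition. */
out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}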
コード例 #30
0
ファイル: list.c プロジェクト: mkopta/d8gx36kod
void list_destroy(List *l) {
  while (!list_empty(l))
    (void) list_pop(l);
  free(l);
}