Example #1
/**
 * An implementation of the cl_io_operations::cio_io_submit() method for the
 * osc layer. Iterates over pages in the in-queue, prepares each for I/O by
 * calling cl_page_prep() and then either submits them through
 * osc_io_submit_page() or, if a page is already submitted, changes osc flags
 * through osc_set_async_flags().
 */
static int osc_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
			 enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page	  *page;
	struct cl_page	  *tmp;
	struct client_obd *cli  = NULL;
	struct osc_object *osc  = NULL;	/* to keep gcc happy */
	struct osc_page	  *opg;
	struct cl_io	  *io;
	struct list_head  list = LIST_HEAD_INIT(list);

	struct cl_page_list *qin      = &queue->c2_qin;
	struct cl_page_list *qout     = &queue->c2_qout;
	unsigned int queued = 0;
	int result = 0;
	int cmd;
	int brw_flags;
	unsigned int max_pages;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	max_pages = cli->cl_max_pages_per_rpc;

	cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 *	 creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		/* Top level IO. */
		io = page->cp_owner;
		LASSERT(io != NULL);

		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;
		LASSERT(osc == oap->oap_obj);

		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, io, page, crt);
		if (result != 0) {
			LASSERT(result < 0);
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}

		spin_lock(&oap->oap_lock);
		oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
		oap->oap_async_flags |= ASYNC_COUNT_STABLE;
		spin_unlock(&oap->oap_lock);

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);

		if (page->cp_sync_io != NULL)
			cl_page_list_move(qout, qin, page);
		else /* async IO */
			cl_page_list_del(env, qin, page);

		if (++queued == max_pages) {
			queued = 0;
			result = osc_queue_sync_pages(env, osc, &list, cmd,
						      brw_flags);
			if (result < 0)
				break;
		}
	}

	if (queued > 0)
		result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	return qout->pl_nr > 0 ? 0 : result;
}
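The loop above batches pages onto a local list and flushes whenever max_pages have accumulated, with one final flush for the remainder after the loop. A minimal standalone sketch of that batch-and-flush shape (flush_batch() is an illustrative stand-in for osc_queue_sync_pages(), not part of the osc layer):

#include <stdio.h>

#define MAX_BATCH 4

/* Illustrative stand-in for osc_queue_sync_pages(): consume one batch. */
static int flush_batch(const int *batch, int n)
{
	printf("flushing %d items starting at %d\n", n, batch[0]);
	return 0;
}

int main(void)
{
	int batch[MAX_BATCH];
	int queued = 0, result = 0;

	for (int item = 0; item < 10; item++) {
		batch[queued++] = item;		/* list_add_tail() above */
		if (queued == MAX_BATCH) {	/* hit max_pages: flush now */
			result = flush_batch(batch, queued);
			queued = 0;
			if (result < 0)
				break;
		}
	}
	if (queued > 0)				/* flush the partial tail */
		result = flush_batch(batch, queued);
	return result;
}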
Example #2
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
				    walk_msg->m_type != 1)
					msgtyp = walk_msg->m_type - 1;
				else
					break;
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption.  We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg*)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg*)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
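For context, this function is the kernel side of a plain msgrcv(2). A minimal System V message-queue round trip from userspace (error handling abbreviated):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct msgbuf_ex {
	long mtype;		/* must be > 0 */
	char mtext[64];
};

int main(void)
{
	/* IPC_PRIVATE gives a fresh queue, avoiding key collisions. */
	int msqid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct msgbuf_ex msg = { .mtype = 1 };

	if (msqid < 0)
		return 1;
	strcpy(msg.mtext, "hello");
	msgsnd(msqid, &msg, sizeof(msg.mtext), 0);

	/* msgtyp = 0 asks for the first message of any type; this call
	 * lands in do_msgrcv() above. */
	if (msgrcv(msqid, &msg, sizeof(msg.mtext), 0, 0) >= 0)
		printf("got type %ld: %s\n", msg.mtype, msg.mtext);

	msgctl(msqid, IPC_RMID, NULL);	/* tear the queue down */
	return 0;
}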
Example #3
static dispex_data_t *preprocess_dispex_data(DispatchEx *This)
{
    const tid_t *tid = This->data->iface_tids;
    FUNCDESC *funcdesc;
    dispex_data_t *data;
    DWORD size = 16, i;
    ITypeInfo *ti, *dti;
    HRESULT hres;

    TRACE("(%p)\n", This);

    if(This->data->disp_tid) {
        hres = get_typeinfo(This->data->disp_tid, &dti);
        if(FAILED(hres)) {
            ERR("Could not get disp type info: %08x\n", hres);
            return NULL;
        }
    }

    data = heap_alloc(sizeof(dispex_data_t));
    data->func_cnt = 0;
    data->func_disp_cnt = 0;
    data->funcs = heap_alloc(size*sizeof(func_info_t));
    list_add_tail(&dispex_data_list, &data->entry);

    while(*tid) {
        hres = get_typeinfo(*tid, &ti);
        if(FAILED(hres))
            break;

        i=7;
        while(1) {
            hres = ITypeInfo_GetFuncDesc(ti, i++, &funcdesc);
            if(FAILED(hres))
                break;

            add_func_info(data, &size, *tid, funcdesc, dti);
            ITypeInfo_ReleaseFuncDesc(ti, funcdesc);
        }

        tid++;
    }

    if(!data->func_cnt) {
        heap_free(data->funcs);
        data->funcs = NULL;
    }else if(data->func_cnt != size) {
        data->funcs = heap_realloc(data->funcs, data->func_cnt * sizeof(func_info_t));
    }

    qsort(data->funcs, data->func_cnt, sizeof(func_info_t), dispid_cmp);

    if(data->funcs) {
        data->name_table = heap_alloc(data->func_cnt * sizeof(func_info_t*));
        for(i=0; i < data->func_cnt; i++)
            data->name_table[i] = data->funcs+i;
        qsort(data->name_table, data->func_cnt, sizeof(func_info_t*), func_name_cmp);
    }else {
        data->name_table = NULL;
    }

    return data;
}
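preprocess_dispex_data() keeps the funcs array sorted by DISPID and a parallel name_table of pointers sorted by name, so both lookups can binary-search the same allocation. The same two-orders-over-one-array trick in standalone C (struct fields are illustrative, not Wine's exact layout):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct func_info { int dispid; const char *name; };

static int by_dispid(const void *a, const void *b)
{
	const struct func_info *x = a, *y = b;
	return x->dispid - y->dispid;
}

static int by_name(const void *a, const void *b)
{
	/* Note the extra indirection: the name table holds pointers. */
	const struct func_info * const *x = a, * const *y = b;
	return strcmp((*x)->name, (*y)->name);
}

int main(void)
{
	struct func_info funcs[] = {
		{ 3, "toString" }, { 1, "item" }, { 2, "length" },
	};
	size_t i, n = sizeof(funcs) / sizeof(funcs[0]);
	struct func_info *name_table[3];

	qsort(funcs, n, sizeof(funcs[0]), by_dispid);
	for (i = 0; i < n; i++)
		name_table[i] = &funcs[i];
	qsort(name_table, n, sizeof(name_table[0]), by_name);

	printf("lowest dispid: %s, first name: %s\n",
	       funcs[0].name, name_table[0]->name);
	return 0;
}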
static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{

	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct v4l2_buffer tbuf = *buf;
	struct videobuf_buffer *buf1;
	unsigned long addr = 0;
	unsigned long flags;
	int ret = 0;

	if (common->fmt.type != tbuf.type)
		return -EINVAL;

	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_err("fh->io_allowed\n");
		return -EACCES;
	}

	if (!(list_empty(&common->dma_queue)) ||
	    (common->cur_frm != common->next_frm) ||
	    !(common->started) ||
	    (common->started && (0 == ch->field_id)))
		return videobuf_qbuf(&common->buffer_queue, buf);

	/* buffer queue is empty; store the buffer address in VPIF registers */
	mutex_lock(&common->buffer_queue.vb_lock);
	buf1 = common->buffer_queue.bufs[tbuf.index];
	if (buf1->memory != tbuf.memory) {
		vpif_err("invalid buffer type\n");
		goto qbuf_exit;
	}

	if ((buf1->state == VIDEOBUF_QUEUED) ||
	    (buf1->state == VIDEOBUF_ACTIVE)) {
		vpif_err("invalid state\n");
		goto qbuf_exit;
	}

	switch (buf1->memory) {
	case V4L2_MEMORY_MMAP:
		if (buf1->baddr == 0)
			goto qbuf_exit;
		break;

	case V4L2_MEMORY_USERPTR:
		if (tbuf.length < buf1->bsize)
			goto qbuf_exit;

		if ((VIDEOBUF_NEEDS_INIT != buf1->state)
		    && (buf1->baddr != tbuf.m.userptr)) {
			vpif_buffer_release(&common->buffer_queue, buf1);
			buf1->baddr = tbuf.m.userptr;
		}
		break;

	default:
		goto qbuf_exit;
	}

	local_irq_save(flags);
	ret = vpif_buffer_prepare(&common->buffer_queue, buf1,
					common->buffer_queue.field);
	if (ret < 0) {
		local_irq_restore(flags);
		goto qbuf_exit;
	}

	buf1->state = VIDEOBUF_ACTIVE;
	addr = buf1->boff;
	common->next_frm = buf1;
	if (tbuf.type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
		common->set_addr((addr + common->ytop_off),
				 (addr + common->ybtm_off),
				 (addr + common->ctop_off),
				 (addr + common->cbtm_off));
	}

	local_irq_restore(flags);
	list_add_tail(&buf1->stream, &common->buffer_queue.stream);
	mutex_unlock(&common->buffer_queue.vb_lock);
	return 0;

qbuf_exit:
	mutex_unlock(&common->buffer_queue.vb_lock);
	return -EINVAL;
}
Example #5
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}
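ss_add() is one half of the classic sleep/wake handshake: the task state is set before the waiter is published, so a concurrent waker either sees the waiter on the list or the sleeper re-checks before sleeping. A hedged kernel-style sketch of the two halves (simplified; the real code also re-checks conditions and handles signals after schedule() returns):

/* Sleeper side: publish the waiter, drop the lock, then sleep. */
static void wait_for_space(struct msg_queue *msq, struct msg_sender *mss)
{
	ss_add(msq, mss);	/* state = TASK_INTERRUPTIBLE, enqueue */
	msg_unlock(msq);
	schedule();		/* returns once a waker runs, or on a signal */
}

/* Waker side: wake everything parked on q_senders (cf. ss_wakeup()). */
static void wake_senders(struct msg_queue *msq)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, &msq->q_senders, list) {
		list_del(&mss->list);
		wake_up_process(mss->tsk);	/* makes schedule() return */
	}
}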
/**
 * mount_ubifs - mount UBIFS file-system.
 * @c: UBIFS file-system description object
 *
 * This function mounts UBIFS file system. Returns zero in case of success and
 * a negative error code in case of failure.
 *
 * Note, the function does not de-allocate resources if it fails halfway
 * through, and the caller has to do this instead.
 */
static int mount_ubifs(struct ubifs_info *c)
{
	struct super_block *sb = c->vfs_sb;
	int err, mounted_read_only = (sb->s_flags & MS_RDONLY);
	long long x;
	size_t sz;

	err = init_constants_early(c);
	if (err)
		return err;

	err = ubifs_debugging_init(c);
	if (err)
		return err;

	err = check_volume_empty(c);
	if (err)
		goto out_free;

	if (c->empty && (mounted_read_only || c->ro_media)) {
		/*
		 * This UBI volume is empty, and read-only, or the file system
		 * is mounted read-only - we cannot format it.
		 */
		ubifs_err("can't format empty UBI volume: read-only %s",
			  c->ro_media ? "UBI volume" : "mount");
		err = -EROFS;
		goto out_free;
	}

	if (c->ro_media && !mounted_read_only) {
		ubifs_err("cannot mount read-write - read-only media");
		err = -EROFS;
		goto out_free;
	}

	/*
	 * The requirement for the buffer is that it should fit the indexing
	 * B-tree height worth of integers. We assume the height of the TNC
	 * tree will never exceed 64.
	 */
	err = -ENOMEM;
	c->bottom_up_buf = kmalloc(BOTTOM_UP_HEIGHT * sizeof(int), GFP_KERNEL);
	if (!c->bottom_up_buf)
		goto out_free;

	c->sbuf = vmalloc(c->leb_size);
	if (!c->sbuf)
		goto out_free;

	/*
	 * We have to check all CRCs, even for data nodes, when we mount the FS
	 * (specifically, when we are replaying).
	 */
	c->always_chk_crc = 1;

	err = ubifs_read_superblock(c);
	if (err)
		goto out_free;

	/*
	 * Make sure the compressor which is set as default in the superblock
	 * or overridden by mount options is actually compiled in.
	 */
	if (!ubifs_compr_present(c->default_compr)) {
		ubifs_err("compressor \"%s\" is not compiled in",
			  ubifs_compr_name(c->default_compr));
		err = -ENOTSUPP;
		goto out_free;
	}

	dbg_failure_mode_registration(c);

	err = init_constants_sb(c);
	if (err)
		goto out_free;

	sz = ALIGN(c->max_idx_node_sz, c->min_io_size);
	sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size);
	c->cbuf = kmalloc(sz, GFP_NOFS);
	if (!c->cbuf) {
		err = -ENOMEM;
		goto out_free;
	}

	sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);

	err = ubifs_read_master(c);
	if (err)
		goto out_master;

	init_constants_master(c);

	if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
		ubifs_msg("recovery needed");
		c->need_recovery = 1;
	}

	err = ubifs_lpt_init(c, 1, !mounted_read_only);
	if (err)
		goto out_lpt;

	err = dbg_check_idx_size(c, c->old_idx_sz);
	if (err)
		goto out_lpt;

	err = ubifs_replay_journal(c);
	if (err)
		goto out_journal;

	err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
	if (err)
		goto out_orphans;

	if (c->need_recovery) {
		err = ubifs_recover_size(c);
		if (err)
			goto out_orphans;
	}

	spin_lock(&ubifs_infos_lock);
	list_add_tail(&c->infos_list, &ubifs_infos);
	spin_unlock(&ubifs_infos_lock);

	if (c->need_recovery) {
		if (mounted_read_only)
			ubifs_msg("recovery deferred");
		else {
			c->need_recovery = 0;
			ubifs_msg("recovery completed");
		}
	}

	err = dbg_check_filesystem(c);
	if (err)
		goto out_infos;

	c->always_chk_crc = 0;

	ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
		  c->vi.ubi_num, c->vi.vol_id, c->vi.name);
	if (mounted_read_only)
		ubifs_msg("mounted read-only");
	x = (long long)c->main_lebs * c->leb_size;
	ubifs_msg("file system size:   %lld bytes (%lld KiB, %lld MiB, %d "
		  "LEBs)", x, x >> 10, x >> 20, c->main_lebs);
	x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
	ubifs_msg("journal size:       %lld bytes (%lld KiB, %lld MiB, %d "
		  "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
	ubifs_msg("media format:       w%d/r%d (latest is w%d/r%d)",
		  c->fmt_version, c->ro_compat_version,
		  UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
	ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
	ubifs_msg("reserved for root:  %llu bytes (%llu KiB)",
		c->report_rp_size, c->report_rp_size >> 10);

	dbg_msg("compiled on:         " __DATE__ " at " __TIME__);
	dbg_msg("min. I/O unit size:  %d bytes", c->min_io_size);
	dbg_msg("LEB size:            %d bytes (%d KiB)",
		c->leb_size, c->leb_size >> 10);
	dbg_msg("data journal heads:  %d",
		c->jhead_cnt - NONDATA_JHEADS_CNT);
	dbg_msg("UUID:                %02X%02X%02X%02X-%02X%02X"
	       "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
	       c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3],
	       c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
	       c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
	       c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
	dbg_msg("big_lpt              %d", c->big_lpt);
	dbg_msg("log LEBs:            %d (%d - %d)",
		c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
	dbg_msg("LPT area LEBs:       %d (%d - %d)",
		c->lpt_lebs, c->lpt_first, c->lpt_last);
	dbg_msg("orphan area LEBs:    %d (%d - %d)",
		c->orph_lebs, c->orph_first, c->orph_last);
	dbg_msg("main area LEBs:      %d (%d - %d)",
		c->main_lebs, c->main_first, c->leb_cnt - 1);
	dbg_msg("index LEBs:          %d", c->lst.idx_lebs);
	dbg_msg("total index bytes:   %lld (%lld KiB, %lld MiB)",
		c->old_idx_sz, c->old_idx_sz >> 10, c->old_idx_sz >> 20);
	dbg_msg("key hash type:       %d", c->key_hash_type);
	dbg_msg("tree fanout:         %d", c->fanout);
	dbg_msg("reserved GC LEB:     %d", c->gc_lnum);
	dbg_msg("first main LEB:      %d", c->main_first);
	dbg_msg("max. znode size      %d", c->max_znode_sz);
	dbg_msg("max. index node size %d", c->max_idx_node_sz);
	dbg_msg("node sizes:          data %zu, inode %zu, dentry %zu",
		UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ);
	dbg_msg("node sizes:          trun %zu, sb %zu, master %zu",
		UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ);
	dbg_msg("node sizes:          ref %zu, cmt. start %zu, orph %zu",
		UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
	dbg_msg("max. node sizes:     data %zu, inode %zu dentry %zu",
	        UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
		UBIFS_MAX_DENT_NODE_SZ);
	dbg_msg("dead watermark:      %d", c->dead_wm);
	dbg_msg("dark watermark:      %d", c->dark_wm);
	dbg_msg("LEB overhead:        %d", c->leb_overhead);
	x = (long long)c->main_lebs * c->dark_wm;
	dbg_msg("max. dark space:     %lld (%lld KiB, %lld MiB)",
		x, x >> 10, x >> 20);
	dbg_msg("maximum bud bytes:   %lld (%lld KiB, %lld MiB)",
		c->max_bud_bytes, c->max_bud_bytes >> 10,
		c->max_bud_bytes >> 20);
	dbg_msg("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
		c->bg_bud_bytes, c->bg_bud_bytes >> 10,
		c->bg_bud_bytes >> 20);
	dbg_msg("current bud bytes    %lld (%lld KiB, %lld MiB)",
		c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20);
	dbg_msg("max. seq. number:    %llu", c->max_sqnum);
	dbg_msg("commit number:       %llu", c->cmt_no);

	return 0;

out_infos:
	spin_lock(&ubifs_infos_lock);
	list_del(&c->infos_list);
	spin_unlock(&ubifs_infos_lock);
out_orphans:
	free_orphans(c);
out_journal:
out_lpt:
	ubifs_lpt_free(c, 0);
out_master:
	kfree(c->mst_node);
	kfree(c->rcvrd_mst_node);
	if (c->bgt)
		kthread_stop(c->bgt);
	kfree(c->cbuf);
out_free:
	vfree(c->ileb_buf);
	vfree(c->sbuf);
	kfree(c->bottom_up_buf);
	ubifs_debugging_exit(c);
	return err;
}
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);
	get_seccomp_filter(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = p->prev_stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_MEMCG
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	uprobe_copy_process(p);
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	if (clone_flags & CLONE_THREAD)
		p->exit_signal = -1;
	else if (clone_flags & CLONE_PARENT)
		p->exit_signal = current->group_leader->exit_signal;
	else
		p->exit_signal = (clone_flags & CSIGNAL);

	p->pdeath_signal = 0;
	p->exit_state = 0;

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	if (unlikely(clone_flags & CLONE_NEWPID))
		pid_ns_release_proc(p->nsproxy->pid_ns);
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
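copy_process() is the engine behind fork(2) and clone(2) via do_fork(); the duplicated task only starts running once the caller wakes it. From userspace the whole machinery reduces to:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();	/* do_fork() -> copy_process() in-kernel */

	if (pid < 0)
		return 1;	/* e.g. the -EAGAIN rlimit check above */
	if (pid == 0) {		/* child: a fresh task_struct */
		printf("child %d\n", (int)getpid());
		_exit(0);
	}
	waitpid(pid, NULL, 0);	/* parent reaps the child's exit status */
	printf("parent %d reaped %d\n", (int)getpid(), (int)pid);
	return 0;
}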
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device		*udev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	struct rmnet_ctrl_dev		*dev = urb->context;
	void				*cpkt;
	int				status = 0;
	size_t				cpkt_size = 0;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
		/*success*/
		dev->get_encap_resp_cnt++;
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
	/*re-submit int urb to check response available*/
	status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
	if (status)
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
			__func__, status);
}
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device		*udev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	struct rmnet_ctrl_dev		*rx_dev, *dev = urb->context;
	void				*cpkt;
	int				ch_id, status = 0;
	size_t				cpkt_size = 0;
	unsigned int		iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	usb_autopm_put_interface_async(dev->intf);

	switch (urb->status) {
	case 0:
		pr_info("[RACB:%d]<\n", iface_num);
		/*success*/
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
				__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;

	rx_dev = dev;

	if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
		ch_id = rmnet_usb_ctrl_dmux(list_elem);
		if (ch_id < 0) {
			kfree(list_elem->cpkt.data);
			kfree(list_elem);
			goto resubmit_int_urb;
		}

		rx_dev = &ctrl_devs[dev->id][ch_id];
	}

	rx_dev->get_encap_resp_cnt++;

	spin_lock(&rx_dev->rx_lock);
	list_add_tail(&list_elem->list, &rx_dev->rx_list);
	spin_unlock(&rx_dev->rx_lock);

	wake_up(&rx_dev->read_wait_queue);

resubmit_int_urb:
	/*check if it is already submitted in resume*/
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			if (status != -ENODEV)
				dev_err(dev->devicep,
				"%s: Error re-submitting Int URB %d\n",
				__func__, status);
		}
		pr_info("[CHKRA:%d]>\n", iface_num);
	}
}
Example #10
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}
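Registration is nothing more than chaining the ops structure onto a global list. A hedged sketch of the lookup that such a registry implies (the name key and helper are illustrative, not the exact zcrypt API):

/* Sketch: walk the registry for a matching entry; returns NULL if the
 * message type was never registered. Assumes a 'name' field on the ops. */
static struct zcrypt_ops *zcrypt_msgtype_find(const char *name)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (strcmp(zops->name, name) == 0)
			return zops;
	return NULL;
}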
Example #11
/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}
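The padding logic above reserves one extra descriptor per 4 KB page because a descriptor must not straddle a page boundary. The boundary test itself reduces to simple address arithmetic; an assumed standalone form of the check the driver wraps in ATH_DESC_4KB_BOUND_CHECK():

#include <stdbool.h>
#include <stdio.h>

/* Assumed shape of the test: does [addr, addr + len) cross a 4 KB line? */
static bool crosses_4k(unsigned long addr, unsigned long len)
{
	return (addr & 0xFFF) + len > 0x1000;
}

int main(void)
{
	/* A 128-byte descriptor ending exactly at the boundary is fine;
	 * starting one byte later it crosses and must be skipped. */
	printf("%d %d\n", (int)crosses_4k(0x0F80, 0x80),
	       (int)crosses_4k(0x0F81, 0x80));
	return 0;
}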
Example #12
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
	int ret = 0;
	int cnt;
	int i;
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	char *name;
	u32 *clkfreq = NULL;
	struct ufs_clk_info *clki;
	int len = 0;
	size_t sz = 0;

	if (!np)
		goto out;

	INIT_LIST_HEAD(&hba->clk_list_head);

	cnt = of_property_count_strings(np, "clock-names");
	if (!cnt || (cnt == -EINVAL)) {
		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
				__func__);
	} else if (cnt < 0) {
		dev_err(dev, "%s: count clock strings failed, err %d\n",
				__func__, cnt);
		ret = cnt;
	}

	if (cnt <= 0)
		goto out;

	if (!of_get_property(np, "freq-table-hz", &len)) {
		dev_info(dev, "freq-table-hz property not specified\n");
		goto out;
	}

	if (len <= 0)
		goto out;

	sz = len / sizeof(*clkfreq);
	if (sz != 2 * cnt) {
		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
		ret = -EINVAL;
		goto out;
	}

	clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
			GFP_KERNEL);
	if (!clkfreq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, "freq-table-hz",
			clkfreq, sz);
	if (ret && (ret != -EINVAL)) {
		dev_err(dev, "%s: error reading array %d\n",
				"freq-table-hz", ret);
		return ret;
	}

	for (i = 0; i < sz; i += 2) {
		ret = of_property_read_string_index(np,
				"clock-names", i/2, (const char **)&name);
		if (ret)
			goto out;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki) {
			ret = -ENOMEM;
			goto out;
		}

		clki->min_freq = clkfreq[i];
		clki->max_freq = clkfreq[i+1];
		clki->name = kstrdup(name, GFP_KERNEL);
		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
				clki->min_freq, clki->max_freq, clki->name);
		list_add_tail(&clki->list, &hba->clk_list_head);
	}
out:
	return ret;
}
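The parser expects freq-table-hz to carry one (min, max) pair per clock-names entry, which is exactly what the sz != 2 * cnt check and the i += 2 walk enforce. The flattened-pairs convention in isolation:

#include <stdio.h>

int main(void)
{
	/* Flattened (min, max) pairs, as read from freq-table-hz. */
	unsigned int clkfreq[] = { 19200000, 19200000, 100000000, 200000000 };
	size_t i, sz = sizeof(clkfreq) / sizeof(clkfreq[0]);

	for (i = 0; i < sz; i += 2)
		printf("clock %zu: min %u Hz, max %u Hz\n",
		       i / 2, clkfreq[i], clkfreq[i + 1]);
	return 0;
}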
Example #13
static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent)
{
	struct saa7146_pci_extension_data *pci_ext = (struct saa7146_pci_extension_data *)ent->driver_data;
	struct saa7146_extension *ext = pci_ext->ext;
	struct saa7146_dev *dev;
	int err = -ENOMEM;

	/* clear out mem for sure */
	dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
	if (!dev) {
		ERR(("out of memory.\n"));
		goto out;
	}

	DEB_EE(("pci:%p\n",pci));

	err = pci_enable_device(pci);
	if (err < 0) {
		ERR(("pci_enable_device() failed.\n"));
		goto err_free;
	}

	/* enable bus-mastering */
	pci_set_master(pci);

	dev->pci = pci;

	/* get chip-revision; this is needed to enable bug-fixes */
	dev->revision = pci->revision;

	/* remap the memory from virtual to physical address */

	err = pci_request_region(pci, 0, "saa7146");
	if (err < 0)
		goto err_disable;

	dev->mem = ioremap(pci_resource_start(pci, 0),
			   pci_resource_len(pci, 0));
	if (!dev->mem) {
		ERR(("ioremap() failed.\n"));
		err = -ENODEV;
		goto err_release;
	}

	/* we don't do a master reset here anymore, it screws up
	   some boards that don't have an i2c-eeprom for configuration
	   values */
/*
	saa7146_write(dev, MC1, MASK_31);
*/

	/* disable all irqs */
	saa7146_write(dev, IER, 0);

	/* shut down all dma transfers and rps tasks */
	saa7146_write(dev, MC1, 0x30ff0000);

	/* clear out any rps-signals pending */
	saa7146_write(dev, MC2, 0xf8000000);

	/* request an interrupt for the saa7146 */
	err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED,
			  dev->name, dev);
	if (err < 0) {
		ERR(("request_irq() failed.\n"));
		goto err_unmap;
	}

	err = -ENOMEM;

	/* get memory for various stuff */
	dev->d_rps0.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
						    &dev->d_rps0.dma_handle);
	if (!dev->d_rps0.cpu_addr)
		goto err_free_irq;
	memset(dev->d_rps0.cpu_addr, 0x0, SAA7146_RPS_MEM);

	dev->d_rps1.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
						    &dev->d_rps1.dma_handle);
	if (!dev->d_rps1.cpu_addr)
		goto err_free_rps0;
	memset(dev->d_rps1.cpu_addr, 0x0, SAA7146_RPS_MEM);

	dev->d_i2c.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
						   &dev->d_i2c.dma_handle);
	if (!dev->d_i2c.cpu_addr)
		goto err_free_rps1;
	memset(dev->d_i2c.cpu_addr, 0x0, SAA7146_RPS_MEM);

	/* the rest + print status message */

	/* create a nice device name */
	sprintf(dev->name, "saa7146 (%d)", saa7146_num);

	INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
	dev->ext = ext;

	mutex_init(&dev->v4l2_lock);
	spin_lock_init(&dev->int_slock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->i2c_lock);

	dev->module = THIS_MODULE;
	init_waitqueue_head(&dev->i2c_wq);

	/* set some sane pci arbitration values */
	saa7146_write(dev, PCI_BT_V1, 0x1c00101f);

	/* TODO: use the status code of the callback */

	err = -ENODEV;

	if (ext->probe && ext->probe(dev)) {
		DEB_D(("ext->probe() failed for %p. skipping device.\n",dev));
		goto err_free_i2c;
	}

	if (ext->attach(dev, pci_ext)) {
		DEB_D(("ext->attach() failed for %p. skipping device.\n",dev));
		goto err_free_i2c;
	}
	/* V4L extensions will set the pci drvdata to the v4l2_device in the
	   attach() above. So for those cards that do not use V4L we have to
	   set it explicitly. */
	pci_set_drvdata(pci, &dev->v4l2_dev);

	INIT_LIST_HEAD(&dev->item);
	list_add_tail(&dev->item,&saa7146_devices);
	saa7146_num++;

	err = 0;
out:
	return err;

err_free_i2c:
	pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_i2c.cpu_addr,
			    dev->d_i2c.dma_handle);
err_free_rps1:
	pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_rps1.cpu_addr,
			    dev->d_rps1.dma_handle);
err_free_rps0:
	pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_rps0.cpu_addr,
			    dev->d_rps0.dma_handle);
err_free_irq:
	free_irq(pci->irq, (void *)dev);
err_unmap:
	iounmap(dev->mem);
err_release:
	pci_release_region(pci, 0);
err_disable:
	pci_disable_device(pci);
err_free:
	kfree(dev);
	goto out;
}
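The error path above is the standard goto unwind ladder: resources are acquired in order, and each label releases exactly what had succeeded before the jump, in reverse. The idiom in miniature (step_a()/step_b() are illustrative stubs, not saa7146 calls):

/* Illustrative stubs standing in for pci_enable_device(), ioremap(), etc. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static void undo_step_a(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto undo_a;
	return 0;		/* fully constructed */

undo_a:
	undo_step_a();		/* release in reverse order of acquisition */
out:
	return err;
}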
Example #14
static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_del(&ejeb->list);
			list_add_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}
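Both -EAGAIN returns above mean "the erase_completion_lock was dropped and state may have changed; try again", so callers loop. A simplified sketch of that retry convention (the real caller, jffs2_reserve_space(), does more work between attempts):

/* Sketch: retry until a nextblock is picked or a hard error appears. */
static int pick_nextblock(struct jffs2_sb_info *c)
{
	int ret;

	spin_lock(&c->erase_completion_lock);
	do {
		ret = jffs2_find_nextblock(c);
	} while (ret == -EAGAIN);	/* lock was dropped; re-evaluate */
	spin_unlock(&c->erase_completion_lock);
	return ret;
}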
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_device_cmd *cmd;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;
	int index, cmd_index;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len, err;
		u16 sequence;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		if (WARN_ON(rxb == NULL)) {
			i = (i + 1) & RX_QUEUE_MASK;
			continue;
		}

		rxq->queue[i] = NULL;

		dma_unmap_page(bus(trans)->dev, rxb->page_dma,
			       PAGE_SIZE << hw_params(trans).rx_page_order,
			       DMA_FROM_DEVICE);
		pkt = rxb_addr(rxb);

		IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
			i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv(trans), pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->cmd[cmd_index];
		else
			cmd = NULL;

		/* warn if this is cmd response / notification and the uCode
		 * didn't set the SEQ_RX_FRAME for a frame that is
		 * uCode-originated
		 * If you saw this code after the second half of 2012, then
		 * please remove it
		 */
		WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
		     (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
		     "reclaim is false, SEQ_RX_FRAME unset: %s\n",
		     get_cmd_string(pkt->hdr.cmd));

		err = iwl_rx_dispatch(priv(trans), rxb, cmd);

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(trans, rxb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
				0, PAGE_SIZE <<
				    hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		mspin_node_t	    node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		   (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
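The signal_pending_state() branch above is where mutex_lock_interruptible() gets its -EINTR return. A typical driver-side caller (dev->lock is an illustrative mutex, not a specific subsystem's):

/* Sketch: take the lock unless a signal interrupts the wait. */
static int dev_do_work(struct dev_state *dev)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* a signal arrived while sleeping */

	/* ... critical section ... */

	mutex_unlock(&dev->lock);
	return 0;
}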
Example #17
static int
mic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int brdnum = mic_data.dd_numdevs;
	int err = 0;
	bd_info_t *bd_info;
	mic_ctx_t *mic_ctx;
#ifdef CONFIG_PCI_MSI
	int i=0;
#endif
	if ((bd_info = (bd_info_t *)kzalloc(sizeof(bd_info_t), GFP_KERNEL)) == NULL) {
		printk("MIC: probe failed allocating memory for bd_info\n");
		return -ENOSPC;
	}

	mic_ctx = &bd_info->bi_ctx;
	mic_ctx->bd_info = bd_info;
	mic_ctx->bi_id = brdnum;
	mic_ctx->bi_pdev = pdev;
	mic_ctx->msie = 0;
	mic_data.dd_bi[brdnum] = bd_info;

	if ((err = pci_enable_device(pdev))) {
		printk("pci_enable failed board #%d\n", brdnum);
		goto probe_freebd;
	}

	pci_set_master(pdev);
	err = pci_reenable_device(pdev);
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		printk("mic %d: ERROR DMA not available\n", brdnum);
		goto probe_freebd;
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		printk("mic %d: ERROR pci_set_consistent_dma_mask(64) %d\n", brdnum, err);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk("mic %d: ERROR pci_set_consistent_dma_mask(32) %d\n", brdnum, err);
			goto probe_freebd;
		}
	}

	// Allocate bar 4 for MMIO and GTT
	bd_info->bi_ctx.mmio.pa = pci_resource_start(pdev, DLDR_MMIO_BAR);
	bd_info->bi_ctx.mmio.len = pci_resource_len(pdev, DLDR_MMIO_BAR);
	if (request_mem_region(bd_info->bi_ctx.mmio.pa,
	    bd_info->bi_ctx.mmio.len, "mic") == NULL) {
		printk("mic %d: failed to reserve mmio space\n", brdnum);
		goto probe_freebd;
	}

	// Allocate bar 0 for access Aperture
	bd_info->bi_ctx.aper.pa = pci_resource_start(pdev, DLDR_APT_BAR);
	bd_info->bi_ctx.aper.len = pci_resource_len(pdev, DLDR_APT_BAR);
	if (request_mem_region(bd_info->bi_ctx.aper.pa,
	    bd_info->bi_ctx.aper.len, "mic") == NULL) {
		printk("mic %d: failed to reserve aperture space\n", brdnum);
		goto probe_relmmio;
	}

#ifdef CONFIG_PCI_MSI
	if (mic_msi_enable) {
		for (i = 0; i < MIC_NUM_MSIX_ENTRIES; i++)
			bd_info->bi_msix_entries[i].entry = i;
		err = pci_enable_msix(mic_ctx->bi_pdev, bd_info->bi_msix_entries,
				      MIC_NUM_MSIX_ENTRIES);
		if (err == 0) {
			// Only support 1 MSIx for now
			err = request_irq(bd_info->bi_msix_entries[0].vector,
					  mic_irq_isr, 0, "mic", mic_ctx);
			if (err != 0) {
				printk("MIC: Error in request_irq %d\n", err);
				goto probe_relaper;
			}
			mic_ctx->msie = 1;
		}
	}
#endif

	// TODO: this needs to be hardened and actually return errors
	if ((err = adapter_init_device(mic_ctx)) != 0) {
		printk("MIC: Adapter init device failed %d\n", err);
		goto probe_relaper;
	}

	// Adding sysfs entries
	set_sysfs_entries(mic_ctx);

	bd_info->bi_sysfsdev = device_create(mic_lindata.dd_class, &pdev->dev,
			mic_lindata.dd_dev + 2 + mic_ctx->bd_info->bi_ctx.bi_id,
			NULL, "mic%d", mic_ctx->bd_info->bi_ctx.bi_id);
	err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj, &bd_attr_group);
	mic_ctx->sysfs_state = sysfs_get_dirent(mic_ctx->bd_info->bi_sysfsdev->kobj.sd,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) && LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
				NULL,
#endif
				"state");

	dev_set_drvdata(mic_ctx->bd_info->bi_sysfsdev, mic_ctx);

	if (!mic_ctx->msie)
		if ((err = request_irq(mic_ctx->bi_pdev->irq, mic_irq_isr,
				       IRQF_SHARED, "mic", mic_ctx)) != 0) {
			printk("MIC: Error in request_irq %d\n", err);
			goto probe_unmapaper;
		}

	adapter_probe(&bd_info->bi_ctx);

	if (mic_ctx->bi_psmi.enabled) {
		err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj,
						&psmi_attr_group);
		err = device_create_bin_file(mic_ctx->bd_info->bi_sysfsdev,
						&mic_psmi_ptes_attr);
	}

	adapter_wait_reset(mic_ctx);

	// Adding a board instance, so increment the total number of MICs in the system.
	list_add_tail(&bd_info->bi_list, &mic_data.dd_bdlist);
	mic_data.dd_numdevs++;
	printk("mic_probe %d:%d:%d as board #%d\n", pdev->bus->number,
	       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), brdnum);
	return 0;

probe_unmapaper:
	wait_event(mic_ctx->ioremapwq, mic_ctx->aper.va || mic_ctx->state == MIC_RESETFAIL);
	if (mic_ctx->aper.va)
		iounmap((void *)bd_info->bi_ctx.aper.va);
	iounmap((void *)bd_info->bi_ctx.mmio.va);

probe_relaper:
	release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);

probe_relmmio:
	release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);

probe_freebd:
	kfree(bd_info);
	return err;
}
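
The probe above unwinds with the kernel's goto-ladder idiom: labels release resources in the reverse order of their acquisition, and each failure jumps to the label that frees everything acquired so far. A minimal, self-contained sketch of the pattern (hypothetical resources, not part of the MIC driver):

static int example_probe(void)
{
	void *ctx, *buf;
	int err;

	ctx = kzalloc(64, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	buf = kzalloc(4096, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_free_ctx;	/* unwind in reverse acquisition order */
	}

	/* ... more acquisitions add more labels ... */
	return 0;

err_free_ctx:
	kfree(ctx);
	return err;
}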
Beispiel #18
0
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many frames need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
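
The read cursor above advances with "(i + 1) & RX_QUEUE_MASK", which only works because the queue size is a power of two. A tiny sketch, assuming RX_QUEUE_SIZE is 256 (so the mask is 255):

	u32 i = 255;			/* last slot of the ring */
	i = (i + 1) & 255;		/* wraps to 0 without a branch */
	/* equivalent to, but cheaper than: i = (i + 1) % 256; */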

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: some bits in CSR_INT are "OR"s of bits in CSR_FH_INT_STATUS.
	 *
	 * There is a hardware bug in the interrupt mask function: some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
	 * ICT interrupt handling mechanism has another bug that may cause
	 * these unmasked interrupts to go undetected. We work around the
	 * hardware bugs here by ACKing all possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* Interrupt causes are saved in the local inta; now we can clear trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notifications), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires several steps to be done
		 * in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver may receive the
		 * RX interrupt before the shared data reflects the change;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
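
The handler ORs each serviced cause into "handled" so that whatever is left in "inta & ~handled" can be reported as unserviced at the end. A minimal sketch of the same bookkeeping, with hypothetical cause bits:

	u32 inta = 0x09;		/* pending causes: bit 0 and bit 3 */
	u32 handled = 0;

	if (inta & 0x01)		/* service cause A ... */
		handled |= 0x01;	/* ... and record that we did */

	if (inta & ~handled)		/* 0x08 was never serviced */
		pr_warn("unhandled causes 0x%08x\n", inta & ~handled);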

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM-shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT-table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
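
Worked numbers for the macros above: an ICT_SHIFT of 12 gives ICT_SIZE = 4096 bytes and ICT_COUNT = 4096 / sizeof(u32) = 1024 entries. The WARN_ON in iwl_pcie_alloc_ict is plain mask arithmetic over those values; a sketch with a hypothetical address:

	dma_addr_t dma = 0x3df6000;		/* hypothetical table address */
	bool ok = !(dma & (ICT_SIZE - 1));	/* true: low 12 bits are zero */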

/* The device is going up; inform it that we are using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* The device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;
	/* the thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to service;
	 * only re-enable if they were disabled by the irq handler and no
	 * tasklet is scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler the driver stops
 * reading the INTA register to discover the device's interrupts, which is
 * expensive. Instead, the device writes interrupts into the ICT DRAM table,
 * increments its index, and then fires an interrupt to the driver. The driver
 * ORs all ICT entries from the current index up to the first zero entry; the
 * result is the set of interrupts to service. The driver then sets the
 * entries back to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Interrupts were disabled by this handler and no thread was
		 * woken, so it is safe to re-enable them here; otherwise the
		 * woken thread re-enables them itself.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* Re-enable interrupts here since we don't have anything to service;
	 * only re-enable if they were disabled by the irq handler.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
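
The drain loop in iwl_pcie_isr_ict, reduced to its essentials: starting at the current index, OR up entries until the first zero entry, clearing each slot as it is consumed. A self-contained sketch over a plain array (hypothetical table contents):

	u32 tbl[8] = { 0x2, 0x8, 0, 0, 0, 0, 0, 0 };
	unsigned int idx = 0;
	u32 val = 0, read = tbl[idx];

	while (read) {
		val |= read;
		tbl[idx] = 0;			/* hand the slot back to the device */
		idx = (idx + 1) & 7;		/* wrap, like iwl_queue_inc_wrap */
		read = tbl[idx];
	}
	/* val == 0xA: the accumulated causes to service */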
Beispiel #19
0
static void audio_mvs_process_rpc_request(uint32_t procedure,
					  uint32_t xid,
					  void *data,
					  uint32_t length,
					  struct audio_mvs_info_type *audio)
{
	int rc = 0;

	pr_debug("%s:\n", __func__);

	switch (procedure) {
	case MVS_EVENT_CB_TYPE_PROC: {
		struct audio_mvs_cb_func_args *args = data;
		struct rpc_reply_hdr reply_hdr;

		pr_debug("%s: MVS CB CB_FUNC_ID 0x%x\n",
			 __func__, be32_to_cpu(args->cb_func_id));

		if (be32_to_cpu(args->valid_ptr)) {
			uint32_t event_type = be32_to_cpu(args->event);

			pr_debug("%s: MVS CB event type %d\n",
				 __func__, be32_to_cpu(args->event));

			if (event_type == AUDIO_MVS_COMMAND) {
				uint32_t cmd_status = be32_to_cpu(
			args->event_data.mvs_ev_command_type.cmd_status);

				pr_debug("%s: MVS CB command status %d\n",
					 __func__, cmd_status);

				if (cmd_status == AUDIO_MVS_CMD_SUCCESS)
					audio->rpc_status = RPC_STATUS_SUCCESS;

				wake_up(&audio->wait);
			} else if (event_type == AUDIO_MVS_MODE) {
				uint32_t mode_status = be32_to_cpu(
				args->event_data.mvs_ev_mode_type.mode_status);

				pr_debug("%s: MVS CB mode status %d\n",
					 __func__, mode_status);

				if (mode_status != AUDIO_MVS_MODE_NOT_AVAIL)
					audio->rpc_status = RPC_STATUS_SUCCESS;

				wake_up(&audio->wait);
			} else {
				pr_err("%s: MVS CB unknown event type %d\n",
				       __func__, event_type);
			}
		} else {
			pr_err("%s: MVS CB event pointer not valid\n",
			       __func__);
		}

		/* Send ack to modem. */
		memset(&reply_hdr, 0, sizeof(reply_hdr));
		reply_hdr.xid = cpu_to_be32(xid);
		reply_hdr.type = cpu_to_be32(RPC_TYPE_REPLY);
		reply_hdr.reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED);

		reply_hdr.data.acc_hdr.accept_stat = cpu_to_be32(
			RPC_ACCEPTSTAT_SUCCESS);
		reply_hdr.data.acc_hdr.verf_flavor = 0;
		reply_hdr.data.acc_hdr.verf_length = 0;

		rc = msm_rpc_write(audio->rpc_endpt,
				   &reply_hdr,
				   sizeof(reply_hdr));

		if (rc < 0)
			pr_err("%s: RPC write for response failed %d\n",
			       __func__, rc);

		break;
	}

	case MVS_PACKET_UL_FN_TYPE_PROC: {
		uint32_t *args = data;
		uint32_t pkt_len;
		uint32_t frame_mode;
		struct audio_mvs_ul_reply ul_reply;
		struct audio_mvs_buf_node *buf_node = NULL;

		pr_debug("%s: MVS UL CB_FUNC_ID 0x%x\n",
			 __func__, be32_to_cpu(*args));
		args++;

		pkt_len = be32_to_cpu(*args);
		pr_debug("%s: UL pkt_len %d\n", __func__, pkt_len);
		args++;

		/* Copy the vocoder packets. */
		mutex_lock(&audio->out_lock);

		if (!list_empty(&audio->free_out_queue)) {
			buf_node = list_first_entry(&audio->free_out_queue,
						    struct audio_mvs_buf_node,
						    list);
			list_del(&buf_node->list);

			memcpy(&buf_node->frame.voc_pkt[0], args, pkt_len);
			buf_node->frame.len = pkt_len;
			pkt_len = ALIGN(pkt_len, 4);
			args = args + pkt_len/4;

			pr_debug("%s: UL valid_ptr 0x%x\n",
				 __func__, be32_to_cpu(*args));
			args++;

			frame_mode = be32_to_cpu(*args);
			pr_debug("%s: UL frame_mode %d\n",
				 __func__, frame_mode);
			args++;

			pr_debug("%s: UL frame_mode %d\n",
				 __func__, be32_to_cpu(*args));
			args++;

			pr_debug("%s: UL frame_mode %d\n",
				 __func__, be32_to_cpu(*args));
			args++;

			pr_debug("%s: UL mvs_mode %d\n",
				 __func__, be32_to_cpu(*args));
			args++;

			pr_debug("%s: UL buf_free_cnt %d\n",
				 __func__, be32_to_cpu(*args));
			args++;

			if (frame_mode == MVS_FRAME_MODE_AMR_UL) {
				/* Extract AMR frame type. */
				buf_node->frame.frame_type = be32_to_cpu(*args);

				pr_debug("%s: UL AMR frame_type %d\n",
					 __func__, be32_to_cpu(*args));
			} else if ((frame_mode == MVS_FRAME_MODE_PCM_UL) ||
				   (frame_mode == MVS_FRAME_MODE_VOC_TX)) {
				/* PCM and EVRC don't have frame_type */
				buf_node->frame.frame_type = 0;
			} else {
				pr_err("%s: UL Unknown frame mode %d\n",
				       __func__, frame_mode);
			}

			list_add_tail(&buf_node->list, &audio->out_queue);
		} else {
			pr_err("%s: UL data dropped, read is slow\n", __func__);
		}

		mutex_unlock(&audio->out_lock);

		wake_up(&audio->out_wait);

		/* Send UL message accept to modem. */
		memset(&ul_reply, 0, sizeof(ul_reply));
		ul_reply.reply_hdr.xid = cpu_to_be32(xid);
		ul_reply.reply_hdr.type = cpu_to_be32(RPC_TYPE_REPLY);
		ul_reply.reply_hdr.reply_stat = cpu_to_be32(
			RPCMSG_REPLYSTAT_ACCEPTED);

		ul_reply.reply_hdr.data.acc_hdr.accept_stat = cpu_to_be32(
			RPC_ACCEPTSTAT_SUCCESS);
		ul_reply.reply_hdr.data.acc_hdr.verf_flavor = 0;
		ul_reply.reply_hdr.data.acc_hdr.verf_length = 0;

		ul_reply.valid_pkt_status_ptr = cpu_to_be32(0x00000001);
		ul_reply.pkt_status = cpu_to_be32(0x00000000);

		rc = msm_rpc_write(audio->rpc_endpt,
				   &ul_reply,
				   sizeof(ul_reply));

		if (rc < 0)
			pr_err("%s: RPC write for UL response failed %d\n",
			       __func__, rc);

		break;
	}
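
The UL handler above walks the marshalled RPC arguments with a u32 cursor: each fixed field is byte-swapped with be32_to_cpu(), and the variable-length packet payload is stepped over in 4-byte units after padding. A minimal sketch of that cursor pattern (rpc_data and the field layout are assumptions for illustration):

	u32 *args = (u32 *)rpc_data;		/* network-order u32 stream */
	u32 cb_id, pkt_len;

	cb_id = be32_to_cpu(*args);		/* fixed field 1 */
	args++;
	pkt_len = be32_to_cpu(*args);		/* fixed field 2 */
	args++;
	args += ALIGN(pkt_len, 4) / 4;		/* step over the padded payload */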
Beispiel #20
0
int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
{
	struct usbhsg_gpriv *gpriv;
	struct usbhsg_uep *uep;
	struct device *dev = usbhs_priv_to_dev(priv);
	struct renesas_usbhs_driver_pipe_config *pipe_configs =
					usbhs_get_dparam(priv, pipe_configs);
	int pipe_size = usbhs_get_dparam(priv, pipe_size);
	int i;
	int ret;

	gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL);
	if (!gpriv) {
		dev_err(dev, "Could not allocate gadget priv\n");
		return -ENOMEM;
	}

	uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL);
	if (!uep) {
		dev_err(dev, "Could not allocate ep\n");
		ret = -ENOMEM;
		goto usbhs_mod_gadget_probe_err_gpriv;
	}

	gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
	dev_info(dev, "%stransceiver found\n",
		 gpriv->transceiver ? "" : "no ");

	/*
	 * CAUTION
	 *
	 * There is no guarantee that the USB module is accessible at this
	 * point. Don't access it yet; access only becomes possible after
	 * "usbhsg_start".
	 */

	/*
	 * register itself
	 */
	usbhs_mod_register(priv, &gpriv->mod, USBHS_GADGET);

	/* init gpriv */
	gpriv->mod.name		= "gadget";
	gpriv->mod.start	= usbhsg_start;
	gpriv->mod.stop		= usbhsg_stop;
	gpriv->uep		= uep;
	gpriv->uep_size		= pipe_size;
	usbhsg_status_init(gpriv);

	/*
	 * init gadget
	 */
	gpriv->gadget.dev.parent	= dev;
	gpriv->gadget.name		= "renesas_usbhs_udc";
	gpriv->gadget.ops		= &usbhsg_gadget_ops;
	gpriv->gadget.max_speed		= USB_SPEED_HIGH;

	INIT_LIST_HEAD(&gpriv->gadget.ep_list);

	/*
	 * init usb_ep
	 */
	usbhsg_for_each_uep_with_dcp(uep, gpriv, i) {
		uep->gpriv	= gpriv;
		uep->pipe	= NULL;
		snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i);

		uep->ep.name		= uep->ep_name;
		uep->ep.ops		= &usbhsg_ep_ops;
		INIT_LIST_HEAD(&uep->ep.ep_list);

		/* init DCP */
		if (usbhsg_is_dcp(uep)) {
			gpriv->gadget.ep0 = &uep->ep;
			usb_ep_set_maxpacket_limit(&uep->ep, 64);
			uep->ep.caps.type_control = true;
		} else {
			/* init normal pipe */
			if (pipe_configs[i].type == USB_ENDPOINT_XFER_ISOC)
				uep->ep.caps.type_iso = true;
			if (pipe_configs[i].type == USB_ENDPOINT_XFER_BULK)
				uep->ep.caps.type_bulk = true;
			if (pipe_configs[i].type == USB_ENDPOINT_XFER_INT)
				uep->ep.caps.type_int = true;
			usb_ep_set_maxpacket_limit(&uep->ep,
						   pipe_configs[i].bufsize);
			list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list);
		}
		uep->ep.caps.dir_in = true;
		uep->ep.caps.dir_out = true;
	}
Beispiel #21
0
static int __init sdma_probe(struct platform_device *pdev)
{
	int ret;
	int irq;
	struct resource *iores;
	struct sdma_platform_data *pdata = pdev->dev.platform_data;
	int i;
	struct sdma_engine *sdma;

	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	sdma->dev = &pdev->dev;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0 || !pdata) {
		ret = -EINVAL;
		goto err_irq;
	}

	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	sdma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdma->clk)) {
		ret = PTR_ERR(sdma->clk);
		goto err_clk;
	}

	sdma->regs = ioremap(iores->start, resource_size(iores));
	if (!sdma->regs) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = request_irq(irq, sdma_int_handler, 0, "xillybus_sdma", sdma);
	if (ret)
		goto err_request_irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	sdma->version = pdata->sdma_version;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		sdmac->channel = i;

		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
				      &sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_control = sdma_control;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	xillybus_handler = NULL; /* Just to be sure */
	xillybus_dev = sdma->dev;

	if ((ret = setup_fpga_interface(sdma))) {
		printk(KERN_ERR THIS "setup_fpga_interface() failed!\n");
		goto err_init;
	}

	return 0;

err_init:
	kfree(sdma->script_addrs);
err_alloc:
	free_irq(irq, sdma);
err_request_irq:
	iounmap(sdma->regs);
err_ioremap:
	clk_put(sdma->clk);
err_clk:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
err_irq:
	kfree(sdma);
	return ret;
}
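
On kernels that provide managed (devm_*) helpers, most of the unwind ladder above disappears because the device core releases everything automatically on probe failure or unbind. A hedged sketch of the same acquisitions, assuming the devm_* APIs are available in the target kernel:

static int sdma_probe_devm(struct platform_device *pdev)
{
	struct resource *iores;
	struct sdma_engine *sdma;
	int irq, ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))		/* also catches a NULL iores */
		return PTR_ERR(sdma->regs);

	sdma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdma->clk))
		return PTR_ERR(sdma->clk);

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
			       dev_name(&pdev->dev), sdma);
	if (ret)
		return ret;

	return 0;	/* no goto ladder: devm frees it all */
}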
Beispiel #22
0
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}
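
The user-handle allocation above relies on the idr API: idr_alloc() stores a pointer and hands back a small integer id at or above the requested start (an end of 0 means "no upper bound"). A minimal fragment, with ctx_idr and my_ctx as illustrative names:

	static DEFINE_IDR(ctx_idr);

	int id = idr_alloc(&ctx_idr, my_ctx, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */
	/* ... on teardown ... */
	idr_remove(&ctx_idr, id);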
/**
 * led_classdev_register - register a new object of led_classdev class.
 * @parent: The device to register.
 * @led_cdev: the led_classdev structure for this device.
 */
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
	int rc;

	led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
				      "%s", led_cdev->name);
	if (IS_ERR(led_cdev->dev))
		return PTR_ERR(led_cdev->dev);

	/* register the attributes */
	rc = device_create_file(led_cdev->dev, &led_class_attrs[2]);
	if (rc)
		goto err_out;

#ifdef CONFIG_LEDS_TRIGGERS
	init_rwsem(&led_cdev->trigger_lock);
#endif
	/* add to the list of leds */
	down_write(&leds_list_lock);
	list_add_tail(&led_cdev->node, &leds_list);
	up_write(&leds_list_lock);

	if (!led_cdev->max_brightness)
		led_cdev->max_brightness = LED_FULL;

	rc = device_create_file(led_cdev->dev, &led_class_attrs[3]);
	if (rc)
		goto err_out_attr_max;

	led_update_brightness(led_cdev);

#ifdef CONFIG_LEDS_TRIGGERS
#ifndef SUPPORT_LCD_ACL_CTL
	rc = device_create_file(led_cdev->dev, &led_class_attrs[4]);
#else
	rc = device_create_file(led_cdev->dev, &led_class_attrs[5]);
#endif
	if (rc)
		goto err_out_led_list;

	led_trigger_set_default(led_cdev);
#endif

#if 1 /* Archer custom feature */
	/* register the attributes */
	rc = device_create_file(led_cdev->dev, &led_class_attrs[1]);
	if (rc)
		goto err_out;

	led_update_lcd_gamma(led_cdev);


#ifdef SUPPORT_LCD_ACL_CTL
	/* register the attributes */
	rc = device_create_file(led_cdev->dev, &led_class_attrs[4]);
	if (rc)
		goto err_out;

	lcd_update_ACL_state(led_cdev);
#endif

	rc = device_create_file(led_cdev->dev, &led_class_attrs[0]);
	if (rc)
		goto err_out;

	led_update_flashlight(led_cdev);
#endif

	printk(KERN_INFO "Registered led device: %s\n", led_cdev->name);

	return 0;

#ifdef CONFIG_LEDS_TRIGGERS
err_out_led_list:
	device_remove_file(led_cdev->dev, &led_class_attrs[3]);

#endif
err_out_attr_max:
	device_remove_file(led_cdev->dev, &led_class_attrs[2]);
	device_remove_file(led_cdev->dev, &led_class_attrs[1]);
#ifdef SUPPORT_LCD_ACL_CTL	
	device_remove_file(led_cdev->dev, &led_class_attrs[4]);
#endif	
	list_del(&led_cdev->node);
err_out:
	device_unregister(led_cdev->dev);
	return rc;
}
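
The bare indices into led_class_attrs[] above are hard to audit; the array itself is presumably built with __ATTR initialisers, where the index is simply the position in the array. A sketch of that idiom with hypothetical attribute names:

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", 0);		/* placeholder value */
}

static struct device_attribute example_attrs[] = {
	__ATTR(foo, 0444, foo_show, NULL),	/* example_attrs[0] */
	__ATTR(bar, 0444, foo_show, NULL),	/* example_attrs[1] */
};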
Beispiel #24
0
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	struct thermal_hwmon_temp *temp;
	int new_hwmon_device = 1;
	int result;

	hwmon = thermal_hwmon_lookup_by_type(tz);
	if (hwmon) {
		new_hwmon_device = 0;
		goto register_sys_interface;
	}

	hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return -ENOMEM;

	INIT_LIST_HEAD(&hwmon->tz_list);
	strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
	hwmon->device = hwmon_device_register(NULL);
	if (IS_ERR(hwmon->device)) {
		result = PTR_ERR(hwmon->device);
		goto free_mem;
	}
	dev_set_drvdata(hwmon->device, hwmon);
	result = device_create_file(hwmon->device, &dev_attr_name);
	if (result) {
		hwmon_device_unregister(hwmon->device);
		goto free_mem;
	}

 register_sys_interface:
	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
	if (!temp) {
		result = -ENOMEM;
		goto unregister_name;
	}

	temp->tz = tz;
	hwmon->count++;

	snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
		 "temp%d_input", hwmon->count);
	temp->temp_input.attr.attr.name = temp->temp_input.name;
	temp->temp_input.attr.attr.mode = 0444;
	temp->temp_input.attr.show = temp_input_show;
	sysfs_attr_init(&temp->temp_input.attr.attr);
	result = device_create_file(hwmon->device, &temp->temp_input.attr);
	if (result)
		goto free_temp_mem;

	if (thermal_zone_crit_temp_valid(tz)) {
		snprintf(temp->temp_crit.name,
				sizeof(temp->temp_crit.name),
				"temp%d_crit", hwmon->count);
		temp->temp_crit.attr.attr.name = temp->temp_crit.name;
		temp->temp_crit.attr.attr.mode = 0444;
		temp->temp_crit.attr.show = temp_crit_show;
		sysfs_attr_init(&temp->temp_crit.attr.attr);
		result = device_create_file(hwmon->device,
					    &temp->temp_crit.attr);
		if (result)
			goto unregister_input;
	}

	mutex_lock(&thermal_hwmon_list_lock);
	if (new_hwmon_device)
		list_add_tail(&hwmon->node, &thermal_hwmon_list);
	list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
	mutex_unlock(&thermal_hwmon_list_lock);

	return 0;

 unregister_input:
	device_remove_file(hwmon->device, &temp->temp_input.attr);
 free_temp_mem:
	kfree(temp);
 unregister_name:
	if (new_hwmon_device) {
		device_remove_file(hwmon->device, &dev_attr_name);
		hwmon_device_unregister(hwmon->device);
	}
 free_mem:
	if (new_hwmon_device)
		kfree(hwmon);

	return result;
}
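
Note the sysfs_attr_init() calls above: they are required for dynamically allocated attributes (they initialise the lockdep class key), whereas statically declared attributes get this from their DEVICE_ATTR definition. A condensed fragment of the dynamic pattern, with the wrapper type hypothetical and dev/result assumed from context:

	struct my_temp_attr {			/* hypothetical wrapper */
		struct device_attribute attr;
		char name[20];
	} *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;
	snprintf(t->name, sizeof(t->name), "temp%d_input", 1);
	t->attr.attr.name = t->name;
	t->attr.attr.mode = 0444;
	t->attr.show = temp_input_show;		/* show callback as above */
	sysfs_attr_init(&t->attr.attr);		/* mandatory for dynamic attrs */
	result = device_create_file(dev, &t->attr);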
Beispiel #25
0
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
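
For reference, the userspace side of this path: msgsnd(2) supplies the type word and payload that do_msgsnd() unpacks, and IPC_NOWAIT turns the "queue full, wait" branch above into an immediate EAGAIN. A small, self-contained usage sketch:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	int msqid = msgget(IPC_PRIVATE, 0600);
	struct { long mtype; char mtext[64]; } m = { .mtype = 1 };

	strcpy(m.mtext, "ping");
	/* msgsz counts only the payload, not the mtype word */
	if (msgsnd(msqid, &m, sizeof(m.mtext), IPC_NOWAIT) == -1)
		perror("msgsnd");	/* EAGAIN if the queue is full */
	return 0;
}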
Beispiel #26
0
/* All actions that we need after sending hello on passive conn:
 * 1) Cope with 1st easy case: conn is already linked to a peer
 * 2) Cope with 2nd easy case: remove zombie conn
 * 3) Resolve race:
 *    a) find the peer
 *    b) link the conn to the peer if conn[idx] is empty
 *    c) if the conn[idx] isn't empty and is in READY state,
 *       remove the conn as duplicated
 *    d) if the conn[idx] isn't empty and isn't in READY state,
 *       override conn[idx] with the conn
 */
int
usocklnd_passiveconn_hellosent(usock_conn_t *conn)
{
	usock_conn_t    *conn2;
	usock_peer_t    *peer;
	struct list_head tx_list;
	struct list_head zcack_list;
	int              idx;
	int              rc = 0;

	/* almost nothing to do if conn is already linked to peer hash table */
	if (conn->uc_peer != NULL)
		goto passive_hellosent_done;

	/* conn->uc_peer == NULL, so the conn isn't reachable via the
	 * peer hash list and nobody can touch the conn but us */

	if (conn->uc_ni == NULL) /* remove zombie conn */
		goto passive_hellosent_connkill;

	/* all code below is race resolution, because normally a
	 * passive conn is linked to its peer just after receiving hello */
	CFS_INIT_LIST_HEAD (&tx_list);
	CFS_INIT_LIST_HEAD (&zcack_list);

	/* conn is passive and isn't linked to any peer,
	   so its tx and zc_ack lists have to be empty */
	LASSERT (list_empty(&conn->uc_tx_list) &&
		 list_empty(&conn->uc_zcack_list) &&
		 conn->uc_sending == 0);

	rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
	if (rc)
		return rc;

	idx = usocklnd_type2idx(conn->uc_type);

	/* try to link conn to peer */
	pthread_mutex_lock(&peer->up_lock);
	if (peer->up_conns[idx] == NULL) {
		usocklnd_link_conn_to_peer(conn, peer, idx);
		usocklnd_conn_addref(conn);
		conn->uc_peer = peer;
		usocklnd_peer_addref(peer);
	} else {
		conn2 = peer->up_conns[idx];
		pthread_mutex_lock(&conn2->uc_lock);

		if (conn2->uc_state == UC_READY) {
			/* conn2 is in READY state, so conn is "duplicated" */
			pthread_mutex_unlock(&conn2->uc_lock);
			pthread_mutex_unlock(&peer->up_lock);
			usocklnd_peer_decref(peer);
			usocklnd_conn_kill(conn2);
			goto passive_hellosent_connkill;
		}

		/* uc_state != UC_READY => switch conn and conn2 */
		/* Relink txs and zc_acks from conn2 to conn.
		 * We're sure that nobody but us can access the conn;
		 * nevertheless we take the mutex, because if we're wrong
		 * a deadlock is easier to spot than a corrupted list. */
		list_add(&tx_list, &conn2->uc_tx_list);
		list_del_init(&conn2->uc_tx_list);
		list_add(&zcack_list, &conn2->uc_zcack_list);
		list_del_init(&conn2->uc_zcack_list);

		pthread_mutex_lock(&conn->uc_lock);
		list_add_tail(&conn->uc_tx_list, &tx_list);
		list_del_init(&tx_list);
		list_add_tail(&conn->uc_zcack_list, &zcack_list);
		list_del_init(&zcack_list);
		conn->uc_peer = peer;
		pthread_mutex_unlock(&conn->uc_lock);

		conn2->uc_peer = NULL; /* make conn2 zombie */
		pthread_mutex_unlock(&conn2->uc_lock);
		usocklnd_conn_decref(conn2);

		usocklnd_link_conn_to_peer(conn, peer, idx);
		usocklnd_conn_addref(conn);
		conn->uc_peer = peer;
	}

	lnet_ni_decref(conn->uc_ni);
	conn->uc_ni = NULL;
	pthread_mutex_unlock(&peer->up_lock);
	usocklnd_peer_decref(peer);

  passive_hellosent_done:
	/* safely transit to UC_READY state */
	/* rc == 0 */
	pthread_mutex_lock(&conn->uc_lock);
	if (conn->uc_state != UC_DEAD) {
		usocklnd_rx_ksmhdr_state_transition(conn);

		/* we're ready to receive incoming packets and may
		   already have something to transmit */
		LASSERT (conn->uc_sending == 0);
		if ( list_empty(&conn->uc_tx_list) &&
		     list_empty(&conn->uc_zcack_list) ) {
			conn->uc_tx_flag = 0;
			rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
						 POLLIN);
		} else {
			conn->uc_tx_deadline =
				cfs_time_shift(usock_tuns.ut_timeout);
			conn->uc_tx_flag = 1;
			rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
						      POLLIN | POLLOUT);
		}

		if (rc == 0)
			conn->uc_state = UC_READY;
	}
	pthread_mutex_unlock(&conn->uc_lock);
	return rc;

  passive_hellosent_connkill:
	usocklnd_conn_kill(conn);
	return 0;
}
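
The add/del_init pairs above move every entry from conn2's lists onto temporary heads by splicing list heads around. The kernel list API has a dedicated helper for this; a hedged sketch of the equivalent (the userspace Lustre code may predate or deliberately avoid it):

	LIST_HEAD(tx_tmp);

	/* Moves all of conn2's tx entries onto tx_tmp and reinitialises
	 * conn2->uc_tx_list in one call. */
	list_splice_init(&conn2->uc_tx_list, &tx_tmp);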
Beispiel #27
0
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;
#if !defined(CONFIG_MDM_HSIC_PM)
	/* Whether or not the bridge is open, resume to consume modem
	 * requests, because this link is not dead; it is alive.
	 */
	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;
#endif

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
#ifdef CONFIG_MDM_HSIC_PM
		/* Whether or not the bridge is open, resume to consume modem
		 * requests, because this link is not dead; it is alive.
		 */
		if (!brdg) {
			print_hex_dump(KERN_INFO, "dun:", 0, 1, 1, skb->data,
							skb->len, false);
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off the sk_buff to the client; they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (hw_params(trans).rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				  hw_params(trans).rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   hw_params(trans).rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, hw_params(trans).rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
Beispiel #29
0
static void insert_dt_entry_in_queue(struct dt_entry_node *dt_list, struct dt_entry_node *dt_node_member)
{
	list_add_tail(&dt_list->node, &dt_node_member->node);
}
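
A note on the argument order, since the wrapper above is easy to misread: list_add_tail(new, head) inserts "new" immediately before "head", so with a list head as the second argument it appends at the tail. A tiny sketch reusing the node type above:

	LIST_HEAD(queue);
	struct dt_entry_node a, b;

	list_add_tail(&a.node, &queue);		/* queue: a */
	list_add_tail(&b.node, &queue);		/* queue: a, b */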
static int __devinit bma250_probe(struct i2c_client *ic_dev,
		const struct i2c_device_id *id)
{
	struct driver_data *dd;
	int                 rc;
	struct bma250_platform_data *pdata = ic_dev->dev.platform_data;

	if (!pdata || !pdata->hw_config)
		return -ENODEV;

	if (pdata->bypass_state && pdata->read_axis_data
		&& pdata->check_sleep_status && pdata->vote_sleep_status)
		slave_hw = TRUE;
	else if (!pdata->bypass_state && !pdata->read_axis_data
		&& !pdata->check_sleep_status && !pdata->vote_sleep_status)
		slave_hw = FALSE;
	else
		return -ENODEV;

	dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
	if (!dd) {
		rc = -ENOMEM;
		goto probe_exit;
	}

	mutex_lock(&bma250_dd_lock);
	list_add_tail(&dd->next_dd, &dd_list);
	mutex_unlock(&bma250_dd_lock);
	dd->ic_dev = ic_dev;

	INIT_DELAYED_WORK(&dd->work_data, bma250_work_f);

	dd->pdata = pdata;

	/* initial configuration */
	dd->rate = pdata->rate;
	dd->delay_jiffies = msecs_to_jiffies(dd->rate);
	dd->bw_sel = pdata->reg->bw_sel;
	dd->range = pdata->reg->range;

	rc = bma250_power_up(dd);
	if (rc)
		goto probe_err_cfg;
	rc = bma250_hwid(dd);
	bma250_power_down(dd);
	if (rc)
		goto probe_err_cfg;

	bma250_create_dbfs_entry(dd);
	bma250_ic_set_data(ic_dev, dd);

	dd->ip_dev = input_allocate_device();
	if (!dd->ip_dev) {
		rc = -ENOMEM;
		goto probe_err_reg;
	}
	input_set_drvdata(dd->ip_dev, dd);
	dd->ip_dev->open       = bma250_open;
	dd->ip_dev->close      = bma250_release;
	dd->ip_dev->name       = BMA250_NAME;
	dd->ip_dev->id.vendor  = BMA250_VENDORID;
	dd->ip_dev->id.product = 1;
	dd->ip_dev->id.version = 1;
	__set_bit(EV_ABS,       dd->ip_dev->evbit);
	__set_bit(ABS_X,        dd->ip_dev->absbit);
	__set_bit(ABS_Y,        dd->ip_dev->absbit);
	__set_bit(ABS_Z,        dd->ip_dev->absbit);
	__set_bit(ABS_MISC,     dd->ip_dev->absbit);
	input_set_abs_params(dd->ip_dev, ABS_X, -4096, 4095, 0, 0);
	input_set_abs_params(dd->ip_dev, ABS_Y, -4096, 4095, 0, 0);
	input_set_abs_params(dd->ip_dev, ABS_Z, -4096, 4095, 0, 0);
	input_set_abs_params(dd->ip_dev, ABS_MISC, -80, 175, 0, 0);

	rc = input_register_device(dd->ip_dev);
	if (rc) {
		input_free_device(dd->ip_dev);
		goto probe_err_reg;
	}

	rc = add_sysfs_interfaces(&dd->ip_dev->dev);
	if (rc)
		goto probe_err_sysfs;

	return rc;

probe_err_sysfs:
	input_unregister_device(dd->ip_dev);
probe_err_reg:
	bma250_remove_dbfs_entry(dd);
	bma250_ic_set_data(ic_dev, NULL);
probe_err_cfg:
	mutex_lock(&bma250_dd_lock);
	list_del(&dd->next_dd);
	mutex_unlock(&bma250_dd_lock);
	kfree(dd);
probe_exit:
	return rc;
}
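
The input-device block above follows the standard allocate/configure/register sequence. A minimal sketch of just that sequence, with the bma250-specific pieces stripped out (device name and axis range are illustrative):

	struct input_dev *ip = input_allocate_device();
	if (!ip)
		return -ENOMEM;

	ip->name = "example-accel";
	__set_bit(EV_ABS, ip->evbit);
	__set_bit(ABS_X, ip->absbit);
	input_set_abs_params(ip, ABS_X, -4096, 4095, 0, 0);

	if (input_register_device(ip)) {
		input_free_device(ip);	/* free only while unregistered */
		return -ENODEV;
	}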