Example #1
/*
 * This is called to asynchronously write the inode associated with this
 * inode log item out to disk. The inode will already have been locked by
 * a successful call to xfs_inode_item_trylock().
 */
STATIC void
xfs_inode_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
	ASSERT(!completion_done(&ip->i_flush));

	/*
	 * Since we were able to lock the inode's flush lock and
	 * we found it on the AIL, the inode must be dirty.  This
	 * is because the inode is removed from the AIL while still
	 * holding the flush lock in xfs_iflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the inode.
	 */
	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
	       iip->ili_format.ilf_fields != 0);

	/*
	 * Push the inode to its backing buffer. This will not remove the
	 * inode from the AIL - a further push will be required to trigger a
	 * buffer push. However, this allows all the dirty inodes to be pushed
	 * to the buffer before it is pushed to disk. The buffer IO completion
	 * will pull the inode from the AIL, mark it clean and unlock the flush
	 * lock.
	 */
	(void) xfs_iflush(ip, SYNC_TRYLOCK);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
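
Note: the flush "lock" referenced above is a struct completion embedded in the inode. A sketch of the completion-based helpers this code relies on (reconstructed from the same era of XFS; illustrative, not authoritative):

	/* Trylock/unlock of the inode flush lock, built on a completion. */
	static inline int xfs_iflock_nowait(struct xfs_inode *ip)
	{
		return try_wait_for_completion(&ip->i_flush);
	}

	static inline void xfs_ifunlock(struct xfs_inode *ip)
	{
		complete(&ip->i_flush);
	}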
Example #2
static ssize_t core_info_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct msm_vidc_core *core = file->private_data;
	int i = 0;
	if (!core) {
		dprintk(VIDC_ERR, "Invalid params, core: %p\n", core);
		return 0;
	}
	INIT_DBG_BUF(dbg_buf);
	write_str(&dbg_buf, "===============================\n");
	write_str(&dbg_buf, "CORE %d: 0x%p\n", core->id, core);
	write_str(&dbg_buf, "===============================\n");
	write_str(&dbg_buf, "state: %d\n", core->state);
	write_str(&dbg_buf, "base addr: 0x%x\n", core->base_addr);
	write_str(&dbg_buf, "register_base: 0x%x\n", core->register_base);
	write_str(&dbg_buf, "register_size: %u\n", core->register_size);
	write_str(&dbg_buf, "irq: %u\n", core->irq);
	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
		write_str(&dbg_buf, "completions[%d]: %s\n", i,
			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
			"pending" : "done");
	}
	return simple_read_from_buffer(buf, count, ppos,
			dbg_buf.ptr, dbg_buf.filled_size);
}
/*
 * This is called to asynchronously write the inode associated with this
 * inode log item out to disk. The inode will already have been locked by
 * a successful call to xfs_inode_item_trylock().
 */
STATIC void
xfs_inode_item_push(
	xfs_inode_log_item_t	*iip)
{
	xfs_inode_t	*ip;

	ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
	ASSERT(!completion_done(&ip->i_flush));
	/*
	 * Since we were able to lock the inode's flush lock and
	 * we found it on the AIL, the inode must be dirty.  This
	 * is because the inode is removed from the AIL while still
	 * holding the flush lock in xfs_iflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the inode.
	 */
	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
	       iip->ili_format.ilf_fields != 0);

	/*
	 * Write out the inode.  The completion routine ('iflush_done') will
	 * pull it from the AIL, mark it clean, unlock the flush lock.
	 */
	(void) xfs_iflush(ip, XFS_IFLUSH_ASYNC);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return;
}
Example #4
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If the flush lock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return;
	}

	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	xfs_dqunlock(dqp);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}
Example #5
/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so
 * whether it's marked delayed write. If that's the case, we'll promote it
 * and that will allow the caller to write the buffer by triggering the
 * xfsbufd to run.
 */
STATIC bool
xfs_inode_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (completion_done(&ip->i_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return true;
	}

	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
			iip->ili_format.ilf_len, XBF_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}
Example #6
static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
	struct sk_buff *skb)
{
	struct fip_header *fiph;
	struct fip_desc *desc;
	u16 vid = 0;
	ssize_t rlen;
	size_t dlen;

	fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
		   "vid=0x%x.\n", vid);

	if (vid > 0 && qedf->vlan_id != vid) {
		qedf_set_vlan_id(qedf, vid);

		/* Inform waiter that it's ok to call fcoe_ctlr_link_up() */
		if (!completion_done(&qedf->fipvlan_compl))
			complete(&qedf->fipvlan_compl);
	}
}
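
Note: guarding complete() with !completion_done() keeps the completion's internal done counter from banking a "token" on every VLAN response when nobody is waiting. The waiter side presumably pairs with it roughly as follows (a sketch; the timeout and the error reporting are assumptions, not taken from the driver):

	/* Hypothetical waiter in the probe/link-up path. */
	if (!wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ))
		QEDF_ERR(&qedf->dbg_ctx,
			 "Timed out waiting for FIP VLAN response.\n");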
Example #7
/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, 0);
	if (error)
		xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
			"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
			error, dqp);
	xfs_dqunlock(dqp);
}
Example #8
static ssize_t ppi_chr_read(struct file* filp, char __user* buffer, size_t count, loff_t* offset) {
    if(bfin_read_PPI_STATUS() != 0) {
        printk(KERN_WARNING DRIVER_NAME ": PPI error. PPI_STATUS (%d)\n", bfin_read_PPI_STATUS());
        bfin_write_PPI_STATUS(0);
    }

    if(sizeof(current_buffer_pointer) != count) {
        return -EINVAL;
    }

    /* Wait for buffer to fill and pointer to be set */
    if(wait_for_completion_interruptible(&buffer_ready)) {
        return -EINTR;
    }

    /* Check for backlog */
    if(completion_done(&buffer_ready)) {
        printk(KERN_WARNING DRIVER_NAME ": Missed data packet!\n");
    }

    /* Copy value of the pointer to the just filled buffer to the user buffer */
    if(copy_to_user(buffer, &current_buffer_pointer, count)) {
        return -EFAULT;
    }

    /* Reset the completion flag so completions don't pile up */
    INIT_COMPLETION(buffer_ready);

    return count;
}
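
Note: INIT_COMPLETION() is the pre-Linux-3.13 spelling of the reset operation; newer kernels write the same thing as:

	/* Modern equivalent of INIT_COMPLETION(buffer_ready): */
	reinit_completion(&buffer_ready);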
Example #9
static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
{
	unsigned long flags;
	struct scpi_xfer *t, *match = NULL;

	spin_lock_irqsave(&ch->rx_lock, flags);
	if (list_empty(&ch->rx_pending)) {
		spin_unlock_irqrestore(&ch->rx_lock, flags);
		return;
	}

	list_for_each_entry(t, &ch->rx_pending, node)
		if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
			list_del(&t->node);
			match = t;
			break;
		}
	/* check if wait_for_completion is in progress or timed-out */
	if (match && !completion_done(&match->done)) {
		struct scpi_shared_mem *mem = ch->rx_payload;

		match->status = le32_to_cpu(mem->status);
		memcpy_fromio(match->rx_buf, mem->payload, CMD_SIZE(cmd));
		complete(&match->done);
	}
	spin_unlock_irqrestore(&ch->rx_lock, flags);
}
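
Note: the handler copies the payload and completes ->done only if the completion has not already been signalled. The sender side presumably looks something like this (a sketch under assumptions; not the actual SCPI driver code):

	/* Hypothetical sender: queue the xfer, then sleep until the
	 * handler above copies the reply and completes ->done. */
	init_completion(&t->done);
	spin_lock_irqsave(&ch->rx_lock, flags);
	list_add_tail(&t->node, &ch->rx_pending);
	spin_unlock_irqrestore(&ch->rx_lock, flags);

	if (!wait_for_completion_timeout(&t->done, msecs_to_jiffies(30)))
		ret = -ETIMEDOUT;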
static inline bool pending_buffer_requests(struct audio_stream *stream)
{
	int i;
	for (i = 0; i < stream->num_bufs; i++)
		if (!completion_done(&stream->comp[i]))
			return true;
	return false;
}
int sync_wait_on_multiple_events(struct sync_object **events,
				     unsigned count, unsigned timeout,
				     unsigned *index)
{
	unsigned i;
	int status = -EPERM;
	struct completion m_comp;

	init_completion(&m_comp);

	if (SYNC_INFINITE == timeout)
		timeout = MAX_SCHEDULE_TIMEOUT;

	spin_lock_bh(&sync_lock);
	for (i = 0; i < count; i++) {
		if (completion_done(&events[i]->comp)) {
			INIT_COMPLETION(events[i]->comp);
			*index = i;
			spin_unlock_bh(&sync_lock);
			status = 0;
			goto func_end;
		}
	}

	for (i = 0; i < count; i++)
		events[i]->multi_comp = &m_comp;

	spin_unlock_bh(&sync_lock);

	if (!wait_for_completion_interruptible_timeout(&m_comp,
					msecs_to_jiffies(timeout)))
		status = -ETIME;

	spin_lock_bh(&sync_lock);
	for (i = 0; i < count; i++) {
		if (completion_done(&events[i]->comp)) {
			INIT_COMPLETION(events[i]->comp);
			*index = i;
			status = 0;
		}
		events[i]->multi_comp = NULL;
	}
	spin_unlock_bh(&sync_lock);
func_end:
	return status;
}
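
Note: a hypothetical caller, waiting up to 100 ms for whichever of two events fires first (obj_a and obj_b are illustrative):

	struct sync_object *events[2] = { &obj_a, &obj_b };
	unsigned signaled;
	int err = sync_wait_on_multiple_events(events, 2, 100, &signaled);

	if (!err)
		pr_info("event %u signaled\n", signaled);
	else if (err == -ETIME)
		pr_warn("no event within 100 ms\n");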
Example #12
static UBYTE IicPortBusy(UBYTE Port)
{
	UBYTE Result = 0;

	if (!completion_done(&IicCtrl[Port].message_completion))
		Result = 1;

	return Result;
}
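
Note: the helper reduces to a direct negation of completion_done(); an equivalent, more compact spelling (same behavior, same IicCtrl array):

	static UBYTE IicPortBusy(UBYTE Port)
	{
		/* Busy until the in-flight I2C message completion fires. */
		return !completion_done(&IicCtrl[Port].message_completion);
	}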
Example #13
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
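
Note: the RCU callback named on the last line is what finally frees the memory; in XFS of this period it looked roughly like this (a sketch, reconstructed from memory):

	/* Runs after a grace period, once no RCU-protected lookup can
	 * still dereference the inode. */
	STATIC void
	xfs_inode_free_callback(
		struct rcu_head		*head)
	{
		struct inode	*inode = container_of(head, struct inode, i_rcu);

		kmem_zone_free(xfs_inode_zone, XFS_I(inode));
	}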
Example #14
static void bebob_remove(struct fw_unit *unit)
{
	struct snd_bebob *bebob = dev_get_drvdata(&unit->device);

	if (bebob == NULL)
		return;

	/* Awake bus-reset waiters. */
	if (!completion_done(&bebob->bus_reset))
		complete_all(&bebob->bus_reset);

	/* No need to wait for releasing card object in this context. */
	snd_card_free_when_closed(bebob->card);
}
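
Note: complete_all() rather than complete() matters here: it releases every thread sleeping on bus_reset and leaves the completion signalled for late arrivals, whereas complete() hands out a single wake-up. The usual counterpart when such a completion is reused is:

	/* Re-arm after complete_all() before waiting again. */
	reinit_completion(&bebob->bus_reset);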
Example #15
static void tegra_dc_continuous_irq(struct tegra_dc *dc, unsigned long status)
{
	/* Schedule any additional bottom-half vblank activities. */
	if (status & V_BLANK_INT)
		queue_work(system_freezable_wq, &dc->vblank_work);

	if (status & FRAME_END_INT) {
		/* Mark the frame_end as complete. */
		if (!completion_done(&dc->frame_end_complete))
			complete(&dc->frame_end_complete);

		tegra_dc_trigger_windows(dc);
	}
}
Example #16
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}
/*
 * Input Capture method of measuring frame intervals. Not subject
 * to interrupt latency.
 */
static void fim_input_capture_handler(int channel, void *dev_id,
				      struct timespec *ts)
{
	struct imx_media_fim *fim = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&fim->lock, flags);

	frame_interval_monitor(fim, ts);

	if (!completion_done(&fim->icap_first_event))
		complete(&fim->icap_first_event);

	spin_unlock_irqrestore(&fim->lock, flags);
}
int ev3_uart_set_mode(void *context, const u8 mode)
{
	struct tty_struct *tty = context;
	struct ev3_uart_port_data *port;
	const int data_size = 3;
	u8 data[data_size];
	int retries = 10;
	int ret;

	if (!tty)
		return -ENODEV;

	port = tty->disc_data;
	if (!port->synced || !port->info_done)
		return -ENODEV;
	if (mode >= port->sensor.num_modes)
		return -EINVAL;
	if (!completion_done(&port->set_mode_completion))
		return -EBUSY;

	data[0] = ev3_uart_set_msg_hdr(EV3_UART_MSG_TYPE_CMD,
					   data_size - 2,
					   EV3_UART_CMD_SELECT);
	data[1] = mode;
	data[2] = 0xFF ^ data[0] ^ data[1];

	port->new_mode = mode;
	reinit_completion(&port->set_mode_completion);
	while (retries--) {
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		ret = tty->ops->write(tty, data, data_size);
		if (ret < 0)
			return ret;

		ret = wait_for_completion_timeout(&port->set_mode_completion,
						  msecs_to_jiffies(50));
		if (ret)
			break;
	}
	port->set_mode_completion.done++;
	if (!ret)
		return -ETIMEDOUT;

	port->requested_mode = mode;

	return 0;
}
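
Note: the bare port->set_mode_completion.done++ bumps the completion's internal counter directly so that the completion_done() gate at the top of this function stops returning -EBUSY on later calls (wait_for_completion_timeout() consumes one count on success, and a timeout leaves the count at zero). With no waiter present it is observably equivalent to the conventional spelling:

	/* Conventional equivalent of the ->done++ above (no waiters). */
	complete(&port->set_mode_completion);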
static ssize_t inst_info_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct msm_vidc_inst *inst = file->private_data;
	int i, j;
	if (!inst) {
		dprintk(VIDC_ERR, "Invalid params, inst: %p\n", inst);
		return 0;
	}
	INIT_DBG_BUF(dbg_buf);
	write_str(&dbg_buf, "===============================\n");
	write_str(&dbg_buf, "INSTANCE: 0x%p (%s)\n", inst,
		inst->session_type == MSM_VIDC_ENCODER ? "Encoder" : "Decoder");
	write_str(&dbg_buf, "===============================\n");
	write_str(&dbg_buf, "core: 0x%p\n", inst->core);
	write_str(&dbg_buf, "height: %d\n", inst->prop.height);
	write_str(&dbg_buf, "width: %d\n", inst->prop.width);
	write_str(&dbg_buf, "fps: %d\n", inst->prop.fps);
	write_str(&dbg_buf, "state: %d\n", inst->state);
	write_str(&dbg_buf, "-----------Formats-------------\n");
	for (i = 0; i < MAX_PORT_NUM; i++) {
		write_str(&dbg_buf, "capability: %s\n", i == OUTPUT_PORT ?
			"Output" : "Capture");
		write_str(&dbg_buf, "name : %s\n", inst->fmts[i]->name);
		write_str(&dbg_buf, "planes : %d\n", inst->fmts[i]->num_planes);
		write_str(
		&dbg_buf, "type: %s\n", inst->fmts[i]->type == OUTPUT_PORT ?
		"Output" : "Capture");
		for (j = 0; j < inst->fmts[i]->num_planes; j++)
			write_str(&dbg_buf, "size for plane %d: %u\n", j,
			inst->bufq[i].vb2_bufq.plane_sizes[j]);
	}
	write_str(&dbg_buf, "-------------------------------\n");
	for (i = SESSION_MSG_START; i < SESSION_MSG_END; i++) {
		write_str(&dbg_buf, "completions[%d]: %s\n", i,
		completion_done(&inst->completions[SESSION_MSG_INDEX(i)]) ?
		"pending" : "done");
	}
	write_str(&dbg_buf, "ETB Count: %d\n", inst->count.etb);
	write_str(&dbg_buf, "EBD Count: %d\n", inst->count.ebd);
	write_str(&dbg_buf, "FTB Count: %d\n", inst->count.ftb);
	write_str(&dbg_buf, "FBD Count: %d\n", inst->count.fbd);
	return simple_read_from_buffer(buf, count, ppos,
		dbg_buf.ptr, dbg_buf.filled_size);
}
Example #20
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	return ip;
}
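
Note: as the opening comment says, KM_SLEEP allocations never fail, so the NULL check here is purely defensive; the failable variant the comment anticipates would read (sketch):

	ip = kmem_zone_alloc(xfs_inode_zone, KM_MAYFAIL);
	if (!ip)
		return NULL;	/* surface ENOMEM to the caller */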
Example #21
void
xfs_inode_free(
    struct xfs_inode	*ip)
{
    switch (ip->i_d.di_mode & S_IFMT) {
    case S_IFREG:
    case S_IFDIR:
    case S_IFLNK:
        xfs_idestroy_fork(ip, XFS_DATA_FORK);
        break;
    }

    if (ip->i_afp)
        xfs_idestroy_fork(ip, XFS_ATTR_FORK);

    if (ip->i_itemp) {
        /*
         * Only if we are shutting down the fs will we see an
         * inode still in the AIL. If it is there, we should remove
         * it to prevent a use-after-free from occurring.
         */
        xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
        struct xfs_ail	*ailp = lip->li_ailp;

        ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
               XFS_FORCED_SHUTDOWN(ip->i_mount));
        if (lip->li_flags & XFS_LI_IN_AIL) {
            spin_lock(&ailp->xa_lock);
            if (lip->li_flags & XFS_LI_IN_AIL)
                xfs_trans_ail_delete(ailp, lip);
            else
                spin_unlock(&ailp->xa_lock);
        }
        xfs_inode_item_destroy(ip);
        ip->i_itemp = NULL;
    }

    /* asserts to verify all state is correct here */
    ASSERT(atomic_read(&ip->i_iocount) == 0);
    ASSERT(atomic_read(&ip->i_pincount) == 0);
    ASSERT(!spin_is_locked(&ip->i_flags_lock));
    ASSERT(completion_done(&ip->i_flush));

    kmem_zone_free(xfs_inode_zone, ip);
}
Example #22
static RESULT IicPortReceive(UBYTE Port, UBYTE *pTmpBuffer)
{
	RESULT Result = BUSY;

	if (completion_done(&IicCtrl[Port].message_completion)) {
		memset(pTmpBuffer, 0, IIC_DATA_LENGTH);
		memcpy(pTmpBuffer, IicCtrl[Port].msg1_buf,
		       IicPort[Port].InLength);

		if (IicCtrl[Port].xfer_result == IicCtrl[Port].num_msg) {
			Result = OK;
		} else {
			Result = FAIL;
		}
	}

	return Result;
}
static void ev3_uart_close(struct tty_struct *tty)
{
	struct ev3_uart_port_data *port = tty->disc_data;

	port->closing = true;
	if (!completion_done(&port->set_mode_completion))
		complete(&port->set_mode_completion);
	cancel_work_sync(&port->rx_data_work);
	cancel_delayed_work_sync(&port->send_ack_work);
	cancel_work_sync(&port->change_bitrate_work);
	hrtimer_cancel(&port->keep_alive_timer);
	tasklet_kill(&port->keep_alive_tasklet);
	if (port->sensor.context)
		unregister_lego_sensor(&port->sensor);
	if (port->in_port)
		put_device(&port->in_port->dev);
	tty->disc_data = NULL;
	kfree(port);
}
Example #24
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}
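
Note: the ctl_resp completion is normally consumed during endpoint bring-up; the waiter side in the same driver is along these lines (a sketch modelled on ath10k_htc_wait_target(); the timeout constant is an assumption):

	/* Hypothetical waiter during HTC bring-up. */
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "failed to receive control response, aborting\n");
		return -ETIMEDOUT;
	}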
Example #25
static void tegra_dc_one_shot_irq(struct tegra_dc *dc, unsigned long status)
{
	/* pending user vblank, so wakeup */
	if ((status & (V_BLANK_INT | MSF_INT)) &&
	    (dc->out->user_needs_vblank)) {
		dc->out->user_needs_vblank = false;
		complete(&dc->out->user_vblank_comp);
	}

	if (status & V_BLANK_INT) {
		/* Sync up windows. */
		tegra_dc_trigger_windows(dc);

		/* Schedule any additional bottom-half vblank activities. */
		queue_work(system_freezable_wq, &dc->vblank_work);
	}

	if (status & FRAME_END_INT) {
		/* Mark the frame_end as complete. */
		if (!completion_done(&dc->frame_end_complete))
			complete(&dc->frame_end_complete);
	}
}
static ssize_t core_info_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct msm_vidc_core *core = file->private_data;
	struct hfi_device *hdev;
	int i = 0;
	if (!core || !core->device) {
		dprintk(VIDC_ERR, "Invalid params, core: %p\n", core);
		return 0;
	}
	hdev = core->device;
	INIT_DBG_BUF(dbg_buf);
	write_str(&dbg_buf, "===============================\n");
	write_str(&dbg_buf, "CORE %d: 0x%p\n", core->id, core);
	write_str(&dbg_buf, "===============================\n");
	write_str(&dbg_buf, "state: %d\n", core->state);
	write_str(&dbg_buf, "base addr: 0x%x\n",
		call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data,
					FW_BASE_ADDRESS));
	write_str(&dbg_buf, "register_base: 0x%x\n",
		call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data,
					FW_REGISTER_BASE));
	write_str(&dbg_buf, "register_size: %u\n",
		call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data,
					FW_REGISTER_SIZE));
	write_str(&dbg_buf, "irq: %u\n",
		call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data,
					FW_IRQ));
	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
		write_str(&dbg_buf, "completions[%d]: %s\n", i,
			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
			"pending" : "done");
	}
	return simple_read_from_buffer(buf, count, ppos,
			dbg_buf.ptr, dbg_buf.filled_size);
}
Example #27
void exit_lpapm_mode_mx50(int high_bus_freq)
{
	u32 reg;
	unsigned long flags;

	if (clk_get_usecount(pll1_sw_clk) == 1) {
		/* Relock PLL1 to 800MHz. */
		clk_set_parent(pll1_sw_clk, pll2);
		/* Set the ARM_PODF divider to 3; the cpu is at 160MHz. */
		__raw_writel(0x02, MXC_CCM_CACRR);

		clk_set_rate(pll1, cpu_wp_tbl[0].pll_rate);

		/* Set the ARM_PODF divider to 5 before
		 * switching the parent.
		 */
		__raw_writel(0x4, MXC_CCM_CACRR);
		clk_set_parent(pll1_sw_clk, pll1);
	}

	if (!completion_done(&voltage_change_cmpl))
		wait_for_completion_interruptible(&voltage_change_cmpl);
	spin_lock_irqsave(&voltage_lock, flags);
	if (lp_voltage != LP_NORMAL_VOLTAGE) {
		INIT_COMPLETION(voltage_change_cmpl);
		lp_voltage = LP_NORMAL_VOLTAGE;
		if (!queue_work(voltage_wq, &voltage_change_handler))
			printk(KERN_ERR "WORK_NOT_ADDED\n");
		spin_unlock_irqrestore(&voltage_lock, flags);
		wait_for_completion_interruptible(&voltage_change_cmpl);
	} else {
		spin_unlock_irqrestore(&voltage_lock, flags);
		if (!completion_done(&voltage_change_cmpl))
			wait_for_completion_interruptible(&voltage_change_cmpl);
	}

	spin_lock_irqsave(&freq_lock, flags);
	if (!low_bus_freq_mode) {
		spin_unlock_irqrestore(&freq_lock, flags);
		return;
	}

	/* Temporarily set the dividers when the source is PLL3.
	 * No clock rate is above 133MHz.
	 */
	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~(MXC_CCM_CBCDR_AXI_A_PODF_MASK
		| MXC_CCM_CBCDR_AXI_B_PODF_MASK
		| MXC_CCM_CBCDR_AHB_PODF_MASK
		| MX50_CCM_CBCDR_WEIM_PODF_MASK);
	reg |= (1 << MXC_CCM_CBCDR_AXI_A_PODF_OFFSET
		|1 << MXC_CCM_CBCDR_AXI_B_PODF_OFFSET
		|1 << MXC_CCM_CBCDR_AHB_PODF_OFFSET
		|0 << MX50_CCM_CBCDR_WEIM_PODF_OFFSET);
	__raw_writel(reg, MXC_CCM_CBCDR);

	while (__raw_readl(MXC_CCM_CDHIPR) & 0xF)
		udelay(10);

	clk_set_parent(main_bus_clk, pll3);

	if (bus_freq_scaling_is_active && !high_bus_freq) {
		/* Set the dividers to the medium setpoint dividers */
		reg = __raw_readl(MXC_CCM_CBCDR);
		reg &= ~(MXC_CCM_CBCDR_AXI_A_PODF_MASK
			| MXC_CCM_CBCDR_AXI_B_PODF_MASK
			| MXC_CCM_CBCDR_AHB_PODF_MASK
			| MX50_CCM_CBCDR_WEIM_PODF_MASK);
		reg |= (1 << MXC_CCM_CBCDR_AXI_A_PODF_OFFSET
			|3 << MXC_CCM_CBCDR_AXI_B_PODF_OFFSET
			|5 << MXC_CCM_CBCDR_AHB_PODF_OFFSET
			|0 << MX50_CCM_CBCDR_WEIM_PODF_OFFSET);
		__raw_writel(reg, MXC_CCM_CBCDR);

		while (__raw_readl(MXC_CCM_CDHIPR) & 0xF)
			udelay(10);

		/* Set the main_bus_clk parent to be PLL2. */
		clk_set_parent(main_bus_clk, pll2);

		/* Set to the medium setpoint. */
		high_bus_freq_mode = 0;
		low_bus_freq_mode = 0;
		med_bus_freq_mode = 1;
		set_ddr_freq(ddr_med_rate);
	} else {
		/* Set the dividers to the default dividers */
		reg = __raw_readl(MXC_CCM_CBCDR);
		reg &= ~(MXC_CCM_CBCDR_AXI_A_PODF_MASK
			| MXC_CCM_CBCDR_AXI_B_PODF_MASK
			| MXC_CCM_CBCDR_AHB_PODF_MASK
			| MX50_CCM_CBCDR_WEIM_PODF_MASK);
		reg |= (0 << MXC_CCM_CBCDR_AXI_A_PODF_OFFSET
			|1 << MXC_CCM_CBCDR_AXI_B_PODF_OFFSET
			|2 << MXC_CCM_CBCDR_AHB_PODF_OFFSET
			|0 << MX50_CCM_CBCDR_WEIM_PODF_OFFSET);
		__raw_writel(reg, MXC_CCM_CBCDR);

		while (__raw_readl(MXC_CCM_CDHIPR) & 0xF)
			udelay(10);

		/* Set the main_bus_clk parent to be PLL2. */
		clk_set_parent(main_bus_clk, pll2);

		/* Set to the high setpoint. */
		high_bus_freq_mode = 1;
		low_bus_freq_mode = 0;
		med_bus_freq_mode = 0;
		set_ddr_freq(ddr_normal_rate);
	}
	spin_unlock_irqrestore(&freq_lock, flags);

	udelay(100);
}
Example #28
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		case ATH10K_HTC_MSG_READY_ID:
		case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				ath10k_warn(ar, "HTC rx ctrl still processing\n");
				complete(&htc->ctl_resp);
				goto out;
			}

			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
			break;
		default:
			ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
			break;
		}
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
static void ev3_uart_handle_rx_data(struct work_struct *work)
{
	struct ev3_uart_port_data *port =
		container_of(work, struct ev3_uart_port_data, rx_data_work);
	struct circ_buf *cb = &port->circ_buf;
	u8 message[EV3_UART_MAX_MESSAGE_SIZE + 2];
	int count = CIRC_CNT(cb->head, cb->tail, EV3_UART_BUFFER_SIZE);
	int i, speed, size_to_end;
	u8 cmd, cmd2, type, mode, msg_type, msg_size, chksum;

#ifdef DEBUG
	printk("received: ");
	for (i = 0; i < count; i++) {
		cmd = cb->buf[(cb->tail + i) % EV3_UART_BUFFER_SIZE];
		if (cmd >= 32 && cmd < 127)
			printk("%c ", cmd);
		else
			printk("0x%02x ", cmd);
	}
	printk("(%d)\n", count);
#endif

	/*
	 * To get in sync with the data stream from the sensor, we look
	 * for a valid TYPE command.
	 */
	while (!port->synced) {
		if (count < 3)
			return;
		cmd = cb->buf[cb->tail];
		cb->tail++;
		if (cb->tail >= EV3_UART_BUFFER_SIZE)
			cb->tail = 0;
		count--;
		if (cmd != (EV3_UART_MSG_TYPE_CMD | EV3_UART_CMD_TYPE))
			continue;
		type = cb->buf[cb->tail];
		if (!type || type > EV3_UART_TYPE_MAX)
			continue;
		chksum = 0xFF ^ cmd ^ type;
		if ((u8)cb->buf[(cb->tail + 1) % EV3_UART_BUFFER_SIZE] != chksum)
			continue;
		port->sensor.num_modes = 1;
		port->sensor.num_view_modes = 1;
		for (i = 0; i <= EV3_UART_MODE_MAX; i++)
			port->mode_info[i] = ev3_uart_default_mode_info;
		port->type_id = type;
		/* look up well-known driver names */
		port->device_name[0] = 0;
		for (i = 0; i < NUM_LEGO_EV3_SENSOR_TYPES; i++) {
			if (type == ev3_uart_sensor_defs[i].type_id) {
				snprintf(port->device_name, LEGO_SENSOR_NAME_SIZE,
					 "%s", ev3_uart_sensor_defs[i].name);
				break;
			}
		}
		/* or use generic name if well-known name is not found */
		if (!port->device_name[0])
			snprintf(port->device_name, LEGO_SENSOR_NAME_SIZE,
				 EV3_UART_SENSOR_NAME("%u"), type);
		port->info_flags = EV3_UART_INFO_FLAG_CMD_TYPE;
		port->synced = 1;
		port->info_done = 0;
		port->data_rec = 0;
		port->num_data_err = 0;
		cb->tail = (cb->tail + 2) % EV3_UART_BUFFER_SIZE;
		count -= 2;
	}
	if (!port->synced)
		return;

	while (count > 0) {
		/*
		 * Sometimes we get 0xFF after switching baud rates, so just
		 * ignore it.
		 */
		if ((u8)cb->buf[cb->tail] == 0xFF) {
			cb->tail++;
			if (cb->tail >= EV3_UART_BUFFER_SIZE)
				cb->tail = 0;
			count--;
			continue;
		}
		msg_size = ev3_uart_msg_size((u8)cb->buf[cb->tail]);
		if (msg_size > count)
			break;
		size_to_end = CIRC_CNT_TO_END(cb->head, cb->tail, EV3_UART_BUFFER_SIZE);
		if (msg_size > size_to_end) {
			memcpy(message, cb->buf + cb->tail, size_to_end);
			memcpy(message + size_to_end, cb->buf, msg_size - size_to_end);
			cb->tail = msg_size - size_to_end;
		} else {
			memcpy(message, cb->buf + cb->tail, msg_size);
			cb->tail += msg_size;
			if (cb->tail >= EV3_UART_BUFFER_SIZE)
				cb->tail = 0;
		}
		count -= msg_size;
#ifdef DEBUG
		printk("processing: ");
		for (i = 0; i < msg_size; i++)
			printk("0x%02x ", message[i]);
		printk(" (%d)\n", msg_size);
#endif
		if (msg_size > EV3_UART_MAX_MESSAGE_SIZE) {
			port->last_err = "Bad message size.";
			goto err_invalid_state;
		}
		msg_type = message[0] & EV3_UART_MSG_TYPE_MASK;
		cmd = message[0] & EV3_UART_MSG_CMD_MASK;
		mode = cmd;
		cmd2 = message[1];
		if (msg_size > 1) {
			chksum = 0xFF;
			for (i = 0; i < msg_size - 1; i++)
				chksum ^= message[i];
			debug_pr("chksum:%d, actual:%d\n",
			         chksum, message[msg_size - 1]);
			/*
			 * The LEGO EV3 color sensor sends bad checksums
			 * for RGB-RAW data (mode 4). The check here could be
			 * improved if someone can find a pattern.
			 */
			if (chksum != message[msg_size - 1]
			    && port->type_id != EV3_UART_TYPE_ID_COLOR
			    && message[0] != 0xDC)
			{
				port->last_err = "Bad checksum.";
				if (port->info_done) {
					port->num_data_err++;
					goto err_bad_data_msg_checksum;
				} else
					goto err_invalid_state;
			}
		}
		switch (msg_type) {
		case EV3_UART_MSG_TYPE_SYS:
			debug_pr("SYS:%d\n", message[0] & EV3_UART_MSG_CMD_MASK);
			switch (cmd) {
			case EV3_UART_SYS_SYNC:
				/* IR sensor (type 33) sends checksum after SYNC */
				if (msg_size > 1 && (cmd ^ cmd2) == 0xFF)
					msg_size++;
				break;
			case EV3_UART_SYS_ACK:
				if (!port->sensor.num_modes) {
					port->last_err = "Received ACK before all mode INFO.";
					goto err_invalid_state;
				}
				if ((port->info_flags & EV3_UART_INFO_FLAG_REQUIRED)
				    != EV3_UART_INFO_FLAG_REQUIRED)
				{
					port->last_err = "Did not receive all required INFO.";
					goto err_invalid_state;
				}
				schedule_delayed_work(&port->send_ack_work,
						      msecs_to_jiffies(EV3_UART_SEND_ACK_DELAY));
				port->info_done = 1;
				return;
			}
			break;
		case EV3_UART_MSG_TYPE_CMD:
			debug_pr("CMD:%d\n", cmd);
			switch (cmd) {
			case EV3_UART_CMD_MODES:
				if (test_and_set_bit(EV3_UART_INFO_BIT_CMD_MODES,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate modes INFO.";
					goto err_invalid_state;
				}
				if (!cmd2 || cmd2 > EV3_UART_MODE_MAX) {
					port->last_err = "Number of modes is out of range.";
					goto err_invalid_state;
				}
				port->sensor.num_modes = cmd2 + 1;
				if (msg_size > 3)
					port->sensor.num_view_modes = message[2] + 1;
				else
					port->sensor.num_view_modes = port->sensor.num_modes;
				debug_pr("num_modes:%d, num_view_modes:%d\n",
					 port->sensor.num_modes, port->sensor.num_view_modes);
				break;
			case EV3_UART_CMD_SPEED:
				if (test_and_set_bit(EV3_UART_INFO_BIT_CMD_SPEED,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate speed INFO.";
					goto err_invalid_state;
				}
				speed = *(int*)(message + 1);
				if (speed < EV3_UART_SPEED_MIN
				    || speed > EV3_UART_SPEED_MAX)
				{
					port->last_err = "Speed is out of range.";
					goto err_invalid_state;
				}
				port->new_baud_rate = speed;
				debug_pr("speed:%d\n", speed);
				break;
			default:
				port->last_err = "Unknown command.";
				goto err_invalid_state;
			}
			break;
		case EV3_UART_MSG_TYPE_INFO:
			debug_pr("INFO:%d, mode:%d\n", cmd2, mode);
			switch (cmd2) {
			case EV3_UART_INFO_NAME:
				port->info_flags &= ~EV3_UART_INFO_FLAG_ALL_INFO;
				if (message[2] < 'A' || message[2] > 'z') {
					port->last_err = "Invalid name INFO.";
					goto err_invalid_state;
				}
				/*
				 * Name may not have null terminator and we
				 * are done with the checksum at this point
				 * so we are writing 0 over the checksum to
				 * ensure a null terminator for the string
				 * functions.
				 */
				message[msg_size - 1] = 0;
				if (strlen(message + 2) > EV3_UART_MODE_NAME_SIZE) {
					port->last_err = "Name is too long.";
					goto err_invalid_state;
				}
				snprintf(port->mode_info[mode].name,
				         EV3_UART_MODE_NAME_SIZE + 1, "%s",
				         message + 2);
				if (port->sensor.mode != mode) {
					port->sensor.mode = mode;
					kobject_uevent(&port->sensor.dev.kobj,
						       KOBJ_CHANGE);
				}
				port->info_flags |= EV3_UART_INFO_FLAG_INFO_NAME;
				debug_pr("mode %d name:%s\n",
				       mode, port->sensor.address);
				break;
			case EV3_UART_INFO_RAW:
				if (port->sensor.mode != mode) {
					port->last_err = "Received INFO for incorrect mode.";
					goto err_invalid_state;
				}
				if (test_and_set_bit(EV3_UART_INFO_BIT_INFO_RAW,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate raw scaling INFO.";
					goto err_invalid_state;
				}
				port->raw_min = *(u32 *)(message + 2);
				port->raw_max = *(u32 *)(message + 6);
				debug_pr("mode %d raw_min:%08x, raw_max:%08x\n",
				       mode, port->mode_info[mode].raw_min,
				       port->mode_info[mode].raw_max);
				break;
			case EV3_UART_INFO_PCT:
				if (port->sensor.mode != mode) {
					port->last_err = "Received INFO for incorrect mode.";
					goto err_invalid_state;
				}
				if (test_and_set_bit(EV3_UART_INFO_BIT_INFO_PCT,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate percent scaling INFO.";
					goto err_invalid_state;
				}
				port->pct_min = *(u32 *)(message + 2);
				port->pct_max = *(u32 *)(message + 6);
				debug_pr("mode %d pct_min:%08x, pct_max:%08x\n",
				       mode, port->mode_info[mode].pct_min,
				       port->mode_info[mode].pct_max);
				break;
			case EV3_UART_INFO_SI:
				if (port->sensor.mode != mode) {
					port->last_err = "Received INFO for incorrect mode.";
					goto err_invalid_state;
				}
				if (test_and_set_bit(EV3_UART_INFO_BIT_INFO_SI,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate SI scaling INFO.";
					goto err_invalid_state;
				}
				port->si_min = *(u32 *)(message + 2);
				port->si_max = *(u32 *)(message + 6);
				debug_pr("mode %d si_min:%08x, si_max:%08x\n",
				       mode, port->mode_info[mode].si_min,
				       port->mode_info[mode].si_max);
				break;
			case EV3_UART_INFO_UNITS:
				if (port->sensor.mode != mode) {
					port->last_err = "Received INFO for incorrect mode.";
					goto err_invalid_state;
				}
				if (test_and_set_bit(EV3_UART_INFO_BIT_INFO_UNITS,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate SI units INFO.";
					goto err_invalid_state;
				}
				/*
				 * Units may not have null terminator and we
				 * are done with the checksum at this point
				 * so we are writing 0 over the checksum to
				 * ensure a null terminator for the string
				 * functions.
				 */
				message[msg_size - 1] = 0;
				snprintf(port->mode_info[mode].units,
					 EV3_UART_UNITS_SIZE + 1, "%s",
					 message + 2);
				debug_pr("mode %d units:%s\n",
				       mode, port->mode_info[mode].units);
				break;
			case EV3_UART_INFO_FORMAT:
				if (port->sensor.mode != mode) {
					port->last_err = "Received INFO for incorrect mode.";
					goto err_invalid_state;
				}
				if (test_and_set_bit(EV3_UART_INFO_BIT_INFO_FORMAT,
						     &port->info_flags))
				{
					port->last_err = "Received duplicate format INFO.";
					goto err_invalid_state;
				}
				port->mode_info[mode].data_sets = message[2];
				if (!port->mode_info[mode].data_sets) {
					port->last_err = "Invalid number of data sets.";
					goto err_invalid_state;
				}
				if (msg_size < 7) {
					port->last_err = "Invalid format message size.";
					goto err_invalid_state;
				}
				if ((port->info_flags & EV3_UART_INFO_FLAG_REQUIRED)
						!= EV3_UART_INFO_FLAG_REQUIRED) {
					port->last_err = "Did not receive all required INFO.";
					goto err_invalid_state;
				}
				switch (message[3]) {
				case EV3_UART_DATA_8:
					port->mode_info[mode].data_type = LEGO_SENSOR_DATA_S8;
					break;
				case EV3_UART_DATA_16:
					port->mode_info[mode].data_type = LEGO_SENSOR_DATA_S16;
					break;
				case EV3_UART_DATA_32:
					port->mode_info[mode].data_type = LEGO_SENSOR_DATA_S32;
					break;
				case EV3_UART_DATA_FLOAT:
					port->mode_info[mode].data_type = LEGO_SENSOR_DATA_FLOAT;
					break;
				default:
					port->last_err = "Invalid data type.";
					goto err_invalid_state;
				}
				port->mode_info[mode].figures = message[4];
				port->mode_info[mode].decimals = message[5];
				if (port->info_flags & EV3_UART_INFO_FLAG_INFO_RAW) {
					port->mode_info[mode].raw_min =
						lego_sensor_ftoi(port->raw_min, 0);
					port->mode_info[mode].raw_max =
						lego_sensor_ftoi(port->raw_max, 0);
				}
				if (port->info_flags & EV3_UART_INFO_FLAG_INFO_PCT) {
					port->mode_info[mode].pct_min =
						lego_sensor_ftoi(port->pct_min, 0);
					port->mode_info[mode].pct_max =
						lego_sensor_ftoi(port->pct_max, 0);
				}
				if (port->info_flags & EV3_UART_INFO_FLAG_INFO_SI) {
					port->mode_info[mode].si_min =
						lego_sensor_ftoi(port->si_min,
							port->mode_info[mode].decimals);
					port->mode_info[mode].si_max =
						lego_sensor_ftoi(port->si_max,
							port->mode_info[mode].decimals);
				}
				if (port->sensor.mode)
					port->sensor.mode--;
				debug_pr("mode %d - data_sets:%d, data_type:%d, figures:%d, decimals:%d\n",
					 mode, port->mode_info[mode].data_sets,
					 port->mode_info[mode].data_type,
					 port->mode_info[mode].figures,
					 port->mode_info[mode].decimals);
				debug_pr("raw_min: %d, raw_max: %d\n",
					 port->mode_info[mode].raw_min,
					 port->mode_info[mode].raw_max);
				debug_pr("pct_min: %d, pct_max: %d\n",
					 port->mode_info[mode].pct_min,
					 port->mode_info[mode].pct_max);
				debug_pr("si_min: %d, si_max: %d\n",
					 port->mode_info[mode].si_min,
					 port->mode_info[mode].si_max);
				break;
			}
			break;
		case EV3_UART_MSG_TYPE_DATA:
			debug_pr("DATA:%d\n", message[0] & EV3_UART_MSG_CMD_MASK);
			if (!port->info_done) {
				port->last_err = "Received DATA before INFO was complete.";
				goto err_invalid_state;
			}
			if (mode > EV3_UART_MODE_MAX) {
				port->last_err = "Invalid mode received.";
				goto err_invalid_state;
			}
			if (mode != port->sensor.mode) {
				if (mode == port->new_mode) {
					port->sensor.mode = mode;
					kobject_uevent(&port->sensor.dev.kobj,
						       KOBJ_CHANGE);
				} else {
					port->last_err = "Unexpected mode.";
					goto err_invalid_state;
				}
			}
			if (!completion_done(&port->set_mode_completion)
			    && mode == port->new_mode)
				complete(&port->set_mode_completion);
			memcpy(port->mode_info[mode].raw_data, message + 1, msg_size - 2);
			port->data_rec = 1;
			if (port->num_data_err)
				port->num_data_err--;
			break;
		}
err_bad_data_msg_checksum:
		count = CIRC_CNT(cb->head, cb->tail, EV3_UART_BUFFER_SIZE);
	}
	return;

err_invalid_state:
	port->synced = 0;
	port->new_baud_rate = EV3_UART_SPEED_MIN;
	schedule_work(&port->change_bitrate_work);
}
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb,
					    u8 pipe_id)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn("HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	ep = &htc->endpoint[eid];

	/*
	 * If this endpoint that received a message from the target has
	 * a to-target HIF pipe whose send completions are polled rather
	 * than interrupt-driven, this is a good point to ask HIF to check
	 * whether it has any completed sends to handle.
	 */
	if (ep->ul_is_polled)
		ath10k_htc_send_complete_check(ep, 1);

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn("HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn("Invalid trailer length: %d\n",
				    trailer_len);
			status = -EPROTO;
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		default:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				ath10k_warn("HTC rx ctrl still processing\n");
				status = -EINVAL;
				complete(&htc->ctl_resp);
				goto out;
			}

			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
		}
		goto out;
	}

	ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);

	return status;
}