Example #1
static void diag_smd_cntl_send_req(int proc_num)
{
	int data_len = 0, type = -1, count_bytes = 0, j, r;
	struct bindpkt_params_per_process *pkt_params =
		 kzalloc(sizeof(struct bindpkt_params_per_process), GFP_KERNEL);
	struct diag_ctrl_msg *msg;
	struct cmd_code_range *range;
	struct bindpkt_params *temp;
	void *buf = NULL;
	smd_channel_t *smd_ch = NULL;

	if (proc_num == MODEM_PROC) {
		buf = driver->buf_in_cntl;
		smd_ch = driver->ch_cntl;
	} else if (proc_num == QDSP_PROC) {
		buf = driver->buf_in_qdsp_cntl;
		smd_ch = driver->chqdsp_cntl;
	} else if (proc_num == WCNSS_PROC) {
		buf = driver->buf_in_wcnss_cntl;
		smd_ch = driver->ch_wcnss_cntl;
	}

	if (!smd_ch || !buf)
		return;

	r = smd_read_avail(smd_ch);
	if (r > IN_BUF_SIZE) {
		if (r < MAX_IN_BUF_SIZE) {
			pr_err("diag: SMD CNTL sending pkt upto %d bytes", r);
			buf = krealloc(buf, r, GFP_KERNEL);
		} else {
			pr_err("diag: CNTL pkt > %d bytes", MAX_IN_BUF_SIZE);
			kfree(pkt_params);
			return;
		}
	}
	if (buf && r > 0) {
		smd_read(smd_ch, buf, r);
		while (count_bytes + HDR_SIZ <= r) {
			type = *(uint32_t *)(buf);
			data_len = *(uint32_t *)(buf + 4);
			count_bytes = count_bytes+HDR_SIZ+data_len;
			if (type == DIAG_CTRL_MSG_REG && r >= count_bytes) {
				msg = buf+HDR_SIZ;
				range = buf+HDR_SIZ+
						sizeof(struct diag_ctrl_msg);
				pkt_params->count = msg->count_entries;
				temp = kzalloc(pkt_params->count * sizeof(struct
						 bindpkt_params), GFP_KERNEL);
				for (j = 0; j < pkt_params->count; j++) {
					temp->cmd_code = msg->cmd_code;
					temp->subsys_id = msg->subsysid;
					temp->client_id = proc_num;
					temp->proc_id = proc_num;
					temp->cmd_code_lo = range->cmd_code_lo;
					temp->cmd_code_hi = range->cmd_code_hi;
					range++;
					temp++;
				}
				temp -= pkt_params->count;
				pkt_params->params = temp;
				diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
						 (unsigned long)pkt_params);
				kfree(temp);
				buf = buf + HDR_SIZ + data_len;
			}
		}
	}
	kfree(pkt_params);
}
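Two hazards stand out in this first version, both addressed in the hardened variant that follows: `buf = krealloc(buf, r, GFP_KERNEL)` overwrites the only live copy of the pointer, so on failure the original buffer (still referenced by driver->buf_in_*) is lost, and `pkt_params` is dereferenced without a NULL check (the inner kzalloc of `temp` is unchecked too). A minimal sketch of the leak-safe realloc idiom, with a hypothetical helper name, not the driver's actual fix:

static int grow_buf(void **bufp, size_t new_len)
{
	/* Stage the result so *bufp survives an allocation failure. */
	void *tmp = krealloc(*bufp, new_len, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp still points at the old block */
	*bufp = tmp;
	return 0;
}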
static void diag_smd_cntl_send_req(int proc_num)
{
	int data_len = 0, type = -1, count_bytes = 0, j, r, flag = 0;
	struct bindpkt_params_per_process *pkt_params =
		 kzalloc(sizeof(struct bindpkt_params_per_process), GFP_KERNEL);
	struct diag_ctrl_msg *msg;
	struct cmd_code_range *range;
	struct bindpkt_params *temp;
	void *buf = NULL;
	smd_channel_t *smd_ch = NULL;

	if (pkt_params == NULL) {
		pr_alert("diag: Memory allocation failure\n");
		return;
	}

	if (proc_num == MODEM_PROC) {
		buf = driver->buf_in_cntl;
		smd_ch = driver->ch_cntl;
	} else if (proc_num == QDSP_PROC) {
		buf = driver->buf_in_qdsp_cntl;
		smd_ch = driver->chqdsp_cntl;
	} else if (proc_num == WCNSS_PROC) {
		buf = driver->buf_in_wcnss_cntl;
		smd_ch = driver->ch_wcnss_cntl;
	}

	if (!smd_ch || !buf) {
		kfree(pkt_params);
		return;
	}

	r = smd_read_avail(smd_ch);
	if (r > IN_BUF_SIZE) {
		if (r < MAX_IN_BUF_SIZE) {
			pr_err("diag: SMD CNTL sending pkt upto %d bytes", r);
			buf = krealloc(buf, r, GFP_KERNEL);
		} else {
			pr_err("diag: CNTL pkt > %d bytes", MAX_IN_BUF_SIZE);
			kfree(pkt_params);
			return;
		}
	}
	if (buf && r > 0) {
		smd_read(smd_ch, buf, r);
		while (count_bytes + HDR_SIZ <= r) {
			type = *(uint32_t *)(buf);
			data_len = *(uint32_t *)(buf + 4);
			if (type < DIAG_CTRL_MSG_REG ||
					 type > DIAG_CTRL_MSG_F3_MASK_V2) {
				pr_alert("diag: Invalid Msg type %d proc %d",
					 type, proc_num);
				break;
			}
			if (data_len < 0 || data_len > r) {
				pr_alert("diag: Invalid data len %d proc %d",
					 data_len, proc_num);
				break;
			}
			count_bytes = count_bytes+HDR_SIZ+data_len;
			if (type == DIAG_CTRL_MSG_REG && r >= count_bytes) {
				msg = buf+HDR_SIZ;
				range = buf+HDR_SIZ+
						sizeof(struct diag_ctrl_msg);
				pkt_params->count = msg->count_entries;
				temp = kzalloc(pkt_params->count * sizeof(struct
						 bindpkt_params), GFP_KERNEL);
				if (temp == NULL) {
					pr_alert("diag: Memory alloc fail\n");
					kfree(pkt_params);
					return;
				}
				for (j = 0; j < pkt_params->count; j++) {
					temp->cmd_code = msg->cmd_code;
					temp->subsys_id = msg->subsysid;
					temp->client_id = proc_num;
					temp->proc_id = proc_num;
					temp->cmd_code_lo = range->cmd_code_lo;
					temp->cmd_code_hi = range->cmd_code_hi;
					range++;
					temp++;
				}
				temp -= pkt_params->count;
				pkt_params->params = temp;
				flag = 1;
				diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
						 (unsigned long)pkt_params);
				kfree(temp);
			}
			buf = buf + HDR_SIZ + data_len;
		}
	}
	kfree(pkt_params);
	if (flag) {
		/* Poll SMD CNTL channels to check for data */
		if (proc_num == MODEM_PROC)
			diag_smd_cntl_notify(NULL, SMD_EVENT_DATA);
		else if (proc_num == QDSP_PROC)
			diag_smd_qdsp_cntl_notify(NULL, SMD_EVENT_DATA);
		else if (proc_num == WCNSS_PROC)
			diag_smd_wcnss_cntl_notify(NULL, SMD_EVENT_DATA);
	}
}
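The hardened version frees `pkt_params` on every exit path, checks the allocations, and validates `type` and `data_len` read from the shared-memory payload before trusting them; it also advances `buf` on every record, not only registration records. A hedged sketch of the general pattern for walking length-prefixed records in an untrusted buffer (`handle_record` is hypothetical; HDR_SIZ is assumed to be the 8-byte type+length header used here):

static int parse_records(const uint8_t *buf, uint32_t total)
{
	uint32_t off = 0;

	while (off + HDR_SIZ <= total) {
		uint32_t type = *(const uint32_t *)(buf + off);
		uint32_t len  = *(const uint32_t *)(buf + off + 4);

		if (len > total - off - HDR_SIZ)
			return -EINVAL;	/* record claims more than remains */
		handle_record(type, buf + off + HDR_SIZ, len);
		off += HDR_SIZ + len;
	}
	return 0;
}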
Example #3
static int diagchar_ioctl(struct inode *inode, struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;

		for (i = 0; i < diag_max_registration; i++) {
			if (driver->table[i].process_id == 0) {
				success = 1;
				driver->table[i].cmd_code =
					pkt_params->params->cmd_code;
				driver->table[i].subsys_id =
					pkt_params->params->subsys_id;
				driver->table[i].cmd_code_lo =
					pkt_params->params->cmd_code_lo;
				driver->table[i].cmd_code_hi =
					pkt_params->params->cmd_code_hi;
				driver->table[i].process_id = current->tgid;
				count_entries++;
				if (pkt_params->count > count_entries)
					pkt_params->params++;
				else
					return success;
			}
		}
		if (i < diag_threshold_registration) {
			/* Increase table size by amount required */
			diag_max_registration += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_registration > diag_threshold_registration)
				diag_max_registration =
						 diag_threshold_registration;
			driver->table = krealloc(driver->table,
					 diag_max_registration*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			for (j = i; j < diag_max_registration; j++) {
				success = 1;
				driver->table[j].cmd_code = pkt_params->
							params->cmd_code;
				driver->table[j].subsys_id = pkt_params->
							params->subsys_id;
				driver->table[j].cmd_code_lo = pkt_params->
							params->cmd_code_lo;
				driver->table[j].cmd_code_hi = pkt_params->
							params->cmd_code_hi;
				driver->table[j].process_id = current->tgid;
				count_entries++;
				if (pkt_params->count > count_entries)
					pkt_params->params++;
				else
					return success;
			}
		} else
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);

		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_qdsp_1 = 1;
			driver->in_busy_qdsp_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE)
			diagfwd_disconnect();
		else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE)
			diagfwd_connect();
		else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
		} else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
								== USB_MODE)
			diagfwd_connect();
#endif /* DIAG over USB */
		success = 1;
	}

	return success;
}
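Note that this legacy ioctl casts `ioarg` straight to kernel pointers and dereferences user-controlled structures (`pkt_params->params`, `delay_params->rsp_ptr`) with no `copy_from_user()`/`copy_to_user()`, takes no lock around the registration table, and grows it with `driver->table = krealloc(driver->table, ...)`, which loses the table on failure. The later `long diagchar_ioctl` variants below at least use copy_from_user on the DCI paths. The conventional shape for the argument handling, with a hypothetical payload struct:

	struct my_ioctl_arg karg;	/* hypothetical payload layout */

	if (copy_from_user(&karg, (void __user *)ioarg, sizeof(karg)))
		return -EFAULT;
	/* validate karg fields against sane bounds before acting on them */
	if (copy_to_user((void __user *)ioarg, &karg, sizeof(karg)))
		return -EFAULT;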
static void diag_build_time_mask_update(uint8_t *buf,
					struct diag_ssid_range_t *range)
{
	int i;
	int j;
	int num_items = 0;
	int err = 0;
	int found = 0;
	int new_size = 0;
	uint8_t *temp = NULL;
	uint32_t *mask_ptr = (uint32_t *)buf;
	uint32_t *dest_ptr = NULL;
	struct diag_msg_mask_t *build_mask = NULL;

	if (!range || !buf)
		return;

	if (range->ssid_last < range->ssid_first) {
		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
		       __func__, range->ssid_first, range->ssid_last);
		return;
	}

	build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
	num_items = range->ssid_last - range->ssid_first + 1;

	mutex_lock(&driver->build_time_mask->lock);
	for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
		if (build_mask->ssid_first != range->ssid_first)
			continue;
		found = 1;
		err = update_msg_mask_tbl_entry(build_mask, range);
		if (err == -ENOMEM) {
			pr_err("diag: In %s, unable to increase the msg build mask table range\n",
			       __func__);
		}
		dest_ptr = build_mask->ptr;
		for (j = 0; j < build_mask->range; j++, mask_ptr++, dest_ptr++)
			*(uint32_t *)dest_ptr |= *mask_ptr;
		break;
	}

	if (found)
		goto end;
	new_size = (driver->msg_mask_tbl_count + 1) *
		   sizeof(struct diag_msg_mask_t);
	temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
	if (!temp) {
		pr_err("diag: In %s, unable to create a new entry for build time mask\n",
		       __func__);
		goto end;
	}
	driver->build_time_mask->ptr = temp;
	err = diag_create_msg_mask_table_entry(build_mask, range);
	if (err) {
		pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
		       __func__, err);
		goto end;
	}
	driver->msg_mask_tbl_count += 1;
end:
	mutex_unlock(&driver->build_time_mask->lock);
}
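diag_build_time_mask_update shows the safe resize idiom: krealloc's result is staged in `temp`, `driver->build_time_mask->ptr` is updated only on success, and the grow-and-update sequence runs under the table's mutex. One caveat: after the krealloc, `build_mask` still points into the old allocation when the new entry is created, which looks stale if the block moved. The safe core of the pattern in isolation (hypothetical `tbl` layout):

	mutex_lock(&tbl->lock);
	new_size = (tbl->count + 1) * sizeof(*tbl->entries);
	tmp = krealloc(tbl->entries, new_size, GFP_KERNEL);
	if (tmp) {
		tbl->entries = tmp;
		tbl->count++;	/* commit only after the allocation succeeds */
	}
	mutex_unlock(&tbl->lock);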
Example #5
/*
 * This function replicates the directory structure up to the given dentry
 * in the bindex branch.
 */
struct dentry *create_parents(struct inode *dir, struct dentry *dentry,
			      const char *name, int bindex)
{
	int err;
	struct dentry *child_dentry;
	struct dentry *parent_dentry;
	struct dentry *lower_parent_dentry = NULL;
	struct dentry *lower_dentry = NULL;
	const char *childname;
	unsigned int childnamelen;
	int nr_dentry;
	int count = 0;
	int old_bstart;
	int old_bend;
	struct dentry **path = NULL;
	struct super_block *sb;

	verify_locked(dentry);

	err = is_robranch_super(dir->i_sb, bindex);
	if (err) {
		lower_dentry = ERR_PTR(err);
		goto out;
	}

	old_bstart = dbstart(dentry);
	old_bend = dbend(dentry);

	lower_dentry = ERR_PTR(-ENOMEM);

	/* There is no sense allocating any less than the minimum. */
	nr_dentry = 1;
	path = kmalloc(nr_dentry * sizeof(struct dentry *), GFP_KERNEL);
	if (unlikely(!path))
		goto out;

	/* assume the negative dentry of unionfs as the parent dentry */
	parent_dentry = dentry;

	/*
	 * This loop finds the first parent that exists in the given branch.
	 * We start building the directory structure from there.  At the end
	 * of the loop, the following should hold:
	 *  - child_dentry is the first nonexistent child
	 *  - parent_dentry is the first existent parent
	 *  - path[0] is the deepest child
	 *  - path[count] is the first child to create
	 */
	do {
		child_dentry = parent_dentry;

		/* find the parent directory dentry in unionfs */
		parent_dentry = dget_parent(child_dentry);

		/* find out the lower_parent_dentry in the given branch */
		lower_parent_dentry =
			unionfs_lower_dentry_idx(parent_dentry, bindex);

		/* grow path table */
		if (count == nr_dentry) {
			void *p;

			nr_dentry *= 2;
			p = krealloc(path, nr_dentry * sizeof(struct dentry *),
				     GFP_KERNEL);
			if (unlikely(!p)) {
				lower_dentry = ERR_PTR(-ENOMEM);
				goto out;
			}
			path = p;
		}

		/* store the child dentry */
		path[count++] = child_dentry;
	} while (!lower_parent_dentry);
	count--;

	sb = dentry->d_sb;

	/*
	 * This code goes between the begin/end labels and basically
	 * emulates a while(child_dentry != dentry), only cleaner and
	 * shorter than what would be a much longer while loop.
	 */
begin:
	/* get lower parent dir in the current branch */
	lower_parent_dentry = unionfs_lower_dentry_idx(parent_dentry, bindex);
	dput(parent_dentry);

	/* init the values to lookup */
	childname = child_dentry->d_name.name;
	childnamelen = child_dentry->d_name.len;

	if (child_dentry != dentry) {
		/* lookup child in the underlying file system */
		lower_dentry = lookup_one_len(childname, lower_parent_dentry,
					      childnamelen);
		if (IS_ERR(lower_dentry))
			goto out;
	} else {
		/*
		 * Is the name a whiteout of the child name ?  lookup the
		 * whiteout child in the underlying file system
		 */
		lower_dentry = lookup_one_len(name, lower_parent_dentry,
					      strlen(name));
		if (IS_ERR(lower_dentry))
			goto out;

		/* Replace the current dentry (if any) with the new one */
		dput(unionfs_lower_dentry_idx(dentry, bindex));
		unionfs_set_lower_dentry_idx(dentry, bindex,
					     lower_dentry);

		__cleanup_dentry(dentry, bindex, old_bstart, old_bend);
		goto out;
	}

	if (lower_dentry->d_inode) {
		/*
		 * since this already exists we dput to avoid
		 * multiple references on the same dentry
		 */
		dput(lower_dentry);
	} else {
		struct sioq_args args;

		/* it's a negative dentry, create a new dir */
		lower_parent_dentry = lock_parent(lower_dentry);

		args.mkdir.parent = lower_parent_dentry->d_inode;
		args.mkdir.dentry = lower_dentry;
		args.mkdir.mode = child_dentry->d_inode->i_mode;

		run_sioq(__unionfs_mkdir, &args);
		err = args.err;

		if (!err)
			err = copyup_permissions(dir->i_sb, child_dentry,
						 lower_dentry);
		unlock_dir(lower_parent_dentry);
		if (err) {
			dput(lower_dentry);
			lower_dentry = ERR_PTR(err);
			goto out;
		}

	}

	__set_inode(child_dentry, lower_dentry, bindex);
	__set_dentry(child_dentry, lower_dentry, bindex);
	/*
	 * update times of this dentry, but also the parent, because if
	 * we changed, the parent may have changed too.
	 */
	fsstack_copy_attr_times(parent_dentry->d_inode,
				lower_parent_dentry->d_inode);
	unionfs_copy_attr_times(child_dentry->d_inode);

	parent_dentry = child_dentry;
	child_dentry = path[--count];
	goto begin;
out:
	/* cleanup any leftover locks from the do/while loop above */
	if (IS_ERR(lower_dentry))
		while (count)
			dput(path[count--]);
	kfree(path);
	return lower_dentry;
}
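create_parents grows `path` by doubling `nr_dentry` and stages krealloc's result in `p`, so a failed allocation leaves `path` intact for the kfree at `out:`. Doubling gives amortized O(1) appends; the core of the idiom (hypothetical names):

	if (count == capacity) {
		void *p;

		capacity *= 2;
		p = krealloc(items, capacity * sizeof(*items), GFP_KERNEL);
		if (!p)
			goto fail;	/* items is still valid; cleanup frees it */
		items = p;
	}
	items[count++] = value;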
Example #6
/**
 * sync_file_merge() - merge two sync_files
 * @name:	name of new fence
 * @a:		sync_file a
 * @b:		sync_file b
 *
 * Creates a new sync_file which contains copies of all the fences in both
 * @a and @b.  @a and @b remain valid, independent sync_file. Returns the
 * new merged sync_file or NULL in case of error.
 */
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
					 struct sync_file *b)
{
	struct sync_file *sync_file;
	struct dma_fence **fences, **nfences, **a_fences, **b_fences;
	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;

	sync_file = sync_file_alloc();
	if (!sync_file)
		return NULL;

	a_fences = get_fences(a, &a_num_fences);
	b_fences = get_fences(b, &b_num_fences);
	if (a_num_fences > INT_MAX - b_num_fences)
		return NULL;

	num_fences = a_num_fences + b_num_fences;

	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		goto err;

	/*
	 * Assume sync_file a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_file can only be created with sync_file_merge
	 * and sync_file_create, this is a reasonable assumption.
	 */
	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
		struct dma_fence *pt_a = a_fences[i_a];
		struct dma_fence *pt_b = b_fences[i_b];

		if (pt_a->context < pt_b->context) {
			add_fence(fences, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			add_fence(fences, &i, pt_b);

			i_b++;
		} else {
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				add_fence(fences, &i, pt_a);
			else
				add_fence(fences, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	for (; i_a < a_num_fences; i_a++)
		add_fence(fences, &i, a_fences[i_a]);

	for (; i_b < b_num_fences; i_b++)
		add_fence(fences, &i, b_fences[i_b]);

	if (i == 0)
		fences[i++] = dma_fence_get(a_fences[0]);

	if (num_fences > i) {
		nfences = krealloc(fences, i * sizeof(*fences),
				  GFP_KERNEL);
		if (!nfences)
			goto err;

		fences = nfences;
	}

	if (sync_file_set_fence(sync_file, fences, i) < 0) {
		kfree(fences);
		goto err;
	}

	strlcpy(sync_file->name, name, sizeof(sync_file->name));
	return sync_file;

err:
	fput(sync_file->file);
	return NULL;

}
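Here krealloc shrinks the merged fence array down to `i` entries. A subtlety worth noting: a failed shrink leaves the original, larger buffer perfectly usable, yet the `goto err` path treats it as fatal and also leaks `fences` (the error label only fput()s the file). A tolerant shrink is a sketch away:

	if (num_fences > i) {
		struct dma_fence **n;

		n = krealloc(fences, i * sizeof(*fences), GFP_KERNEL);
		if (n)		/* on failure fences is merely oversized */
			fences = n;
	}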
Example #7
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, sc->sc_ah->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally managed address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}
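The wiphy array is grown with GFP_ATOMIC because `sc->wiphy_lock` is a spinlock and sleeping is not allowed while it is held; note how the failure path drops the lock before freeing `hw`. When the new size is known up front, an alternative is to allocate with GFP_KERNEL before taking the lock and publish inside it; a sketch assuming a single grower (no concurrent resizers):

	struct ath_wiphy **n, **old;
	size_t sz = (sc->num_sec_wiphy + 1) * sizeof(*n);

	n = kzalloc(sz, GFP_KERNEL);	/* may sleep: lock not held yet */
	if (!n)
		return -ENOMEM;
	spin_lock_bh(&sc->wiphy_lock);
	memcpy(n, sc->sec_wiphy, sc->num_sec_wiphy * sizeof(*n));
	old = sc->sec_wiphy;
	sc->sec_wiphy = n;
	sc->num_sec_wiphy++;
	spin_unlock_bh(&sc->wiphy_lock);
	kfree(old);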
Example #8
/* FIXME: prototype for adding security */
int wusb_dev_sec_add(struct wusbhc *wusbhc,
		     struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
{
	int result, bytes, secd_size;
	struct device *dev = &usb_dev->dev;
	struct usb_security_descriptor *secd;
	const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
	const void *itr, *top;
	char buf[64];

	secd = kmalloc(sizeof(*secd), GFP_KERNEL);
	if (secd == NULL) {
		result = -ENOMEM;
		goto out;
	}

	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
				    0, secd, sizeof(*secd));
	if (result < sizeof(*secd)) {
		dev_err(dev, "Can't read security descriptor or "
			"not enough data: %d\n", result);
		goto out;
	}
	secd_size = le16_to_cpu(secd->wTotalLength);
	secd = krealloc(secd, secd_size, GFP_KERNEL);
	if (secd == NULL) {
		dev_err(dev, "Can't allocate space for security descriptors\n");
		goto out;
	}
	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
				    0, secd, secd_size);
	if (result < secd_size) {
		dev_err(dev, "Can't read security descriptor or "
			"not enough data: %d\n", result);
		goto out;
	}
	bytes = 0;
	itr = &secd[1];
	top = (void *)secd + result;
	while (itr < top) {
		etd = itr;
		if (top - itr < sizeof(*etd)) {
			dev_err(dev, "BUG: bad device security descriptor; "
				"not enough data (%zu vs %zu bytes left)\n",
				top - itr, sizeof(*etd));
			break;
		}
		if (etd->bLength < sizeof(*etd)) {
			dev_err(dev, "BUG: bad device encryption descriptor; "
				"descriptor is too short "
				"(%u vs %zu needed)\n",
				etd->bLength, sizeof(*etd));
			break;
		}
		itr += etd->bLength;
		bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
				  "%s (0x%02x/%02x) ",
				  wusb_et_name(etd->bEncryptionType),
				  etd->bEncryptionValue, etd->bAuthKeyIndex);
		if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
			ccm1_etd = etd;
	}
	/* This code only supports CCM1 as of now. */
	/* FIXME: user has to choose which sec mode to use?
	 * In theory we want CCM */
	if (ccm1_etd == NULL) {
		dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
			"can't use!\n");
		result = -EINVAL;
		goto out;
	}
	wusb_dev->ccm1_etd = *ccm1_etd;
	dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
		buf, wusb_et_name(ccm1_etd->bEncryptionType),
		ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
	result = 0;
out:
	kfree(secd);
	return result;
}
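This is the classic two-pass fetch for a variable-length descriptor: read the fixed header, learn wTotalLength, resize, re-read. Two rough edges: `secd = krealloc(secd, ...)` leaks the header copy if the second allocation fails, and that path returns whatever positive byte count `result` held from the first read rather than -ENOMEM. A sketch with those holes closed, where `read_desc()` is a hypothetical stand-in for the usb_get_descriptor call:

static int fetch_secd(struct usb_device *udev,
		      struct usb_security_descriptor **out)
{
	struct usb_security_descriptor *secd, *tmp;
	int len, result;

	secd = kmalloc(sizeof(*secd), GFP_KERNEL);
	if (!secd)
		return -ENOMEM;
	result = read_desc(udev, secd, sizeof(*secd));
	if (result < (int)sizeof(*secd))
		goto err;
	len = le16_to_cpu(secd->wTotalLength);
	tmp = krealloc(secd, len, GFP_KERNEL);
	if (!tmp) {
		result = -ENOMEM;
		goto err;
	}
	secd = tmp;
	result = read_desc(udev, secd, len);
	if (result < len)
		goto err;
	*out = secd;
	return 0;
err:
	kfree(secd);
	return result < 0 ? result : -EIO;
}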
Example #9
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
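This reader first tries `GFP_NOWAIT | __GFP_NOWARN` inside the RCU read-side critical section, and only when that fails does it unlock, retry the same krealloc with GFP_KERNEL, and loop to revalidate the seqcount. That is the standard recipe for allocating under a non-sleepable lock: opportunistic non-blocking attempt first, then back off and block. Its skeleton, with the copy/validation bookkeeping elided (hypothetical names):

	for (;;) {
		rcu_read_lock();
		/* ... read sz for this iteration ... */
		tmp = krealloc(arr, sz, GFP_NOWAIT | __GFP_NOWARN);
		if (tmp) {
			arr = tmp;
			/* ... copy and validate under RCU, break if stable ... */
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		tmp = krealloc(arr, sz, GFP_KERNEL);	/* may sleep now */
		if (!tmp)
			return -ENOMEM;	/* caller's cleanup frees arr */
		arr = tmp;
	}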
void __diag_smd_send_req(void)
{
	void *buf = NULL;
	int *in_busy_ptr = NULL;
	struct diag_request *write_ptr_modem = NULL;
#if  DIAG_XPST
	int type;
#endif

#ifdef SDQXDM_DEBUG
	static struct timeval t0 = {0, 0}, t1;
	static int full = 0, empty = 0;
	long diff;
#endif

	if (!driver->in_busy_1) {
		buf = driver->buf_in_1;
		write_ptr_modem = driver->write_ptr_1;
		in_busy_ptr = &(driver->in_busy_1);
	} else if (!driver->in_busy_2) {
		buf = driver->buf_in_2;
		write_ptr_modem = driver->write_ptr_2;
		in_busy_ptr = &(driver->in_busy_2);
	}

	if (driver->ch && buf) {
		int r = smd_read_avail(driver->ch);

		if (r > IN_BUF_SIZE) {
			if (r < MAX_IN_BUF_SIZE) {
				DIAGFWD_INFO("\n diag: SMD sending in "
						   "packets upto %d bytes", r);
				buf = krealloc(buf, r, GFP_KERNEL);
			} else {
				DIAGFWD_ERR("\n diag: SMD sending in "
				"packets more than %d bytes", MAX_IN_BUF_SIZE);
				return;
			}
		}
		if (r > 0) {
			if (!buf)
				DIAGFWD_INFO("Out of diagmem for a9\n");
			else {
				//APPEND_DEBUG('i');
				smd_read(driver->ch, buf, r);

				if (diag7k_debug_mask) {
					switch (diag7k_debug_mask) {
					case 1:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 7K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						break;
					case 2:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 7K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 7K(last 16 bytes) ", DUMP_PREFIX_ADDRESS, 16, 1, buf+r-16, 16, 1);
						break;
					default:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 7K ", DUMP_PREFIX_ADDRESS, 16, 1, buf, r, 1);

					}
				}

#if DIAG_XPST
				type = checkcmd_modem_epst(buf);
				if (type) {
					modem_to_userspace(buf, r, type, 0);
					return;
				}
#endif

#ifdef SDQXDM_DEBUG
				if (full) {
					pr_err("[diag-dbg] buffer become available %d %d, read %d\n", driver->in_busy_1, driver->in_busy_2, r);
					full = 0;
				}
				do_gettimeofday(&t1);
				diff = (t1.tv_sec-t0.tv_sec)*1000 + (t1.tv_usec-t0.tv_usec)/1000;
				if (diff > 1000) {
					pr_err("[diag-dbg] Over time (%ld) %ld.%04ld -> %ld.%04ld empty = %d\n", diff, (long)t0.tv_sec, t0.tv_usec/1000, (long)t1.tv_sec, t1.tv_usec/1000, empty);
				}
				write_ptr_modem->second = t1.tv_sec;
				t0 = t1;
				empty = 0;
#endif

				//APPEND_DEBUG('j');
				write_ptr_modem->length = r;
				*in_busy_ptr = 1;
				diag_device_write(buf, MODEM_DATA,
							 write_ptr_modem);
			}
		}
#ifdef SDQXDM_DEBUG
		else {
			empty++;
		}
#endif
	}
	else {
#ifdef SDQXDM_DEBUG
		if (!full && driver->ch)
			pr_info("[diag-dbg] Buffer full, %d bytes pending.\n", smd_read_avail(driver->ch));
		full = 1;
#endif
		wake_lock_timeout(&driver->wake_lock, HZ);
	}
}
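__diag_smd_send_req repeats the pointer-overwriting krealloc from Example #1; the later `if (!buf)` check avoids a crash, but on success driver->buf_in_* is left dangling at the old block, and on failure the read is silently skipped. An alternative that avoids resizing altogether is to clamp each read and drain the channel in fixed-size chunks; a hedged sketch (`consume()` is a hypothetical downstream handler, `ch`/`buf` as in the driver):

	while ((avail = smd_read_avail(ch)) > 0) {
		int chunk = min_t(int, avail, IN_BUF_SIZE);

		if (smd_read(ch, buf, chunk) != chunk)
			break;
		consume(buf, chunk);
	}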
int usbmsc_bindlun(FAR void *handle, FAR const char *drvrpath,
                   unsigned int lunno, off_t startsector, size_t nsectors,
                   bool readonly)
{
  FAR struct usbmsc_alloc_s *alloc = (FAR struct usbmsc_alloc_s *)handle;
  FAR struct usbmsc_dev_s *priv;
  FAR struct usbmsc_lun_s *lun;
  FAR struct inode *inode;
  struct geometry geo;
  int ret;

#ifdef CONFIG_DEBUG
  if (!alloc || !drvrpath || startsector < 0 || nsectors < 0)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_BINLUNINVALIDARGS1), 0);
      return -EINVAL;
    }
#endif

  priv = &alloc->dev;

#ifdef CONFIG_DEBUG
  if (!priv->luntab)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_INTERNALCONFUSION1), 0);
      return -EIO;
    }

  if (lunno > priv->nluns)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_BINDLUNINVALIDARGS2), 0);
      return -EINVAL;
    }
#endif

  lun = &priv->luntab[lunno];

#ifdef CONFIG_DEBUG
  if (lun->inode != NULL)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_LUNALREADYBOUND), 0);
      return -EBUSY;
    }
#endif

  /* Open the block driver */

  ret = open_blockdriver(drvrpath, 0, &inode);
  if (ret < 0)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_BLKDRVEOPEN), 0);
      return ret;
    }

  /* Get the drive geometry */

  if (!inode || !inode->u.i_bops || !inode->u.i_bops->geometry ||
      inode->u.i_bops->geometry(inode, &geo) != OK || !geo.geo_available)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_NOGEOMETRY), 0);
      return -ENODEV;
    }

  /* Verify that the partition parameters are valid */

  if (startsector >= geo.geo_nsectors)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_BINDLUNINVALIDARGS3), 0);
      return -EDOM;
    }
  else if (nsectors == 0)
    {
      nsectors = geo.geo_nsectors - startsector;
    }
  else if (startsector + nsectors >= geo.geo_nsectors)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_BINDLUNINVALIDARGS4), 0);
      return -EDOM;
    }

  /* Initialize the LUN structure */

  memset(lun, 0, sizeof(struct usbmsc_lun_s));

  /* Allocate an I/O buffer big enough to hold one hardware sector.  SCSI commands
   * are processed one at a time so all LUNs may share a single I/O buffer.  The
   * I/O buffer will be allocated so that it is as large as the largest block
   * device sector size
   */

  if (!priv->iobuffer)
    {
      priv->iobuffer = (uint8_t*)kmalloc(geo.geo_sectorsize);
      if (!priv->iobuffer)
        {
          usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_ALLOCIOBUFFER), geo.geo_sectorsize);
          return -ENOMEM;
        }

      priv->iosize = geo.geo_sectorsize;
    }
  else if (priv->iosize < geo.geo_sectorsize)
    {
      void *tmp;
      tmp = (uint8_t*)krealloc(priv->iobuffer, geo.geo_sectorsize);
      if (!tmp)
        {
          usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_REALLOCIOBUFFER), geo.geo_sectorsize);
          return -ENOMEM;
        }

      priv->iobuffer = (uint8_t*)tmp;
      priv->iosize   = geo.geo_sectorsize;
    }

  lun->inode       = inode;
  lun->startsector = startsector;
  lun->nsectors    = nsectors;
  lun->sectorsize  = geo.geo_sectorsize;

  /* If the driver does not support the write method, then this is read-only */

  if (!inode->u.i_bops->write)
    {
      lun->readonly = true;
    }

  return OK;
}
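usbmsc_bindlun grows the shared I/O buffer the careful way: krealloc's result lands in `tmp`, the buffer only ever grows, and `iosize` tracks the high-water mark so the buffer converges on the largest sector size among the LUNs (note that kmalloc/krealloc here are NuttX's single-heap allocators, which take no GFP flags). The grow-only idiom on its own, in the file's coding style:

  if (priv->iosize < needed)
    {
      FAR void *tmp = krealloc(priv->iobuffer, needed);
      if (!tmp)
        {
          return -ENOMEM;   /* old buffer remains valid and tracked */
        }

      priv->iobuffer = (FAR uint8_t *)tmp;
      priv->iosize   = needed;
    }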
void __diag_smd_qdsp_send_req(void)
{
	void *buf = NULL;
	int *in_busy_qdsp_ptr = NULL;
	struct diag_request *write_ptr_qdsp = NULL;
#if  DIAG_XPST
	int type;
#endif
	if (!driver->in_busy_qdsp_1) {
		buf = driver->buf_in_qdsp_1;
		write_ptr_qdsp = driver->write_ptr_qdsp_1;
		in_busy_qdsp_ptr = &(driver->in_busy_qdsp_1);
	} else if (!driver->in_busy_qdsp_2) {
		buf = driver->buf_in_qdsp_2;
		write_ptr_qdsp = driver->write_ptr_qdsp_2;
		in_busy_qdsp_ptr = &(driver->in_busy_qdsp_2);
	}

	if (driver->chqdsp && buf) {
		int r = smd_read_avail(driver->chqdsp);

		if (r > IN_BUF_SIZE) {
			if (r < MAX_IN_BUF_SIZE) {
				DIAGFWD_INFO("\n diag: SMD sending in "
						   "packets upto %d bytes", r);
				buf = krealloc(buf, r, GFP_KERNEL);
			} else {
				DIAGFWD_ERR("\n diag: SMD sending in "
				"packets more than %d bytes", MAX_IN_BUF_SIZE);
				return;
			}
		}
		if (r > 0) {
			if (!buf)
				DIAGFWD_INFO("Out of diagmem for QDSP\n");
			else {
				//APPEND_DEBUG('i');
				smd_read(driver->chqdsp, buf, r);

				if (diag7k_debug_mask) {
					switch (diag7k_debug_mask) {
					case 1:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from qdsp(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						break;
					case 2:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from qdsp(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from qdsp(last 16 bytes) ", DUMP_PREFIX_ADDRESS, 16, 1, buf+r-16, 16, 1);
						break;
					default:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from qdsp ", DUMP_PREFIX_ADDRESS, 16, 1, buf, r, 1);
					}
				}
#if DIAG_XPST
				type = checkcmd_modem_epst(buf);
				if (type) {
					modem_to_userspace(buf, r, type, 0);
					return;
				}
#endif
				//APPEND_DEBUG('j');
				write_ptr_qdsp->length = r;
				*in_busy_qdsp_ptr = 1;
				diag_device_write(buf, QDSP_DATA,
							 write_ptr_qdsp);
			}
		}
	}
}
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;
	void *temp_buf;
	uint16_t support_list = 0;
	struct diag_dci_client_tbl *params =
		kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
	struct diag_dci_health_stats stats;
	int status;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			diag_max_reg += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				diag_max_reg -= pkt_params->count -
							 count_entries;
				pr_alert("diag: Insufficient memory for reg.");
				mutex_unlock(&driver->diagchar_mutex);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_DCI_REG) {
		if (driver->dci_state == DIAG_DCI_NO_REG)
			return DIAG_DCI_NO_REG;
		if (driver->num_dci_client >= MAX_DCI_CLIENTS)
			return DIAG_DCI_NO_REG;
		if (copy_from_user(params, (void *)ioarg,
				 sizeof(struct diag_dci_client_tbl)))
			return -EFAULT;
		mutex_lock(&driver->dci_mutex);
		if (!(driver->num_dci_client))
			driver->in_busy_dci = 0;
		driver->num_dci_client++;
		pr_debug("diag: id = %d\n", driver->dci_client_id);
		driver->dci_client_id++;
		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
			if (driver->dci_client_tbl[i].client == NULL) {
				driver->dci_client_tbl[i].client = current;
				driver->dci_client_tbl[i].list =
							 params->list;
				driver->dci_client_tbl[i].signal_type =
					 params->signal_type;
				create_dci_log_mask_tbl(driver->
					dci_client_tbl[i].dci_log_mask);
				create_dci_event_mask_tbl(driver->
					dci_client_tbl[i].dci_event_mask);
				driver->dci_client_tbl[i].data_len = 0;
				driver->dci_client_tbl[i].dci_data =
					 kzalloc(IN_BUF_SIZE, GFP_KERNEL);
				driver->dci_client_tbl[i].total_capacity =
								 IN_BUF_SIZE;
				driver->dci_client_tbl[i].dropped_logs = 0;
				driver->dci_client_tbl[i].dropped_events = 0;
				driver->dci_client_tbl[i].received_logs = 0;
				driver->dci_client_tbl[i].received_events = 0;
				break;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		return driver->dci_client_id;
	} else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
		success = -1;
		/* Delete this process from DCI table */
		mutex_lock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++)
			if (driver->req_tracking_tbl[i].pid == current->tgid)
				driver->req_tracking_tbl[i].pid = 0;
		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
			if (driver->dci_client_tbl[i].client &&
			driver->dci_client_tbl[i].client->tgid ==
							 current->tgid) {
				driver->dci_client_tbl[i].client = NULL;
				success = i;
				break;
			}
		}
		if (success >= 0)
			driver->num_dci_client--;
		mutex_unlock(&driver->dci_mutex);
		return success;
	} else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
		if (driver->ch_dci)
			support_list = support_list | DIAG_CON_MPSS;
		*(uint16_t *)ioarg = support_list;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_DCI_HEALTH_STATS) {
		if (copy_from_user(&stats, (void *)ioarg,
				 sizeof(struct diag_dci_health_stats)))
			return -EFAULT;
		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
			params = &(driver->dci_client_tbl[i]);
			if (params->client &&
				params->client->tgid == current->tgid) {
				stats.dropped_logs = params->dropped_logs;
				stats.dropped_events = params->dropped_events;
				stats.received_logs = params->received_logs;
				stats.received_events = params->received_events;
				if (stats.reset_status) {
					params->dropped_logs = 0;
					params->dropped_events = 0;
					params->received_logs = 0;
					params->received_events = 0;
				}
				break;
			}
		}
		if (copy_to_user((void *)ioarg, &stats,
				   sizeof(struct diag_dci_health_stats)))
			return -EFAULT;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (temp == driver->logging_mode) {
			mutex_unlock(&driver->diagchar_mutex);
			pr_alert("diag: forbidden logging change requested\n");
			return 0;
		}
		if (driver->logging_mode == MEMORY_DEVICE_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 1;
			if (driver->socket_process) {
				/*
				 * Notify the socket logging process that we
				 * are switching to MEMORY_DEVICE_MODE
				 */
				status = send_sig(SIGCONT,
					 driver->socket_process, 0);
				if (status) {
					pr_err("diag: %s, Error notifying ",
						__func__);
					pr_err("socket process, status: %d\n",
						status);
				}
			}
		}
		if (driver->logging_mode == SOCKET_MODE)
			driver->socket_process = current;
		if (driver->logging_mode == CALLBACK_MODE)
			driver->callback_process = current;
		if (driver->logging_mode == UART_MODE ||
			driver->logging_mode == SOCKET_MODE ||
			driver->logging_mode == CALLBACK_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_lpass_1 = 1;
			driver->in_busy_lpass_2 = 1;
			driver->in_busy_wcnss_1 = 1;
			driver->in_busy_wcnss_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
			diag_clear_hsic_tbl();
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_lpass_1 = 0;
			driver->in_busy_lpass_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chlpass)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_lpass_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE) {
			diagfwd_disconnect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_lpass_1 = 0;
			driver->in_busy_lpass_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;

			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chlpass)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_lpass_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == MEMORY_DEVICE_MODE &&
				 driver->logging_mode == USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diag_clear_hsic_tbl();
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		}
#endif /* DIAG over USB */
		success = 1;
	} else if (iocmd == DIAG_IOCTL_REMOTE_DEV) {
		uint16_t remote_dev = diag_get_remote_device_mask();

		if (copy_to_user((void *)ioarg, &remote_dev, sizeof(uint16_t)))
			success = -EFAULT;
		else
			success = 1;
	}

	return success;
}
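In this variant `params` is kzalloc'd unconditionally at entry but freed on essentially no path, and the DIAG_IOCTL_DCI_HEALTH_STATS branch even repoints it into `dci_client_tbl`, so the allocation leaks on every call that does not need it. The usual cure is to allocate per-branch or funnel every branch through one exit; a sketch of the single-exit shape (`do_dci_reg` is a hypothetical helper, not the driver's actual fix):

long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct diag_dci_client_tbl *scratch;
	long ret = -EINVAL;

	scratch = kzalloc(sizeof(*scratch), GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	switch (cmd) {
	case DIAG_IOCTL_DCI_REG:
		ret = do_dci_reg(scratch, arg);	/* hypothetical helper */
		break;
	default:
		break;
	}

	kfree(scratch);	/* freed on every path, whatever the branch did */
	return ret;
}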
static int etm_probe(struct platform_device *pdev)
{
	static int first_device = 0;
	struct etm_driver_data *data = dev_get_platdata(&pdev->dev);
	int ret = 0;
	void __iomem **new_regs;
	struct etm_info *new_info;
	int new_count;
	u32 id, config_code, config_code_extension, system_config;

	mutex_lock(&tracer.mutex);

	new_count = tracer.nr_etm_regs + 1;
	new_regs = krealloc(tracer.etm_regs,
			sizeof(tracer.etm_regs[0]) * new_count, GFP_KERNEL);
	if (!new_regs) {
		pr_err("Failed to allocate ETM register array\n");
		ret = -ENOMEM;
		goto out;
	}
	tracer.etm_regs = new_regs;
	new_info = krealloc(tracer.etm_info,
			sizeof(tracer.etm_info[0]) * new_count, GFP_KERNEL);
	if (!new_info) {
		pr_err("Failed to allocate ETM info array\n");
		ret = -ENOMEM;
		goto out;
	}
	tracer.etm_info = new_info;

	tracer.etm_regs[tracer.nr_etm_regs] = data->etm_regs;

	if (!first_device) {
		first_device = 1;

		if (unlikely((ret = misc_register(&etm_device)) != 0)) {
			pr_err("Fail to register etm device\n");
			goto out;
		}

		if (unlikely((ret = create_files()) != 0)) {
			pr_err("Fail to create device files\n");
			goto deregister;
		}

	} 

	memset(&(tracer.etm_info[tracer.nr_etm_regs]), 0, sizeof(struct etm_info));
	tracer.etm_info[tracer.nr_etm_regs].enable = 1;
	tracer.etm_info[tracer.nr_etm_regs].is_ptm = data->is_ptm;
	tracer.etm_info[tracer.nr_etm_regs].pwr_down = data->pwr_down;

	id = etm_readl(&tracer, tracer.nr_etm_regs, ETMIDR);
	config_code = etm_readl(&tracer, tracer.nr_etm_regs, ETMCCR);
	config_code_extension = etm_readl(&tracer, tracer.nr_etm_regs, ETMCCER);
	system_config = etm_readl(&tracer, tracer.nr_etm_regs, ETMSCR);
 
	tracer.nr_etm_regs = new_count;

out:
	mutex_unlock(&tracer.mutex);
	return ret;

deregister:
	misc_deregister(&etm_device);
	mutex_unlock(&tracer.mutex);
	return ret;
}
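etm_probe grows two parallel arrays but only publishes `tracer.nr_etm_regs = new_count` after both kreallocs and the register reads succeed, so a mid-probe failure leaves the count consistent with fully-initialized entries (the arrays may end up one slot larger than the count, which is harmless). The commit-last pattern in miniature (hypothetical `t` layout):

	new_regs = krealloc(t->regs, sizeof(*t->regs) * (t->n + 1), GFP_KERNEL);
	if (!new_regs)
		return -ENOMEM;
	t->regs = new_regs;
	new_info = krealloc(t->info, sizeof(*t->info) * (t->n + 1), GFP_KERNEL);
	if (!new_info)
		return -ENOMEM;	/* t->regs oversized by one; count still t->n */
	t->info = new_info;
	/* ... initialize slot t->n ... */
	t->n++;		/* commit: readers never see a half-built slot */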
Example #15
/*
 * ima_collect_measurement - collect file measurement
 *
 * Calculate the file hash, if it doesn't already exist,
 * storing the measurement and i_version in the iint.
 *
 * Must be called with iint->mutex held.
 *
 * Return 0 on success, error code otherwise
 */
int ima_collect_measurement(struct integrity_iint_cache *iint,
			    struct file *file, void *buf, loff_t size,
			    enum hash_algo algo)
{
	const char *audit_cause = "failed";
	struct inode *inode = file_inode(file);
	const char *filename = file->f_path.dentry->d_name.name;
	int result = 0;
	int length;
	void *tmpbuf;
	u64 i_version;
	struct {
		struct ima_digest_data hdr;
		char digest[IMA_MAX_DIGEST_SIZE];
	} hash;

	if (iint->flags & IMA_COLLECTED)
		goto out;

	/*
	 * Detecting file change is based on i_version. On filesystems
	 * which do not support i_version, support is limited to an initial
	 * measurement/appraisal/audit.
	 */
	i_version = inode_query_iversion(inode);
	hash.hdr.algo = algo;

	/* Initialize hash digest to 0's in case of failure */
	memset(&hash.digest, 0, sizeof(hash.digest));

	if (buf)
		result = ima_calc_buffer_hash(buf, size, &hash.hdr);
	else
		result = ima_calc_file_hash(file, &hash.hdr);

	if (result && result != -EBADF && result != -EINVAL)
		goto out;

	length = sizeof(hash.hdr) + hash.hdr.length;
	tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
	if (!tmpbuf) {
		result = -ENOMEM;
		goto out;
	}

	iint->ima_hash = tmpbuf;
	memcpy(iint->ima_hash, &hash, length);
	iint->version = i_version;

	/* Possibly temporary failure due to type of read (eg. O_DIRECT) */
	if (!result)
		iint->flags |= IMA_COLLECTED;
out:
	if (result) {
		if (file->f_flags & O_DIRECT)
			audit_cause = "failed(directio)";

		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
				    filename, "collect_data", audit_cause,
				    result, 0);
	}
	return result;
}
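ima_collect_measurement resizes the cached `iint->ima_hash` through `tmpbuf`, so the previous digest survives an allocation failure, and it uses GFP_NOFS because this can run in paths where recursing into the filesystem for memory reclaim could deadlock. The shape in brief:

	tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
	if (!tmpbuf)
		return -ENOMEM;	/* iint->ima_hash untouched */
	iint->ima_hash = tmpbuf;
	memcpy(iint->ima_hash, &hash, length);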
static int jpeg_probe(struct platform_device *pdev)
{

#ifdef CONFIG_OF

    int new_count;
    struct JpegDeviceStruct* jpegDev;
    struct device_node *node = NULL;

    new_count = nrJpegDevs + 1;
    gJpegqDevs = krealloc(gJpegqDevs, 
        sizeof(struct JpegDeviceStruct) * new_count, GFP_KERNEL);
    if (!gJpegqDevs) {
        dev_err(&pdev->dev, "Unable to allocate cam_isp_devs\n");
        return -ENOMEM;
    }

    jpegDev = &(gJpegqDevs[nrJpegDevs]);
    jpegDev->pDev = &pdev->dev;

    memset(&gJpegqDev, 0x0, sizeof(JpegDeviceStruct));

    node = of_find_compatible_node(NULL, NULL, "mediatek,JPGENC");
    jpegDev->encRegBaseVA = (unsigned long)of_iomap(node, 0);
    jpegDev->encIrqId = irq_of_parse_and_map(node, 0);

    node = of_find_compatible_node(NULL, NULL, "mediatek,JPGDEC");
    jpegDev->decRegBaseVA = (unsigned long)of_iomap(node, 0);
    jpegDev->decIrqId = irq_of_parse_and_map(node, 0);

    gJpegqDev = *jpegDev;

#else

    gJpegqDev.encRegBaseVA = (0L | 0xF7003000);
    gJpegqDev.decRegBaseVA = (0L | 0xF7004000);
    gJpegqDev.encIrqId = JPGENC_IRQ_BIT_ID;
    gJpegqDev.decIrqId = JPGDEC_IRQ_BIT_ID;

    gJpegqDev.pDev = &pdev->dev;

#endif

#ifdef JPEG_DEV    
    int ret;
    struct class_device *class_dev = NULL;
    
    JPEG_MSG("-------------jpeg driver probe-------\n");
    ret = alloc_chrdev_region(&jpeg_devno, 0, 1, JPEG_DEVNAME);

    if(ret)
    {
        JPEG_ERR("Error: Can't Get Major number for JPEG Device\n");
    }
    else
    {
        JPEG_MSG("Get JPEG Device Major number (%d)\n", jpeg_devno);
    }

    jpeg_cdev = cdev_alloc();
    jpeg_cdev->owner = THIS_MODULE;
    jpeg_cdev->ops = &jpeg_fops;

    ret = cdev_add(jpeg_cdev, jpeg_devno, 1);

    jpeg_class = class_create(THIS_MODULE, JPEG_DEVNAME);
    class_dev = (struct class_device *)device_create(jpeg_class, NULL, jpeg_devno, NULL, JPEG_DEVNAME);
#else
    
    proc_create("mtk_jpeg", 0, NULL, &jpeg_fops);

#endif

    spin_lock_init(&jpeg_dec_lock);
    spin_lock_init(&jpeg_enc_lock);

    // initial codec, register codec ISR
    dec_status = 0;
    enc_status = 0;
    _jpeg_dec_int_status = 0;
    _jpeg_enc_int_status = 0;
    _jpeg_dec_mode = 0;

#ifndef FPGA_VERSION

#ifdef JPEG_DEC_DRIVER
    init_waitqueue_head(&dec_wait_queue);
#endif    
    init_waitqueue_head(&enc_wait_queue);  
    
    //mt6575_irq_set_sens(MT6575_JPEG_CODEC_IRQ_ID, MT65xx_LEVEL_SENSITIVE);
    //mt6575_irq_set_polarity(MT6575_JPEG_CODEC_IRQ_ID, MT65xx_POLARITY_LOW);
    //mt6575_irq_unmask(MT6575_JPEG_CODEC_IRQ_ID);
    JPEG_MSG("request JPEG Encoder IRQ \n");
    enable_irq(gJpegqDev.encIrqId);
    if(request_irq(gJpegqDev.encIrqId, jpeg_drv_enc_isr, IRQF_TRIGGER_LOW, "jpeg_enc_driver" , NULL))
    //if(request_irq(JPGENC_IRQ_BIT_ID, jpeg_drv_enc_isr, /*IRQF_TRIGGER_RISING*/ IRQF_TRIGGER_HIGH, "jpeg_enc_driver" , NULL))
    //if(request_irq(JPGENC_IRQ_BIT_ID, jpeg_drv_enc_isr, IRQF_TRIGGER_RISING , "jpeg_enc_driver" , NULL))
    {
        JPEG_ERR("JPEG ENC Driver request irq failed\n");
    }

#ifdef JPEG_DEC_DRIVER    
    enable_irq(gJpegqDev.decIrqId);
    JPEG_MSG("request JPEG Decoder IRQ \n");
    //if(request_irq(JPGDEC_IRQ_BIT_ID, jpeg_drv_dec_isr, IRQF_TRIGGER_LOW, "jpeg_dec_driver" , NULL))
    //if(request_irq(JPGDEC_IRQ_BIT_ID, jpeg_drv_dec_isr, /*IRQF_TRIGGER_RISING*/ IRQF_TRIGGER_HIGH, "jpeg_dec_driver" , NULL))
    //if(request_irq(JPGDEC_IRQ_BIT_ID, jpeg_drv_dec_isr, IRQF_TRIGGER_RISING , "jpeg_dec_driver" , NULL))
    if(request_irq(gJpegqDev.decIrqId, jpeg_drv_dec_isr, IRQF_TRIGGER_FALLING , "jpeg_dec_driver" , NULL))
    {
        JPEG_ERR("JPEG DEC Driver request irq failed\n");
    }    
#endif    
    
#endif
    JPEG_MSG("JPEG Probe Done\n");

#ifdef JPEG_DEV    
    NOT_REFERENCED(class_dev);
#endif
    return 0;
}
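jpeg_probe repeats the overwrite-on-realloc hazard (`gJpegqDevs = krealloc(gJpegqDevs, ...)` drops the old device array on failure) and, inside `#ifdef JPEG_DEV`, declares variables after statements. For probe-path allocations, device-managed memory can also remove the manual bookkeeping; a hedged sketch using devm_krealloc (kernels >= 5.10, and assuming the array was devm-allocated to begin with):

	struct JpegDeviceStruct *devs;

	devs = devm_krealloc(&pdev->dev, gJpegqDevs,
			     sizeof(*devs) * new_count, GFP_KERNEL);
	if (!devs)
		return -ENOMEM;	/* old array still owned by devres */
	gJpegqDevs = devs;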
Example #17
int softing_load_fw(const char *file, struct softing *card,
		__iomem uint8_t *dpram, unsigned int size, int offset)
{
	const struct firmware *fw;
	int ret;
	const uint8_t *mem, *end, *dat;
	uint16_t type, len;
	uint32_t addr;
	uint8_t *buf = NULL;
	int buflen = 0;
	int8_t type_end = 0;

	ret = request_firmware(&fw, file, &card->pdev->dev);
	if (ret < 0)
		return ret;
	dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
		", offset %c0x%04x\n",
		card->pdat->name, file, (unsigned int)fw->size,
		(offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
	/* parse the firmware */
	mem = fw->data;
	end = &mem[fw->size];
	/* look for header record */
	ret = fw_parse(&mem, &type, &addr, &len, &dat);
	if (ret < 0)
		goto failed;
	if (type != 0xffff)
		goto failed;
	if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
		ret = -EINVAL;
		goto failed;
	}
	/* ok, we had a header */
	while (mem < end) {
		ret = fw_parse(&mem, &type, &addr, &len, &dat);
		if (ret < 0)
			goto failed;
		if (type == 3) {
			/* start address, not used here */
			continue;
		} else if (type == 1) {
			/* eof */
			type_end = 1;
			break;
		} else if (type != 0) {
			ret = -EINVAL;
			goto failed;
		}

		if ((addr + len + offset) > size)
			goto failed;
		memcpy_toio(&dpram[addr + offset], dat, len);
		/* be sure to flush caches from IO space */
		mb();
		if (len > buflen) {
			/* align buflen */
			buflen = (len + (1024-1)) & ~(1024-1);
			buf = krealloc(buf, buflen, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto failed;
			}
		}
		/* verify record data */
		memcpy_fromio(buf, &dpram[addr + offset], len);
		if (memcmp(buf, dat, len)) {
			/* is not ok */
			dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
			ret = -EIO;
			goto failed;
		}
	}
	if (!type_end)
		/* no end record seen */
		goto failed;
	ret = 0;
failed:
	kfree(buf);
	release_firmware(fw);
	if (ret < 0)
		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
	return ret;
}
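softing_load_fw rounds the verify-buffer size up to the next 1 KiB boundary so a stream of slightly-growing records triggers few reallocations, but `buf = krealloc(buf, ...)` again drops the old pointer: on failure the previous verify buffer leaks and the `kfree(buf)` under `failed:` frees NULL. Round-up plus a staging pointer together (ALIGN is the kernel's round-up macro):

	if (len > buflen) {
		void *tmp;

		buflen = ALIGN(len, 1024);
		tmp = krealloc(buf, buflen, GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto failed;	/* buf still valid; cleanup frees it */
		}
		buf = tmp;
	}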
Example #18
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;
	void *temp_buf;
	uint16_t support_list = 0;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			diag_max_reg += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				diag_max_reg -= pkt_params->count -
							 count_entries;
				pr_alert("diag: Insufficient memory for reg.");
				mutex_unlock(&driver->diagchar_mutex);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_DCI_REG) {
		if (driver->dci_state == DIAG_DCI_NO_REG)
			return DIAG_DCI_NO_REG;
		/* use the 'list' later on to notify user space */
		if (driver->num_dci_client >= MAX_DCI_CLIENT)
			return DIAG_DCI_NO_REG;
		mutex_lock(&driver->dci_mutex);
		driver->num_dci_client++;
		pr_debug("diag: id = %d\n", driver->dci_client_id);
		driver->dci_client_id++;
		mutex_unlock(&driver->dci_mutex);
		return driver->dci_client_id;
	} else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
		success = -1;
		/* Delete this process from DCI table */
		mutex_lock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++) {
			if (driver->dci_tbl[i].pid == current->tgid) {
				pr_debug("diag: delete %d\n", current->tgid);
				driver->dci_tbl[i].pid = 0;
				success = i;
			}
		}
		/* if any registrations were deleted successfully OR a valid
		   client_id was sent in DEINIT call, then it's a DCI client */
		if (success >= 0 || ioarg)
			driver->num_dci_client--;
		mutex_unlock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++)
			if (driver->dci_tbl[i].pid != 0)
				pr_debug("diag: PID = %d, UID = %d, tag = %d\n",
	driver->dci_tbl[i].pid, driver->dci_tbl[i].uid, driver->dci_tbl[i].tag);
		pr_debug("diag: complete deleting registrations\n");
		return success;
	} else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
		if (driver->ch_dci)
			support_list = support_list | DIAG_CON_MPSS;
		*(uint16_t *)ioarg = support_list;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (driver->logging_mode == MEMORY_DEVICE_MODE)
			driver->mask_check = 1;
		if (driver->logging_mode == UART_MODE) {
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_qdsp_1 = 1;
			driver->in_busy_qdsp_2 = 1;
			driver->in_busy_wcnss_1 = 1;
			driver->in_busy_wcnss_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
#ifdef CONFIG_DIAG_HSIC_PIPE
			driver->num_hsic_buf_tbl_entries = 0;
			for (i = 0; i < driver->poolsize_hsic_write; i++) {
				if (driver->hsic_buf_tbl[i].buf) {
					/* Return the buffer to the pool */
					diagmem_free(driver, (unsigned char *)
						(driver->hsic_buf_tbl[i].buf),
						POOL_TYPE_HSIC);
					driver->hsic_buf_tbl[i].buf = 0;
					driver->hsic_buf_tbl[i].length = 0;
				}
			}
			diagfwd_disconnect_hsic(0);
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_HSIC_PIPE
			driver->num_hsic_buf_tbl_entries = 0;
			for (i = 0; i < driver->poolsize_hsic_write; i++) {
				driver->hsic_buf_tbl[i].buf = 0;
				driver->hsic_buf_tbl[i].length = 0;
			}
			diagfwd_connect_hsic(0);
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE) {
			diagfwd_disconnect();
#ifdef CONFIG_DIAG_HSIC_PIPE
			diagfwd_disconnect_hsic(0);
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_HSIC_PIPE
			diagfwd_connect_hsic(0);
#endif
		} else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;

			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_HSIC_PIPE
			driver->num_hsic_buf_tbl_entries = 0;
			for (i = 0; i < driver->poolsize_hsic_write; i++) {
				driver->hsic_buf_tbl[i].buf = 0;
				driver->hsic_buf_tbl[i].length = 0;
			}
			diagfwd_cancel_hsic();
			diagfwd_connect_hsic(0);
#endif
		} else if (temp == MEMORY_DEVICE_MODE &&
				 driver->logging_mode == USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_HSIC_PIPE
			driver->num_hsic_buf_tbl_entries = 0;
			for (i = 0; i < driver->poolsize_hsic_write; i++) {
				if (driver->hsic_buf_tbl[i].buf) {
					/* Return the buffer to the pool */
					diagmem_free(driver, (unsigned char *)
						(driver->hsic_buf_tbl[i].buf),
						POOL_TYPE_HSIC);
					driver->hsic_buf_tbl[i].buf = 0;
					driver->hsic_buf_tbl[i].length = 0;
				}
			}
			diagfwd_cancel_hsic();
			diagfwd_connect_hsic(0);
#endif
		}
#endif /* DIAG over USB */
		success = 1;
	}

	return success;
}
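A note on the argument handling above: several branches treat ioarg as a directly dereferenceable pointer (the delayed-response branch writes through delay_params->rsp_ptr, and the DCI_SUPPORT branch stores through (uint16_t *)ioarg), which is only safe for trusted in-kernel callers; the hardened revision of this handler later in this collection switches to copy_from_user()/copy_to_user(). A minimal sketch of that pattern, using a hypothetical argument struct:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* hypothetical argument block, for illustration only */
struct my_ioctl_arg {
	u32 count;
	u16 result;
};

static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long ioarg)
{
	struct my_ioctl_arg arg;

	/* never dereference ioarg directly; copy it into kernel memory */
	if (copy_from_user(&arg, (void __user *)ioarg, sizeof(arg)))
		return -EFAULT;

	arg.result = 1;		/* fill in the reply */

	/* copy the updated block back to the caller */
	if (copy_to_user((void __user *)ioarg, &arg, sizeof(arg)))
		return -EFAULT;
	return 0;
}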
Example #19
static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
				       unsigned table_count)
{
	struct exofs_sb_info *sbi = *psbi;
	struct osd_dev *fscb_od;
	struct osd_obj_id obj = {.partition = sbi->layout.s_pid,
				 .id = EXOFS_DEVTABLE_ID};
	struct exofs_device_table *dt;
	unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
					     sizeof(*dt);
	unsigned numdevs, i;
	int ret;

	dt = kmalloc(table_bytes, GFP_KERNEL);
	if (unlikely(!dt)) {
		EXOFS_ERR("ERROR: allocating %x bytes for device table\n",
			  table_bytes);
		return -ENOMEM;
	}

	fscb_od = sbi->layout.s_ods[0];
	sbi->layout.s_ods[0] = NULL;
	sbi->layout.s_numdevs = 0;
	ret = exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes);
	if (unlikely(ret)) {
		EXOFS_ERR("ERROR: reading device table\n");
		goto out;
	}

	numdevs = le64_to_cpu(dt->dt_num_devices);
	if (unlikely(!numdevs)) {
		ret = -EINVAL;
		goto out;
	}
	WARN_ON(table_count != numdevs);

	ret = _read_and_match_data_map(sbi, numdevs, dt);
	if (unlikely(ret))
		goto out;

	if (likely(numdevs > 1)) {
		unsigned size = numdevs * sizeof(sbi->layout.s_ods[0]);

		sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL);
		if (unlikely(!sbi)) {
			ret = -ENOMEM;
			goto out;
		}
		memset(&sbi->layout.s_ods[1], 0,
		       size - sizeof(sbi->layout.s_ods[0]));
		*psbi = sbi;
	}

	for (i = 0; i < numdevs; i++) {
		struct exofs_fscb fscb;
		struct osd_dev_info odi;
		struct osd_dev *od;

		if (exofs_devs_2_odi(&dt->dt_dev_table[i], &odi)) {
			EXOFS_ERR("ERROR: Read all-zeros device entry\n");
			ret = -EINVAL;
			goto out;
		}

		printk(KERN_NOTICE "Add device[%d]: osd_name-%s\n",
		       i, odi.osdname);

		/* On all devices the device table is identical. The user can
		 * specify any one of the participating devices on the command
		 * line. We always keep them in device-table order.
		 */
		if (fscb_od && osduld_device_same(fscb_od, &odi)) {
			sbi->layout.s_ods[i] = fscb_od;
			++sbi->layout.s_numdevs;
			fscb_od = NULL;
			continue;
		}

		od = osduld_info_lookup(&odi);
		if (unlikely(IS_ERR(od))) {
			ret = PTR_ERR(od);
			EXOFS_ERR("ERROR: device requested is not found "
				  "osd_name-%s =>%d\n", odi.osdname, ret);
			goto out;
		}

		sbi->layout.s_ods[i] = od;
		++sbi->layout.s_numdevs;

		/* Read the fscb of the other devices to make sure the FS
		 * partition is there.
		 */
		ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb,
				      sizeof(fscb));
		if (unlikely(ret)) {
			EXOFS_ERR("ERROR: Malformed participating device "
				  "error reading fscb osd_name-%s\n",
				  odi.osdname);
			goto out;
		}

		/* TODO: verify other information is correct and FS-uuid
		 *	 matches. Benny what did you say about device table
		 *	 generation and old devices?
		 */
	}

out:
	kfree(dt);
	if (unlikely(!ret && fscb_od)) {
		EXOFS_ERR(
		      "ERROR: Bad device-table container device not present\n");
		osduld_put_device(fscb_od);
		ret = -EINVAL;
	}

	return ret;
}
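exofs_read_lookup_dev_table grows the superblock-info struct itself so the trailing s_ods[] array can hold all numdevs entries, zeroes only the newly added tail (krealloc does not clear it), and writes the new address back through *psbi because the block may move. The same shape as a standalone sketch, with an illustrative struct:

#include <linux/slab.h>
#include <linux/string.h>

/* illustrative only: a header with a growable trailing array */
struct dev_set {
	unsigned int count;
	void *devs[1];
};

/*
 * Grow *pset so devs[] holds 'want' entries. Assumes *pset is an
 * initialized set and want exceeds the current count. The caller's
 * pointer is updated because krealloc may move the block.
 */
static int dev_set_grow(struct dev_set **pset, unsigned int want)
{
	size_t bytes = sizeof(struct dev_set) + (want - 1) * sizeof(void *);
	struct dev_set *set = krealloc(*pset, bytes, GFP_KERNEL);

	if (!set)
		return -ENOMEM;		/* *pset is still valid */

	/* zero only the tail that krealloc left uninitialized */
	memset(&set->devs[set->count], 0,
	       (want - set->count) * sizeof(set->devs[0]));
	set->count = want;
	*pset = set;
	return 0;
}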

/*
 * Read the superblock from the OSD and fill in the fields
 */
static int exofs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;
	struct exofs_mountopt *opts = data;
	struct exofs_sb_info *sbi;	/*extended info                  */
	struct osd_dev *od;		/* Master device                 */
	struct exofs_fscb fscb;		/*on-disk superblock info        */
	struct osd_obj_id obj;
	unsigned table_count;
	int ret;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
	if (ret)
		goto free_bdi;

	/* use mount options to fill superblock */
	od = osduld_path_lookup(opts->dev_name);
	if (IS_ERR(od)) {
		ret = PTR_ERR(od);
		goto free_sbi;
	}

	/* Default layout in case we do not have a device-table */
	sbi->layout.stripe_unit = PAGE_SIZE;
	sbi->layout.mirrors_p1 = 1;
	sbi->layout.group_width = 1;
	sbi->layout.group_depth = -1;
	sbi->layout.group_count = 1;
	sbi->layout.s_ods[0] = od;
	sbi->layout.s_numdevs = 1;
	sbi->layout.s_pid = opts->pid;
	sbi->s_timeout = opts->timeout;

	/* fill in some other data by hand */
	memset(sb->s_id, 0, sizeof(sb->s_id));
	strcpy(sb->s_id, "exofs");
	sb->s_blocksize = EXOFS_BLKSIZE;
	sb->s_blocksize_bits = EXOFS_BLKSHIFT;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	atomic_set(&sbi->s_curr_pending, 0);
	sb->s_bdev = NULL;
	sb->s_dev = 0;

	obj.partition = sbi->layout.s_pid;
	obj.id = EXOFS_SUPER_ID;
	exofs_make_credential(sbi->s_cred, &obj);

	ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb, sizeof(fscb));
	if (unlikely(ret))
		goto free_sbi;

	sb->s_magic = le16_to_cpu(fscb.s_magic);
	sbi->s_nextid = le64_to_cpu(fscb.s_nextid);
	sbi->s_numfiles = le32_to_cpu(fscb.s_numfiles);

	/* make sure what we read from the object store is correct */
	if (sb->s_magic != EXOFS_SUPER_MAGIC) {
		if (!silent)
			EXOFS_ERR("ERROR: Bad magic value\n");
		ret = -EINVAL;
		goto free_sbi;
	}
	if (le32_to_cpu(fscb.s_version) != EXOFS_FSCB_VER) {
		EXOFS_ERR("ERROR: Bad FSCB version expected-%d got-%d\n",
			  EXOFS_FSCB_VER, le32_to_cpu(fscb.s_version));
		ret = -EINVAL;
		goto free_sbi;
	}

	/* start generation numbers from a random point */
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	table_count = le64_to_cpu(fscb.s_dev_table_count);
	if (table_count) {
		ret = exofs_read_lookup_dev_table(&sbi, table_count);
		if (unlikely(ret))
			goto free_sbi;
	}

	/* set up operation vectors */
	sb->s_bdi = &sbi->bdi;
	sb->s_fs_info = sbi;
	sb->s_op = &exofs_sops;
	sb->s_export_op = &exofs_export_ops;
	root = exofs_iget(sb, EXOFS_ROOT_ID - EXOFS_OBJ_OFF);
	if (IS_ERR(root)) {
		EXOFS_ERR("ERROR: exofs_iget failed\n");
		ret = PTR_ERR(root);
		goto free_sbi;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		EXOFS_ERR("ERROR: get root inode failed\n");
		ret = -ENOMEM;
		goto free_sbi;
	}

	if (!S_ISDIR(root->i_mode)) {
		dput(sb->s_root);
		sb->s_root = NULL;
		EXOFS_ERR("ERROR: corrupt root inode (mode = %hd)\n",
		       root->i_mode);
		ret = -EINVAL;
		goto free_sbi;
	}

	_exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
			    sbi->layout.s_pid);
	return 0;

free_sbi:
	bdi_destroy(&sbi->bdi);
free_bdi:
	EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
		  opts->dev_name, sbi->layout.s_pid, ret);
	exofs_free_sbi(sbi);
	return ret;
}

/*
 * Set up the superblock (calls exofs_fill_super eventually)
 */
static struct dentry *exofs_mount(struct file_system_type *type,
			  int flags, const char *dev_name,
			  void *data)
{
	struct exofs_mountopt opts;
	int ret;

	ret = parse_options(data, &opts);
	if (ret)
		return ERR_PTR(ret);

	opts.dev_name = dev_name;
	return mount_nodev(type, flags, &opts, exofs_fill_super);
}

/*
 * Return information about the file system state in the buffer.  This is used
 * by the 'df' command, for example.
 */
static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	struct osd_attr attrs[] = {
		ATTR_DEF(OSD_APAGE_PARTITION_QUOTAS,
			OSD_ATTR_PQ_CAPACITY_QUOTA, sizeof(__be64)),
		ATTR_DEF(OSD_APAGE_PARTITION_INFORMATION,
			OSD_ATTR_PI_USED_CAPACITY, sizeof(__be64)),
	};
	uint64_t capacity = ULLONG_MAX;
	uint64_t used = ULLONG_MAX;
	uint8_t cred_a[OSD_CAP_LEN];
	int ret;

	ret = exofs_get_io_state(&sbi->layout, &ios);
	if (ret) {
		EXOFS_DBGMSG("exofs_get_io_state failed.\n");
		return ret;
	}

	exofs_make_credential(cred_a, &ios->obj);
	ios->cred = sbi->s_cred;
	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = exofs_sbi_read(ios);
	if (unlikely(ret))
		goto out;

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (likely(!ret)) {
		capacity = get_unaligned_be64(attrs[0].val_ptr);
		if (unlikely(!capacity))
			capacity = ULLONG_MAX;
	} else
		EXOFS_DBGMSG("exofs_statfs: get capacity failed.\n");

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (likely(!ret))
		used = get_unaligned_be64(attrs[1].val_ptr);
	else
		EXOFS_DBGMSG("exofs_statfs: get used-space failed.\n");

	/* fill in the stats buffer */
	buf->f_type = EXOFS_SUPER_MAGIC;
	buf->f_bsize = EXOFS_BLKSIZE;
	buf->f_blocks = capacity >> 9;
	buf->f_bfree = (capacity - used) >> 9;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = sbi->s_numfiles;
	buf->f_ffree = EXOFS_MAX_ID - sbi->s_numfiles;
	buf->f_namelen = EXOFS_NAME_LEN;

out:
	exofs_put_io_state(ios);
	return ret;
}

static const struct super_operations exofs_sops = {
	.alloc_inode    = exofs_alloc_inode,
	.destroy_inode  = exofs_destroy_inode,
	.write_inode    = exofs_write_inode,
	.evict_inode    = exofs_evict_inode,
	.put_super      = exofs_put_super,
	.write_super    = exofs_write_super,
	.sync_fs	= exofs_sync_fs,
	.statfs         = exofs_statfs,
};

/******************************************************************************
 * EXPORT OPERATIONS
 *****************************************************************************/

struct dentry *exofs_get_parent(struct dentry *child)
{
	unsigned long ino = exofs_parent_ino(child);

	if (!ino)
		return NULL;

	return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
}
Example #20
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;
	void *temp_buf;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			diag_max_reg += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				diag_max_reg -= pkt_params->count -
							 count_entries;
				pr_alert("diag: Insufficient memory for reg.");
				mutex_unlock(&driver->diagchar_mutex);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, pkt_params->params,
						&success, &count_entries);
				if (pkt_params->count > count_entries) {
					pkt_params->params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					return success;
				}
			}
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (driver->logging_mode == MEMORY_DEVICE_MODE)
			driver->mask_check = 1;
		if (driver->logging_mode == UART_MODE) {
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_qdsp_1 = 1;
			driver->in_busy_qdsp_2 = 1;
			driver->in_busy_wcnss = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE)
			diagfwd_disconnect();
		else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE)
			diagfwd_connect();
		else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
		} else if (temp == MEMORY_DEVICE_MODE &&
				 driver->logging_mode == USB_MODE)
			diagfwd_connect();
#endif /* DIAG over USB */
		success = 1;
	}

	return success;
}
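The registration path above routes krealloc's result through temp_buf and commits it to driver->table only after the NULL check; assigning the result straight back to the sole pointer would lose the old table on failure. The same idiom as a tiny standalone helper (illustrative names):

#include <linux/slab.h>

/* grow an int array without leaking the old block on failure */
static int grow_array(int **arr, size_t *len, size_t new_len)
{
	int *tmp = krealloc(*arr, new_len * sizeof(**arr), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *arr is untouched and still owned */

	*arr = tmp;
	*len = new_len;
	return 0;
}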
static void process_ssid_range_report(uint8_t *buf, uint32_t len,
				      struct diag_smd_info *smd_info)
{
	int i;
	int j;
	int read_len = 0;
	int found = 0;
	int new_size = 0;
	int err = 0;
	struct diag_ctrl_ssid_range_report *header = NULL;
	struct diag_ssid_range_t *ssid_range = NULL;
	int header_len = sizeof(struct diag_ctrl_ssid_range_report);
	struct diag_msg_mask_t *mask_ptr = NULL;
	uint8_t *ptr = buf;
	uint8_t *temp = NULL;
	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);

	if (!buf || !smd_info || len < min_len)
		return;

	header = (struct diag_ctrl_ssid_range_report *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	mutex_lock(&msg_mask.lock);
	driver->max_ssid_count[smd_info->peripheral] = header->count;
	for (i = 0; i < header->count &&
		    read_len + sizeof(struct diag_ssid_range_t) <= len; i++) {
		ssid_range = (struct diag_ssid_range_t *)ptr;
		ptr += sizeof(struct diag_ssid_range_t);
		read_len += sizeof(struct diag_ssid_range_t);
		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
		found = 0;
		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
			if (mask_ptr->ssid_first != ssid_range->ssid_first)
				continue;
			err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
			if (err == -ENOMEM) {
				pr_err("diag: In %s, unable to increase the msg mask table range\n",
				       __func__);
			}
			found = 1;
			break;
		}

		if (found)
			continue;

		new_size = (driver->msg_mask_tbl_count + 1) *
			   sizeof(struct diag_msg_mask_t);
		temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
		if (!temp) {
			pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
			       __func__, ssid_range->ssid_first,
			       ssid_range->ssid_last);
			continue;
		}
		msg_mask.ptr = temp;
		/* rebase: krealloc may have moved the table */
		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr +
			   driver->msg_mask_tbl_count;
		err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
		if (err) {
			pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
			       __func__, ssid_range->ssid_first,
			       ssid_range->ssid_last, err);
			continue;
		}
		driver->msg_mask_tbl_count += 1;
	}
	mutex_unlock(&msg_mask.lock);
}
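process_ssid_range_report walks a counted list of fixed-size records, advancing a cursor and read_len together so that neither the advertised count nor a short packet can push the parser past the end of buf. A reduced sketch of that loop shape, with a hypothetical record type:

#include <linux/types.h>

struct wire_record {		/* hypothetical fixed-size wire record */
	u32 first;
	u32 last;
};

static void parse_records(u8 *buf, u32 len, u32 count)
{
	u8 *ptr = buf;
	u32 read_len = 0;
	u32 i;

	/* stop on whichever runs out first: the advertised count or the
	 * bytes actually received; require a whole record to remain */
	for (i = 0; i < count &&
		    read_len + sizeof(struct wire_record) <= len; i++) {
		struct wire_record *rec = (struct wire_record *)ptr;

		ptr += sizeof(*rec);
		read_len += sizeof(*rec);
		(void)rec;	/* placeholder for real processing */
	}
}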
Example #22
static void lsm_event_handler(uint32_t opcode, uint32_t token,
			      void *payload, void *priv)
{
	unsigned long flags;
	struct lsm_priv *prtd = priv;
	struct snd_pcm_substream *substream = prtd->substream;
	uint16_t status = 0;
	uint16_t payload_size = 0;
	uint16_t index = 0;

	pr_debug("%s: Opcode 0x%x\n", __func__, opcode);
	switch (opcode) {
	case LSM_DATA_EVENT_READ_DONE: {
		int rc;
		struct lsm_cmd_read_done *read_done = payload;
		int buf_index = 0;
		if (prtd->lsm_client->session != token
		  || !read_done) {
			pr_err("%s: EVENT_READ_DONE invalid callback client session %d callback sesson %d payload %p",
			__func__, prtd->lsm_client->session, token, read_done);
			return;
		}
		if (atomic_read(&prtd->read_abort)) {
			pr_info("%s: read abort set skip data\n", __func__);
			return;
		}
		if (!lsm_lab_buffer_sanity(prtd, read_done, &buf_index)) {
			pr_debug("%s: process read done index %d\n",
				__func__, buf_index);
			if (buf_index >=
				prtd->lsm_client->hw_params.period_count) {
				pr_err("%s: Invalid index %d buf_index max cnt %d\n"
				, __func__, buf_index,
				prtd->lsm_client->hw_params.period_count);
				return;
			}
			prtd->dma_write += read_done->total_size;
			atomic_inc(&prtd->buf_count);
			snd_pcm_period_elapsed(substream);
			wake_up(&prtd->period_wait);
			/* queue the next period buffer */
			buf_index = (buf_index + 1) %
			prtd->lsm_client->hw_params.period_count;
			rc = msm_lsm_queue_lab_buffer(prtd, buf_index);
			if (rc)
				pr_err("%s: error in queuing the lab buffer rc %d\n",
					__func__, rc);
		} else
			pr_err("%s: Invalid lab buffer returned by dsp\n",
			__func__);
		break;
	}
	case LSM_SESSION_EVENT_DETECTION_STATUS:
		status = (uint16_t)((uint8_t *)payload)[0];
		payload_size = (uint16_t)((uint8_t *)payload)[2];
		index = 4;
		pr_debug("%s: event detect status = %d payload size = %d\n",
			 __func__, status, payload_size);
	break;
	case LSM_SESSION_EVENT_DETECTION_STATUS_V2:
		status = (uint16_t)((uint8_t *)payload)[0];
		payload_size = (uint16_t)((uint8_t *)payload)[1];
		index = 2;
		pr_debug("%s: event detect status = %d payload size = %d\n",
			 __func__, status, payload_size);
		break;
	default:
		pr_debug("%s: Unsupported Event opcode 0x%x\n", __func__,
			 opcode);
		break;
	}
	if (opcode == LSM_SESSION_EVENT_DETECTION_STATUS ||
		opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
		spin_lock_irqsave(&prtd->event_lock, flags);
		prtd->event_status = krealloc(prtd->event_status,
					sizeof(struct snd_lsm_event_status) +
					payload_size, GFP_ATOMIC);
		if (likely(prtd->event_status)) {
			prtd->event_status->status = status;
			prtd->event_status->payload_size = payload_size;
			memcpy(prtd->event_status->payload,
			       &((uint8_t *)payload)[index],
			       payload_size);
			prtd->event_avail = 1;
			spin_unlock_irqrestore(&prtd->event_lock, flags);
			wake_up(&prtd->event_wait);
		} else {
			spin_unlock_irqrestore(&prtd->event_lock, flags);
			pr_err("%s: Couldn't allocate %d bytes of memory\n",
			       __func__, payload_size);
		}
		if (substream->timer_running)
			snd_timer_interrupt(substream->timer, 1);
	}
}
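The event path reallocates while holding a spinlock, so GFP_ATOMIC is mandatory, and (as corrected above) the result must be checked before anything is written through it. Assigning krealloc's result straight back still drops the old buffer on failure; routing it through a temporary keeps the previous event intact. A minimal sketch of that hardening:

#include <linux/slab.h>

/* grow a buffer from atomic context without losing it on failure */
static int grow_event_buf(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_ATOMIC);

	if (!tmp)
		return -ENOMEM;	/* *bufp still points at the old block */

	*bufp = tmp;
	return 0;
}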
Example #23
static int edid_load(struct drm_connector *connector, char *name,
		     char *connector_name)
{
	const struct firmware *fw;
	struct platform_device *pdev;
	u8 *fwdata = NULL, *edid, *new_edid;
	int fwsize, expected;
	int builtin = 0, err = 0;
	int i, valid_extensions = 0;
	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);

	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		DRM_ERROR("Failed to register EDID firmware platform device "
		    "for connector \"%s\"\n", connector_name);
		err = -EINVAL;
		goto out;
	}

	err = request_firmware(&fw, name, &pdev->dev);
	platform_device_unregister(pdev);

	if (err) {
		i = 0;
		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
			i++;
		if (i < GENERIC_EDIDS) {
			err = 0;
			builtin = 1;
			fwdata = generic_edid[i];
			fwsize = sizeof(generic_edid[i]);
		}
	}

	if (err) {
		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
		    name, err);
		goto out;
	}

	if (fwdata == NULL) {
		fwdata = (u8 *) fw->data;
		fwsize = fw->size;
	}

	if (fwsize < EDID_LENGTH) {
		DRM_ERROR("EDID firmware \"%s\" is too small\n", name);
		err = -EINVAL;
		goto relfw_out;
	}
	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
	if (expected != fwsize) {
		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
		    "(expected %d, got %d)\n", name, expected, (int) fwsize);
		err = -EINVAL;
		goto relfw_out;
	}

	edid = kmalloc(fwsize, GFP_KERNEL);
	if (edid == NULL) {
		err = -ENOMEM;
		goto relfw_out;
	}
	memcpy(edid, fwdata, fwsize);

	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
		connector->bad_edid_counter++;
		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
		    name);
		kfree(edid);
		err = -EINVAL;
		goto relfw_out;
	}

	for (i = 1; i <= edid[0x7e]; i++) {
		if (i != valid_extensions + 1)
			memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
			    edid + i * EDID_LENGTH, EDID_LENGTH);
		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
			valid_extensions++;
	}

	if (valid_extensions != edid[0x7e]) {
		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
		    "\"%s\" for connector \"%s\"\n", valid_extensions,
		    edid[0x7e], name, connector_name);
		edid[0x7e] = valid_extensions;
		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
		    GFP_KERNEL);
		if (new_edid == NULL) {
			err = -ENOMEM;
			kfree(edid);
			goto relfw_out;
		}
		edid = new_edid;
	}

	connector->display_info.raw_edid = edid;
	DRM_INFO("Got %s EDID base block and %d extension%s from "
	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
	    name, connector_name);

relfw_out:
	release_firmware(fw);

out:
	return err;
}
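edid_load compacts the valid extension blocks downward, patches the block count and checksum, and then shrinks the allocation with krealloc. A shrink will normally succeed in place, but the API only promises a valid pointer or NULL, so the result is still checked and the original buffer freed explicitly on failure. A reduced sketch of that step, with a hypothetical block size:

#include <linux/slab.h>
#include <linux/types.h>

#define BLK_SIZE 128		/* illustrative fixed block size */

/* keep the first 'valid' blocks of 'buf', releasing the excess */
static u8 *shrink_blocks(u8 *buf, unsigned int valid)
{
	u8 *tmp = krealloc(buf, valid * BLK_SIZE, GFP_KERNEL);

	if (!tmp) {
		kfree(buf);	/* don't leak the full-size buffer */
		return NULL;
	}
	return tmp;
}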
int softing_load_fw(const char *file, struct softing *card,
		__iomem uint8_t *dpram, unsigned int size, int offset)
{
	const struct firmware *fw;
	int ret;
	const uint8_t *mem, *end, *dat;
	uint16_t type, len;
	uint32_t addr;
	uint8_t *buf = NULL;
	int buflen = 0;
	int8_t type_end = 0;

	ret = request_firmware(&fw, file, &card->pdev->dev);
	if (ret < 0)
		return ret;
	dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
		", offset %c0x%04x\n",
		card->pdat->name, file, (unsigned int)fw->size,
		(offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
	
	mem = fw->data;
	end = &mem[fw->size];
	
	ret = fw_parse(&mem, &type, &addr, &len, &dat);
	if (ret < 0)
		goto failed;
	if (type != 0xffff)
		goto failed;
	if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
		ret = -EINVAL;
		goto failed;
	}
	
	while (mem < end) {
		ret = fw_parse(&mem, &type, &addr, &len, &dat);
		if (ret < 0)
			goto failed;
		if (type == 3) {
			/* start address record, nothing to copy */
			continue;
		} else if (type == 1) {
			/* end-of-file record */
			type_end = 1;
			break;
		} else if (type != 0) {
			ret = -EINVAL;
			goto failed;
		}

		if ((addr + len + offset) > size)
			goto failed;
		memcpy_toio(&dpram[addr + offset], dat, len);
		/* make sure the DPRAM write completes before the readback */
		mb();
		if (len > buflen) {
			/* round the verify buffer up to a 1 KiB multiple */
			uint8_t *new_buf;

			buflen = (len + (1024 - 1)) & ~(1024 - 1);
			new_buf = krealloc(buf, buflen, GFP_KERNEL);
			if (!new_buf) {
				ret = -ENOMEM;
				goto failed;
			}
			buf = new_buf;
		}
		/* read back and verify what was just written */
		memcpy_fromio(buf, &dpram[addr + offset], len);
		if (memcmp(buf, dat, len)) {
			/* readback mismatch */
			dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
			ret = -EIO;
			goto failed;
		}
	}
	if (!type_end)
		/* never saw the end-of-file record */
		goto failed;
	ret = 0;
failed:
	kfree(buf);
	release_firmware(fw);
	if (ret < 0)
		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
	return ret;
}
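softing_load_fw rounds its verify buffer up to the next 1 KiB multiple, so a run of similar-sized records costs one krealloc rather than one per record. A sketch of that round-up growth as a reusable helper (illustrative names):

#include <linux/slab.h>
#include <linux/types.h>

/* ensure *bufp can hold 'need' bytes, growing in 1 KiB steps */
static int ensure_capacity(u8 **bufp, size_t *capp, size_t need)
{
	u8 *tmp;
	size_t cap;

	if (need <= *capp)
		return 0;

	cap = (need + (1024 - 1)) & ~(size_t)(1024 - 1);	/* round up */
	tmp = krealloc(*bufp, cap, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* any existing buffer is still owned */

	*bufp = tmp;
	*capp = cap;
	return 0;
}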
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, temp, success = -1, status;
	unsigned int count_entries = 0, interim_count = 0;
	void *temp_buf;
	uint16_t support_list = 0;
	struct dci_notification_tbl *dci_params;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process pkt_params;
		struct bindpkt_params *params;
		struct bindpkt_params *head_params;
		if (copy_from_user(&pkt_params, (void *)ioarg,
			   sizeof(struct bindpkt_params_per_process))) {
			return -EFAULT;
		}
		if ((UINT32_MAX/sizeof(struct bindpkt_params)) <
							 pkt_params.count) {
			pr_warning("diag: integer overflow while multiply\n");
			return -EFAULT;
		}
		params = kzalloc(pkt_params.count*sizeof(
			struct bindpkt_params), GFP_KERNEL);
		if (!params) {
			pr_err("diag: unable to alloc memory\n");
			return -ENOMEM;
		} else
			head_params = params;

		if (copy_from_user(params, pkt_params.params,
			   pkt_params.count*sizeof(struct bindpkt_params))) {
			kfree(head_params);
			return -EFAULT;
		}
		mutex_lock(&driver->diagchar_mutex);
		for (i = 0; i < diag_max_reg; i++) {
			if (driver->table[i].process_id == 0) {
				diag_add_reg(i, params,
					     &success, &count_entries);
				if (pkt_params.count > count_entries) {
					params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return success;
				}
			}
		}
		if (i < diag_threshold_reg) {
			/* Increase table size by amount required */
			if (pkt_params.count >= count_entries) {
				interim_count = pkt_params.count -
							 count_entries;
			} else {
				pr_warning("diag: error in params count\n");
				kfree(head_params);
				mutex_unlock(&driver->diagchar_mutex);
				return -EFAULT;
			}
			if (UINT32_MAX - diag_max_reg >=
							interim_count) {
				diag_max_reg += interim_count;
			} else {
				pr_warning("diag: Integer overflow\n");
				kfree(head_params);
				mutex_unlock(&driver->diagchar_mutex);
				return -EFAULT;
			}
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_reg > diag_threshold_reg) {
				diag_max_reg = diag_threshold_reg;
				pr_info("diag: best case memory allocation\n");
			}
			if (UINT32_MAX/sizeof(struct diag_master_table) <
								 diag_max_reg) {
				pr_warning("diag: integer overflow\n");
				kfree(head_params);
				mutex_unlock(&driver->diagchar_mutex);
				return -EFAULT;
			}
			temp_buf = krealloc(driver->table,
					 diag_max_reg*sizeof(struct
					 diag_master_table), GFP_KERNEL);
			if (!temp_buf) {
				pr_alert("diag: Insufficient memory for reg.\n");
				if (pkt_params.count >= count_entries) {
					interim_count = pkt_params.count -
								 count_entries;
				} else {
					pr_warning("diag: params count error\n");
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return -EFAULT;
				}
				if (diag_max_reg >= interim_count) {
					diag_max_reg -= interim_count;
				} else {
					pr_warning("diag: Integer underflow\n");
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return -EFAULT;
				}
				mutex_unlock(&driver->diagchar_mutex);
				kfree(head_params);
				return 0;
			} else {
				driver->table = temp_buf;
			}
			for (j = i; j < diag_max_reg; j++) {
				diag_add_reg(j, params,
						&success, &count_entries);
				if (pkt_params.count > count_entries) {
					params++;
				} else {
					mutex_unlock(&driver->diagchar_mutex);
					kfree(head_params);
					return success;
				}
			}
			kfree(head_params);
			mutex_unlock(&driver->diagchar_mutex);
		} else {
			mutex_unlock(&driver->diagchar_mutex);
			kfree(head_params);
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);
		}
		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params delay_params;
		uint16_t interim_rsp_id;
		int interim_size;
		if (copy_from_user(&delay_params, (void *)ioarg,
					   sizeof(struct diagpkt_delay_params)))
			return -EFAULT;
		if ((delay_params.rsp_ptr) &&
		 (delay_params.size == sizeof(delayed_rsp_id)) &&
				 (delay_params.num_bytes_ptr)) {
			interim_rsp_id = DIAGPKT_NEXT_DELAYED_RSP_ID(
							delayed_rsp_id);
			if (copy_to_user((void *)delay_params.rsp_ptr,
					 &interim_rsp_id, sizeof(uint16_t)))
				return -EFAULT;
			interim_size = sizeof(delayed_rsp_id);
			if (copy_to_user((void *)delay_params.num_bytes_ptr,
						 &interim_size, sizeof(int)))
				return -EFAULT;
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_DCI_REG) {
		if (driver->dci_state == DIAG_DCI_NO_REG)
			return DIAG_DCI_NO_REG;
		if (driver->num_dci_client >= MAX_DCI_CLIENT)
			return DIAG_DCI_NO_REG;
		dci_params = kzalloc(sizeof(struct dci_notification_tbl),
								 GFP_KERNEL);
		if (dci_params == NULL) {
			pr_err("diag: unable to alloc memory\n");
			return -ENOMEM;
		}
		if (copy_from_user(dci_params, (void *)ioarg,
				 sizeof(struct dci_notification_tbl))) {
			kfree(dci_params);
			return -EFAULT;
		}
		mutex_lock(&driver->dci_mutex);
		driver->num_dci_client++;
		diag_printk(1,"diag:%s id = %d\n",__func__, driver->dci_client_id);
		driver->dci_client_id++;
		for (i = 0; i < MAX_DCI_CLIENT; i++) {
			if (driver->dci_notify_tbl[i].client == NULL) {
				driver->dci_notify_tbl[i].client = current;
				driver->dci_notify_tbl[i].list =
							 dci_params->list;
				driver->dci_notify_tbl[i].signal_type =
					 dci_params->signal_type;
				break;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		kfree(dci_params);
		return driver->dci_client_id;
	} else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
		success = -1;
		/* Delete this process from DCI table */
		mutex_lock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++) {
			if (driver->dci_tbl[i].pid == current->tgid) {
				diag_printk(1,"diag:%s delete %d\n",__func__, current->tgid);
				driver->dci_tbl[i].pid = 0;
				success = i;
			}
		}
		for (i = 0; i < MAX_DCI_CLIENT; i++) {
			if (driver->dci_notify_tbl[i].client == current) {
				driver->dci_notify_tbl[i].client = NULL;
				break;
			}
		}
		/* if any registrations were deleted successfully OR a valid
		   client_id was sent in the DEINIT call, then it's a DCI client */
		if (success >= 0 || ioarg)
			driver->num_dci_client--;
		mutex_unlock(&driver->dci_mutex);
		for (i = 0; i < dci_max_reg; i++)
			if (driver->dci_tbl[i].pid != 0)
				diag_printk(1,"diag:%s PID = %d, UID = %d, tag = %d\n",__func__,
	driver->dci_tbl[i].pid, driver->dci_tbl[i].uid, driver->dci_tbl[i].tag);
		diag_printk(1,"diag:%s complete deleting registrations\n",__func__);
		return success;
	} else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
		if (driver->ch_dci)
			support_list = support_list | DIAG_CON_MPSS;
		*(uint16_t *)ioarg = support_list;
		return DIAG_DCI_NO_ERROR;
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		if (driver->logging_mode == MEMORY_DEVICE_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 1;
			if (driver->socket_process) {
				/*
				 * Notify the socket logging process that we
				 * are switching to MEMORY_DEVICE_MODE
				 */
				status = send_sig(SIGCONT,
					 driver->socket_process, 0);
				if (status) {
					pr_err("diag: %s, Error notifying ",
						__func__);
					pr_err("socket process, status: %d\n",
						status);
				}
			}
		}
		if (driver->logging_mode == UART_MODE) {
			diag_clear_hsic_tbl();
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		if (driver->logging_mode == SOCKET_MODE) {
			diag_clear_hsic_tbl();
			driver->socket_process = current;
			driver->mask_check = 0;
			driver->logging_mode = MEMORY_DEVICE_MODE;
		}
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy_1 = 1;
			driver->in_busy_2 = 1;
			driver->in_busy_qdsp_1 = 1;
			driver->in_busy_qdsp_2 = 1;
			driver->in_busy_wcnss_1 = 1;
			driver->in_busy_wcnss_2 = 1;
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 1;
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
			diag_clear_hsic_tbl();
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		}
#ifdef CONFIG_DIAG_OVER_USB
		else if (temp == USB_MODE && driver->logging_mode
							 == NO_LOGGING_MODE) {
			diagfwd_disconnect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_disconnect_bridge(0);
#endif
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;

			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		} else if (temp == MEMORY_DEVICE_MODE &&
				 driver->logging_mode == USB_MODE) {
			diagfwd_connect();
#ifdef CONFIG_DIAG_BRIDGE_CODE
			diag_clear_hsic_tbl();
			diagfwd_cancel_hsic();
			diagfwd_connect_bridge(0);
#endif
		}
#endif /* DIAG over USB */
		success = 1;
	}

	return success;
}
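This revision guards every size computation (the UINT32_MAX / sizeof(...) < count tests) before multiplying, because count comes from userspace and count * sizeof(elem) can wrap to a small allocation that the following copy_from_user would overrun. The same guard as a compact helper; kcalloc() performs an equivalent overflow check internally:

#include <linux/limits.h>
#include <linux/slab.h>

/* allocate a zeroed array of 'count' elements, refusing wrapped sizes */
static void *alloc_counted(size_t count, size_t elem_size)
{
	if (elem_size && count > SIZE_MAX / elem_size)
		return NULL;	/* count * elem_size would overflow */

	return kzalloc(count * elem_size, GFP_KERNEL);
}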
Example #26
/**
 * genl_register_mc_group - register a multicast group
 *
 * Registers the specified multicast group and notifies userspace
 * about the new group.
 *
 * Returns 0 on success or a negative error code.
 *
 * @family: The generic netlink family the group shall be registered for.
 * @grp: The group to register, must have a name.
 */
int genl_register_mc_group(struct genl_family *family,
			   struct genl_multicast_group *grp)
{
	int id;
	unsigned long *new_groups;
	int err = 0;

	BUG_ON(grp->name[0] == '\0');

	genl_lock();

	/* special-case our own group */
	if (grp == &notify_grp)
		id = GENL_ID_CTRL;
	else
		id = find_first_zero_bit(mc_groups,
					 mc_groups_longs * BITS_PER_LONG);

	if (id >= mc_groups_longs * BITS_PER_LONG) {
		size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long);

		if (mc_groups == &mc_group_start) {
			new_groups = kzalloc(nlen, GFP_KERNEL);
			if (!new_groups) {
				err = -ENOMEM;
				goto out;
			}
			mc_groups = new_groups;
			*mc_groups = mc_group_start;
		} else {
			new_groups = krealloc(mc_groups, nlen, GFP_KERNEL);
			if (!new_groups) {
				err = -ENOMEM;
				goto out;
			}
			mc_groups = new_groups;
			mc_groups[mc_groups_longs] = 0;
		}
		mc_groups_longs++;
	}

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				rcu_read_unlock();
				netlink_table_ungrab();
				goto out;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
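genl_register_mc_group seeds its group bitmap from a single static long and migrates to the heap only on first growth: kzalloc a larger block and copy the seed word across, then plain krealloc on later growth, zeroing the appended word by hand because krealloc leaves the new tail uninitialized. That seed-then-grow pattern as a standalone sketch:

#include <linux/slab.h>

static unsigned long seed_word;			/* static initial storage */
static unsigned long *words = &seed_word;
static unsigned int nwords = 1;

/* append one zeroed word, leaving the static seed on first growth */
static int grow_words(void)
{
	size_t nlen = (nwords + 1) * sizeof(*words);
	unsigned long *tmp;

	if (words == &seed_word) {
		tmp = kzalloc(nlen, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		tmp[0] = seed_word;	/* carry the seed value over */
	} else {
		tmp = krealloc(words, nlen, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		tmp[nwords] = 0;	/* krealloc leaves the tail dirty */
	}
	words = tmp;
	nwords++;
	return 0;
}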
Example #27
/**
 * msm_bus_scale_register_client() - Register the clients with the msm bus
 * driver
 * @pdata: Platform data of the client, containing src, dest, ab, ib
 *
 * Client data contains the vectors specifying arbitrated bandwidth (ab)
 * and instantaneous bandwidth (ib) requested between a particular
 * src and dest.
 */
uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
{
	struct msm_bus_client *client = NULL;
	int i;
	int src, dest, nfab;
	nfab = msm_bus_get_num_fab();
	if (nfab < NUM_FAB) {
		MSM_BUS_ERR("Can't register client!\n"
				"Num of fabrics up: %d\n",
				nfab);
		return 0;
	}

	if (IS_ERR_OR_NULL(pdata) || (pdata->usecase->num_paths == 0)) {
		MSM_BUS_ERR("Cannot register client with null data\n");
		return 0;
	}

	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
	if (!client) {
		MSM_BUS_ERR("Error allocating client\n");
		return 0;
	}

	mutex_lock(&msm_bus_lock);
	client->pdata = pdata;
	client->curr = -1;
	for (i = 0; i < pdata->usecase->num_paths; i++) {
		int *pnode;
		struct msm_bus_fabric_device *srcfab;
		pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)),
			GFP_KERNEL);
		if (!ZERO_OR_NULL_PTR(pnode))
			client->src_pnode = pnode;
		else {
			MSM_BUS_ERR("Invalid Pnode ptr!\n");
			continue;
		}

		if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) {
			MSM_BUS_ERR("Invalid Master ID %d in request!\n",
				pdata->usecase->vectors[i].src);
			goto err;
		}

		if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) {
			MSM_BUS_ERR("Invalid Slave ID %d in request!\n",
				pdata->usecase->vectors[i].dst);
			goto err;
		}

		src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src);
		dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst);
		srcfab = msm_bus_get_fabric_device(GET_FABID(src));
		if (!srcfab) {
			MSM_BUS_ERR("Fabric not found\n");
			goto err;
		}
		srcfab->visited = true;
		pnode[i] = getpath(src, dest);
		bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag);
		if (pnode[i] < 0) {
			MSM_BUS_ERR("Cannot register client now! Try again!\n");
			goto err;
		}
	}
	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
		(uint32_t)client);
	mutex_unlock(&msm_bus_lock);
	MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client,
		pdata->usecase->num_paths);
	return (uint32_t)(client);
err:
	kfree(client->src_pnode);
	kfree(client);
	mutex_unlock(&msm_bus_lock);
	return 0;
}
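Both this function and the legacy variant below grow src_pnode by one int per loop pass, so a client with n paths triggers n reallocations. Here num_paths is known before the loop, so a single up-front allocation would do; when the final count is not known, geometric growth keeps total copying linear. A sketch of the doubling approach (illustrative names):

#include <linux/slab.h>

struct int_vec {
	int *data;
	size_t len, cap;
};

/* append one value; doubling capacity makes n appends cost O(n) copies */
static int int_vec_push(struct int_vec *v, int val)
{
	if (v->len == v->cap) {
		size_t new_cap = v->cap ? v->cap * 2 : 4;
		int *tmp = krealloc(v->data, new_cap * sizeof(*tmp),
				    GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;	/* v->data is untouched */
		v->data = tmp;
		v->cap = new_cap;
	}
	v->data[v->len++] = val;
	return 0;
}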
static uint32_t register_client_legacy(struct msm_bus_scale_pdata *pdata)
{
	struct msm_bus_client *client = NULL;
	int i;
	int src, dest, nfab;
	struct msm_bus_fabric_device *deffab;

	deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT);
	if (!deffab) {
		MSM_BUS_ERR("Error finding default fabric\n");
		return 0;
	}

	nfab = msm_bus_get_num_fab();
	if (nfab < deffab->board_algo->board_nfab) {
		MSM_BUS_ERR("Can't register client!\n"
				"Num of fabrics up: %d\n",
				nfab);
		return 0;
	}

	if (IS_ERR_OR_NULL(pdata) || (pdata->usecase->num_paths == 0)) {
		MSM_BUS_ERR("Cannot register client with null data\n");
		return 0;
	}

	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
	if (!client) {
		MSM_BUS_ERR("Error allocating client\n");
		return 0;
	}

	mutex_lock(&msm_bus_lock);
	client->pdata = pdata;
	client->curr = -1;
	for (i = 0; i < pdata->usecase->num_paths; i++) {
		int *pnode;
		struct msm_bus_fabric_device *srcfab;
		pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)),
			GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(pnode)) {
			MSM_BUS_ERR("Invalid Pnode ptr!\n");
			continue;
		} else
			client->src_pnode = pnode;

		if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) {
			MSM_BUS_ERR("Invalid Master ID %d in request!\n",
				pdata->usecase->vectors[i].src);
			goto err;
		}

		if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) {
			MSM_BUS_ERR("Invalid Slave ID %d in request!\n",
				pdata->usecase->vectors[i].dst);
			goto err;
		}

		src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src);
		if (src == -ENXIO) {
			MSM_BUS_ERR("Master %d not supported. Client cannot be"
				" registered\n",
				pdata->usecase->vectors[i].src);
			goto err;
		}
		dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst);
		if (dest == -ENXIO) {
			MSM_BUS_ERR("Slave %d not supported. Client cannot be"
				" registered\n",
				pdata->usecase->vectors[i].dst);
			goto err;
		}
		srcfab = msm_bus_get_fabric_device(GET_FABID(src));
		if (!srcfab) {
			MSM_BUS_ERR("Fabric not found\n");
			goto err;
		}

		srcfab->visited = true;
		pnode[i] = getpath(src, dest);
		bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag);
		if (pnode[i] == -ENXIO) {
			MSM_BUS_ERR("Cannot register client now! Try again!\n");
			goto err;
		}
	}
	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
		(uint32_t)client);
	mutex_unlock(&msm_bus_lock);
	MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client,
		pdata->usecase->num_paths);
	return (uint32_t)(client);
err:
	kfree(client->src_pnode);
	kfree(client);
	mutex_unlock(&msm_bus_lock);
	return 0;
}
Example #29
static int diagchar_open(struct inode *inode, struct file *file)
{
	int i = 0;
	void *temp;
	DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__,
			current->comm, current->parent->comm, current->tgid);

	if (driver) {
		mutex_lock(&driver->diagchar_mutex);

		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == 0)
				break;

		if (i < driver->num_clients) {
			diag_add_client(i, file);
		} else {
			if (i < threshold_client_limit) {
				driver->num_clients++;
				temp = krealloc(driver->client_map
					, (driver->num_clients) * sizeof(struct
						 diag_client_map), GFP_KERNEL);
				if (!temp)
					goto fail;
				else
					driver->client_map = temp;
				temp = krealloc(driver->data_ready
					, (driver->num_clients) * sizeof(int),
							GFP_KERNEL);
				if (!temp)
					goto fail;
				else
					driver->data_ready = temp;
				diag_add_client(i, file);
			} else {
				mutex_unlock(&driver->diagchar_mutex);
				pr_alert("Max client limit for DIAG reached\n");
				pr_info("Cannot open handle %s"
					   " %d", current->comm, current->tgid);
				for (i = 0; i < driver->num_clients; i++)
					pr_debug("%d) %s PID=%d", i, driver->
						client_map[i].name,
						driver->client_map[i].pid);
				return -ENOMEM;
			}
		}
		driver->data_ready[i] |= MSG_MASKS_TYPE;
		driver->data_ready[i] |= EVENT_MASKS_TYPE;
		driver->data_ready[i] |= LOG_MASKS_TYPE;

		if (driver->ref_count == 0)
			diagmem_init(driver);
		driver->ref_count++;
		mutex_unlock(&driver->diagchar_mutex);
		return 0;
	}
	return -ENOMEM;

fail:
	driver->num_clients--;
	mutex_unlock(&driver->diagchar_mutex);
	pr_alert("diag: Insufficient memory for new client\n");
	return -ENOMEM;
}
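diagchar_open grows client_map and data_ready in lockstep. If the second krealloc fails after the first succeeded, client_map is merely larger than needed, which stays harmless because the fail path rolls num_clients back while still holding the mutex and all indexing is bounded by num_clients. That lockstep growth as a sketch against the driver's types (assumed shapes, for illustration):

#include <linux/slab.h>

/*
 * Grow both per-client arrays to 'n' entries. On failure neither logical
 * count changes; a buffer already grown is simply oversized, not leaked.
 */
static int grow_client_arrays(struct diag_client_map **map, int **ready,
			      unsigned int n)
{
	void *tmp;

	tmp = krealloc(*map, n * sizeof(**map), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	*map = tmp;

	tmp = krealloc(*ready, n * sizeof(**ready), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* *map stays valid, just oversized */
	*ready = tmp;

	return 0;
}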
Example #30
static inline void *realloc(void *str, size_t size)
{
	return krealloc(str, size, GFP_KERNEL);
}
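This one-liner gives ported userspace code a realloc() backed by the slab allocator. krealloc already matches the familiar contract in the common cases: a NULL input behaves like a fresh allocation, failure returns NULL while leaving the original block untouched, and the contents are copied whenever the block moves; GFP_KERNEL does mean the wrapper may sleep, so it must not be used from atomic context. Hypothetical usage:

static int realloc_demo(void)
{
	char *p = realloc(NULL, 64);	/* NULL input acts like kmalloc */
	char *q;

	if (!p)
		return -ENOMEM;

	q = realloc(p, 128);		/* may move the block */
	if (!q) {
		kfree(p);		/* the old block survived the failure */
		return -ENOMEM;
	}
	p = q;
	kfree(p);
	return 0;
}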