Example #1
10
static void __init cpg_mstp_clocks_init(struct device_node *np)
{
	struct mstp_clock_group *group;
	const char *idxname;
	struct clk **clks;
	unsigned int i;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	clks = kmalloc(MSTP_MAX_CLOCKS * sizeof(*clks), GFP_KERNEL);
	if (group == NULL || clks == NULL) {
		kfree(group);
		kfree(clks);
		pr_err("%s: failed to allocate group\n", __func__);
		return;
	}

	spin_lock_init(&group->lock);
	group->data.clks = clks;

	group->smstpcr = of_iomap(np, 0);
	group->mstpsr = of_iomap(np, 1);

	if (group->smstpcr == NULL) {
		pr_err("%s: failed to remap SMSTPCR\n", __func__);
		kfree(group);
		kfree(clks);
		return;
	}

	for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
		clks[i] = ERR_PTR(-ENOENT);

	if (of_find_property(np, "clock-indices", &i))
		idxname = "clock-indices";
	else
		idxname = "renesas,clock-indices";

	for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
		const char *parent_name;
		const char *name;
		u32 clkidx;
		int ret;

		/* Skip clocks with no name. */
		ret = of_property_read_string_index(np, "clock-output-names",
						    i, &name);
		if (ret < 0 || strlen(name) == 0)
			continue;

		parent_name = of_clk_get_parent_name(np, i);
		ret = of_property_read_u32_index(np, idxname, i, &clkidx);
		if (parent_name == NULL || ret < 0)
			break;

		if (clkidx >= MSTP_MAX_CLOCKS) {
			pr_err("%s: invalid clock %s %s index %u)\n",
			       __func__, np->name, name, clkidx);
			continue;
		}

		clks[clkidx] = cpg_mstp_clock_register(name, parent_name,
						       clkidx, group);
		if (!IS_ERR(clks[clkidx])) {
			group->data.clk_num = max(group->data.clk_num,
						  clkidx + 1);
			/*
			 * Register a clkdev to let board code retrieve the
			 * clock by name and register aliases for non-DT
			 * devices.
			 *
			 * FIXME: Remove this when all devices that require a
			 * clock will be instantiated from DT.
			 */
			clk_register_clkdev(clks[clkidx], name, NULL);
		} else {
			pr_err("%s: failed to register %s %s clock (%ld)\n",
			       __func__, np->name, name, PTR_ERR(clks[clkidx]));
		}
	}

	of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
}
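One nit on the allocation above: the open-coded MSTP_MAX_CLOCKS * sizeof(*clks) multiplication is the pattern kcalloc() exists to replace, since kcalloc() checks the multiplication for overflow and zero-fills the array. A minimal equivalent sketch (zero-filling is harmless here because every slot is overwritten with ERR_PTR(-ENOENT) anyway):

	clks = kcalloc(MSTP_MAX_CLOCKS, sizeof(*clks), GFP_KERNEL);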
Example #2
0
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
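The hand-rolled uniqueness walk above can be written with list_for_each_entry(); a sketch assuming the same dev->fib_lock is held and the same 'next' list member:

restart:
	list_for_each_entry(context, &dev->fib_list, next) {
		if (context->unique == fibctx->unique) {
			/* collision: bump the id and rescan from the start */
			fibctx->unique++;
			goto restart;
		}
	}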
Example #3
0
File: airo_cs.c Project: nhanh0/hah
static dev_link_t *airo_attach(void)
{
	client_reg_t client_reg;
	dev_link_t *link;
	local_info_t *local;
	int ret, i;
	
	DEBUG(0, "airo_attach()\n");
	flush_stale_links();
	
	/* Initialize the dev_link_t structure */
	link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
	if (!link) {
		printk(KERN_ERR "airo_cs: no memory for new device\n");
		return NULL;
	}
	memset(link, 0, sizeof(struct dev_link_t));
	link->release.function = &airo_release;
	link->release.data = (u_long)link;
	
	/* Interrupt setup */
	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
	link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
	if (irq_list[0] == -1)
		link->irq.IRQInfo2 = irq_mask;
	else
		for (i = 0; i < 4; i++)
			link->irq.IRQInfo2 |= 1 << irq_list[i];
	link->irq.Handler = NULL;
	
	/*
	  General socket configuration defaults can go here.  In this
	  client, we assume very little, and rely on the CIS for almost
	  everything.  In most clients, many details (i.e., number, sizes,
	  and attributes of IO windows) are fixed by the nature of the
	  device, and can be hard-wired here.
	*/
	link->conf.Attributes = 0;
	link->conf.Vcc = 50;
	link->conf.IntType = INT_MEMORY_AND_IO;
	
	/* Allocate space for private device-specific data */
	local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
	if (!local) {
		printk(KERN_ERR "airo_cs: no memory for device data\n");
		kfree(link);
		return NULL;
	}
	memset(local, 0, sizeof(local_info_t));
	link->priv = local;
	
	/* Register with Card Services */
	link->next = dev_list;
	dev_list = link;
	client_reg.dev_info = &dev_info;
	client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
	client_reg.EventMask =
		CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
		CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
		CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
	client_reg.event_handler = &airo_event;
	client_reg.Version = 0x0210;
	client_reg.event_callback_args.client_data = link;
	ret = CardServices(RegisterClient, &link->handle, &client_reg);
	if (ret != 0) {
		cs_error(link->handle, RegisterClient, ret);
		airo_detach(link);
		return NULL;
	}
	
	return link;
} /* airo_attach */
Example #4
0
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
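cn_printf() is not shown in this excerpt. A minimal sketch of the contract the loop above relies on — append formatted text at cn->used and fail when the buffer is exhausted — assuming a fixed-size cn->corename allocation (the real helper may instead grow the buffer):

static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int need;

	/* format into the unused tail of the buffer */
	va_start(arg, fmt);
	need = vsnprintf(cn->corename + cn->used, cn->size - cn->used,
			 fmt, arg);
	va_end(arg);

	if (need < 0 || cn->used + need >= cn->size)
		return -ENOMEM;	/* output would not fit */

	cn->used += need;
	return 0;
}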
Example #5
0
int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
{
	int ret = 0;
	char *command = NULL;
	int bytes_written = 0;
	android_wifi_priv_cmd priv_cmd;

	net_os_wake_lock(net);

	if (!ifr->ifr_data) {
		ret = -EINVAL;
		goto exit;
	}
	if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
		ret = -EFAULT;
		goto exit;
	}
	command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
	if (!command)
	{
		DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
		ret = -ENOMEM;
		goto exit;
	}
	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
		ret = -EFAULT;
		goto exit;
	}

	DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));

	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
		DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
		bytes_written = wl_android_wifi_on(net);
	}
	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
		bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
	}

	if (!g_wifi_on) {
		DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
			__FUNCTION__, command, ifr->ifr_name));
		ret = 0;
		goto exit;
	}

	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
		bytes_written = wl_android_wifi_off(net);
	}
	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
		/* TBD: SCAN-ACTIVE */
	}
	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
		/* TBD: SCAN-PASSIVE */
	}
	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
		bytes_written = net_os_set_packet_filter(net, 1);
	}
	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
		bytes_written = net_os_set_packet_filter(net, 0);
	}
	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
	}
	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
	}
	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
		/* TBD: BTCOEXSCAN-START */
	}
	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
		/* TBD: BTCOEXSCAN-STOP */
	}
	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';

		if (mode == 1)
			net_os_set_packet_filter(net, 0); /* DHCP starts */
		else
			net_os_set_packet_filter(net, 1); /* DHCP ends */
#ifdef WL_CFG80211
		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command);
#endif
	}
	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
		bytes_written = wldev_set_band(net, band);
	}
	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
		char *country_code = command + strlen(CMD_COUNTRY) + 1;
		bytes_written = wldev_set_country(net, country_code);
	}
#ifdef PNO_SUPPORT
	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
		bytes_written = dhd_dev_pno_reset(net);
	}
	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
		uint pfn_enabled = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
		bytes_written = dhd_dev_pno_enable(net, pfn_enabled);
	}
#endif
	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
		int skip = strlen(CMD_P2P_SET_NOA) + 1;
		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
			priv_cmd.total_len - skip);
	}
	else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
		bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
	}
	else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
		int skip = strlen(CMD_P2P_SET_PS) + 1;
		bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
			priv_cmd.total_len - skip);
	}
#ifdef WL_CFG80211
	else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
		strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
		int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
		bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
			priv_cmd.total_len - skip, *(command + skip - 2) - '0');
	}
#endif /* WL_CFG80211 */
	else {
		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
		snprintf(command, 3, "OK");
		bytes_written = strlen("OK");
	}

	if (bytes_written > 0) {
		if (bytes_written > priv_cmd.total_len) {
			DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
			bytes_written = priv_cmd.total_len;
		} else {
			bytes_written++;
		}
		priv_cmd.used_len = bytes_written;
		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
			ret = -EFAULT;
		}
	} else {
		ret = bytes_written;
	}

exit:
	net_os_wake_unlock(net);
	if (command) {
		kfree(command);
	}

	return ret;
}
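Note that priv_cmd.total_len arrives from user space and is used both as the kmalloc() size and the copy_from_user() length, while the buffer is later parsed with strnicmp() and printed with "%s"; nothing guarantees NUL termination. A hedged hardening sketch (PRIV_CMD_MAX_LEN is a hypothetical cap, not from the original driver):

#define PRIV_CMD_MAX_LEN	8192	/* hypothetical upper bound */

	if (priv_cmd.total_len <= 0 || priv_cmd.total_len > PRIV_CMD_MAX_LEN) {
		ret = -EINVAL;
		goto exit;
	}
	command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
	if (!command) {
		ret = -ENOMEM;
		goto exit;
	}
	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
		ret = -EFAULT;
		goto exit;
	}
	/* guarantee NUL termination before any str*()/"%s" parsing */
	command[priv_cmd.total_len - 1] = '\0';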
Example #6
0
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
			      const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header *dmc_header;
	struct intel_csr *csr = &dev_priv->csr;
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
	uint32_t max_fw_size = 0;
	uint32_t i;
	uint32_t *dmc_payload;
	uint32_t required_version;

	if (!fw)
		return NULL;

	/* Extract CSS Header information*/
	css_header = (struct intel_css_header *)fw->data;
	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		DRM_ERROR("DMC firmware has wrong CSS header length "
			  "(%u bytes)\n",
			  (css_header->header_len * 4));
		return NULL;
	}

	csr->version = css_header->version;

	if (csr->fw_path == i915_modparams.dmc_firmware_path) {
		/* Bypass version check for firmware override. */
		required_version = csr->version;
	} else if (IS_ICELAKE(dev_priv)) {
		required_version = ICL_CSR_VERSION_REQUIRED;
	} else if (IS_CANNONLAKE(dev_priv)) {
		required_version = CNL_CSR_VERSION_REQUIRED;
	} else if (IS_GEMINILAKE(dev_priv)) {
		required_version = GLK_CSR_VERSION_REQUIRED;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		required_version = KBL_CSR_VERSION_REQUIRED;
	} else if (IS_SKYLAKE(dev_priv)) {
		required_version = SKL_CSR_VERSION_REQUIRED;
	} else if (IS_BROXTON(dev_priv)) {
		required_version = BXT_CSR_VERSION_REQUIRED;
	} else {
		MISSING_CASE(INTEL_REVID(dev_priv));
		required_version = 0;
	}

	if (csr->version != required_version) {
		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
			 " please use v%u.%u\n",
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version),
			 CSR_VERSION_MAJOR(required_version),
			 CSR_VERSION_MINOR(required_version));
		return NULL;
	}

	readcount += sizeof(struct intel_css_header);

	/* Extract Package Header information*/
	package_header = (struct intel_package_header *)
		&fw->data[readcount];
	if (sizeof(struct intel_package_header) !=
	    (package_header->header_len * 4)) {
		DRM_ERROR("DMC firmware has wrong package header length "
			  "(%u bytes)\n",
			  (package_header->header_len * 4));
		return NULL;
	}
	readcount += sizeof(struct intel_package_header);

	/* Search for dmc_offset to find firmware binary. */
	for (i = 0; i < package_header->num_entries; i++) {
		if (package_header->fw_info[i].substepping == '*' &&
		    si->stepping == package_header->fw_info[i].stepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (si->stepping == package_header->fw_info[i].stepping &&
			   si->substepping == package_header->fw_info[i].substepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (package_header->fw_info[i].stepping == '*' &&
			   package_header->fw_info[i].substepping == '*')
			dmc_offset = package_header->fw_info[i].offset;
	}
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("DMC firmware not supported for %c stepping\n",
			  si->stepping);
		return NULL;
	}
	/* Convert dmc_offset into number of bytes. By default it is in dwords*/
	dmc_offset *= 4;
	readcount += dmc_offset;

	/* Extract dmc_header information. */
	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
		DRM_ERROR("DMC firmware has wrong dmc header length "
			  "(%u bytes)\n",
			  (dmc_header->header_len));
		return NULL;
	}
	readcount += sizeof(struct intel_dmc_header);

	/* Cache the dmc header info. */
	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
		DRM_ERROR("DMC firmware has wrong mmio count %u\n",
			  dmc_header->mmio_count);
		return NULL;
	}
	csr->mmio_count = dmc_header->mmio_count;
	for (i = 0; i < dmc_header->mmio_count; i++) {
		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
			DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
				  dmc_header->mmioaddr[i]);
			return NULL;
		}
		csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
		csr->mmiodata[i] = dmc_header->mmiodata[i];
	}

	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
	nbytes = dmc_header->fw_size * 4;
	if (INTEL_GEN(dev_priv) >= 11)
		max_fw_size = ICL_CSR_MAX_FW_SIZE;
	else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		max_fw_size = GLK_CSR_MAX_FW_SIZE;
	else if (IS_GEN9(dev_priv))
		max_fw_size = BXT_CSR_MAX_FW_SIZE;
	else
		MISSING_CASE(INTEL_REVID(dev_priv));
	if (nbytes > max_fw_size) {
		DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
		return NULL;
	}
	csr->dmc_fw_size = dmc_header->fw_size;

	dmc_payload = kmalloc(nbytes, GFP_KERNEL);
	if (!dmc_payload) {
		DRM_ERROR("Memory allocation failed for dmc payload\n");
		return NULL;
	}

	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
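parse_csr_fw() dereferences fw->data at computed offsets (readcount, dmc_offset, the final memcpy) without checking them against fw->size. A hedged helper sketch for such checks; csr_fw_in_bounds() is a hypothetical name, not part of the driver:

/* true iff [offset, offset + len) lies entirely inside the firmware blob */
static bool csr_fw_in_bounds(const struct firmware *fw, u32 offset, u32 len)
{
	return offset <= fw->size && len <= fw->size - offset;
}

Each header cast and the final memcpy could then be gated on a call such as csr_fw_in_bounds(fw, readcount, nbytes).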
Example #7
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
    struct buffer_head * bh;
    struct ext2_sb_info * sbi;
    struct ext2_super_block * es;
    struct inode *root;
    unsigned long block;
    unsigned long sb_block = get_sb_block(&data);
    unsigned long logic_sb_block;
    unsigned long offset = 0;
    unsigned long def_mount_opts;
    int blocksize = BLOCK_SIZE;
    int db_count;
    int i, j;
    __le32 features;

    sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    sb->s_fs_info = sbi;

    /*
     * See what the current blocksize for the device is, and
     * use that as the blocksize.  Otherwise (or if the blocksize
     * is smaller than the default) use the default.
     * This is important for devices that have a hardware
     * sectorsize that is larger than the default.
     */
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        printk ("EXT2-fs: unable to set blocksize\n");
        goto failed_sbi;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        printk ("EXT2-fs: unable to read superblock\n");
        goto failed_sbi;
    }
    /*
     * Note: s_es must be initialized as soon as possible because
     *       some ext2 macro-instructions depend on its value
     */
    es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
    sbi->s_es = es;
    sb->s_magic = le16_to_cpu(es->s_magic);

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    /* Set defaults before we parse the mount options */
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
    if (def_mount_opts & EXT2_DEFM_DEBUG)
        set_opt(sbi->s_mount_opt, DEBUG);
    if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
        set_opt(sbi->s_mount_opt, GRPID);
    if (def_mount_opts & EXT2_DEFM_UID16)
        set_opt(sbi->s_mount_opt, NO_UID32);
    if (def_mount_opts & EXT2_DEFM_XATTR_USER)
        set_opt(sbi->s_mount_opt, XATTR_USER);
    if (def_mount_opts & EXT2_DEFM_ACL)
        set_opt(sbi->s_mount_opt, POSIX_ACL);

    if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
        set_opt(sbi->s_mount_opt, ERRORS_PANIC);
    else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
        set_opt(sbi->s_mount_opt, ERRORS_RO);
    else
        set_opt(sbi->s_mount_opt, ERRORS_CONT);

    sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
    sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

    if (!parse_options ((char *) data, sbi))
        goto failed_mount;

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                  ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
                   MS_POSIXACL : 0);

    ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
            (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
        printk("EXT2-fs warning: feature flags set on rev 0 fs, "
               "running e2fsck is recommended\n");
    /*
     * Check feature flags regardless of the revision level, since we
     * previously didn't change the revision level when setting the flags,
     * so there is a chance incompat flags are set on a rev 0 filesystem.
     */
    features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
    if (features) {
        printk("EXT2-fs: %s: couldn't mount because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }
    if (!(sb->s_flags & MS_RDONLY) &&
            (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))) {
        printk("EXT2-fs: %s: couldn't mount RDWR because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }

    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

    if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) ||
                               (sb->s_blocksize != blocksize))) {
        if (!silent)
            printk("XIP: Unsupported blocksize\n");
        goto failed_mount;
    }

    /* If the blocksize doesn't match, re-read the thing.. */
    if (sb->s_blocksize != blocksize) {
        brelse(bh);

        if (!sb_set_blocksize(sb, blocksize)) {
            printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
            goto failed_sbi;
        }

        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        if(!bh) {
            printk("EXT2-fs: Couldn't read superblock on "
                   "2nd try.\n");
            goto failed_sbi;
        }
        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
        sbi->s_es = es;
        if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
            printk ("EXT2-fs: Magic mismatch, very weird !\n");
            goto failed_mount;
        }
    }

    sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
        sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
        sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
    } else {
        sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
        sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
        if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
                (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
                (sbi->s_inode_size > blocksize)) {
            printk ("EXT2-fs: unsupported inode size: %d\n",
                    sbi->s_inode_size);
            goto failed_mount;
        }
    }

    sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
                       le32_to_cpu(es->s_log_frag_size);
    if (sbi->s_frag_size == 0)
        goto cantfind_ext2;
    sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

    sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
    sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
    sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

    if (EXT2_INODE_SIZE(sb) == 0)
        goto cantfind_ext2;
    sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
    if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
        goto cantfind_ext2;
    sbi->s_itb_per_group = sbi->s_inodes_per_group /
                           sbi->s_inodes_per_block;
    sbi->s_desc_per_block = sb->s_blocksize /
                            sizeof (struct ext2_group_desc);
    sbi->s_sbh = bh;
    sbi->s_mount_state = le16_to_cpu(es->s_state);
    sbi->s_addr_per_block_bits =
        ilog2 (EXT2_ADDR_PER_BLOCK(sb));
    sbi->s_desc_per_block_bits =
        ilog2 (EXT2_DESC_PER_BLOCK(sb));

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    if (sb->s_blocksize != bh->b_size) {
        if (!silent)
            printk ("VFS: Unsupported blocksize on dev "
                    "%s.\n", sb->s_id);
        goto failed_mount;
    }

    if (sb->s_blocksize != sbi->s_frag_size) {
        printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
                sbi->s_frag_size, sb->s_blocksize);
        goto failed_mount;
    }

    if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #blocks per group too big: %lu\n",
                sbi->s_blocks_per_group);
        goto failed_mount;
    }
    if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #fragments per group too big: %lu\n",
                sbi->s_frags_per_group);
        goto failed_mount;
    }
    if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #inodes per group too big: %lu\n",
                sbi->s_inodes_per_group);
        goto failed_mount;
    }

    if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
        goto cantfind_ext2;
    sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                            le32_to_cpu(es->s_first_data_block) - 1)
                           / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
    db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
               EXT2_DESC_PER_BLOCK(sb);
    sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
    if (sbi->s_group_desc == NULL) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount;
    }
    bgl_lock_init(&sbi->s_blockgroup_lock);
    sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
                           GFP_KERNEL);
    if (!sbi->s_debts) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount_group_desc;
    }
    memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
    for (i = 0; i < db_count; i++) {
        block = descriptor_loc(sb, logic_sb_block, i);
        sbi->s_group_desc[i] = sb_bread(sb, block);
        if (!sbi->s_group_desc[i]) {
            for (j = 0; j < i; j++)
                brelse (sbi->s_group_desc[j]);
            printk ("EXT2-fs: unable to read group descriptors\n");
            goto failed_mount_group_desc;
        }
    }
    if (!ext2_check_descriptors (sb)) {
        printk ("EXT2-fs: group descriptors corrupted!\n");
        goto failed_mount2;
    }
    sbi->s_gdb_count = db_count;
    get_random_bytes(&sbi->s_next_generation, sizeof(u32));
    spin_lock_init(&sbi->s_next_gen_lock);

    percpu_counter_init(&sbi->s_freeblocks_counter,
                        ext2_count_free_blocks(sb));
    percpu_counter_init(&sbi->s_freeinodes_counter,
                        ext2_count_free_inodes(sb));
    percpu_counter_init(&sbi->s_dirs_counter,
                        ext2_count_dirs(sb));
    /*
     * set up enough so that it can read an inode
     */
    sb->s_op = &ext2_sops;
    sb->s_export_op = &ext2_export_ops;
    sb->s_xattr = ext2_xattr_handlers;
    root = iget(sb, EXT2_ROOT_INO);
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        printk(KERN_ERR "EXT2-fs: get root inode failed\n");
        goto failed_mount3;
    }
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
        dput(sb->s_root);
        sb->s_root = NULL;
        printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
        goto failed_mount3;
    }
    if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
        ext2_warning(sb, __FUNCTION__,
                     "mounting ext3 filesystem as ext2");
    ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
    return 0;

cantfind_ext2:
    if (!silent)
        printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
               sb->s_id);
    goto failed_mount;
failed_mount3:
    percpu_counter_destroy(&sbi->s_freeblocks_counter);
    percpu_counter_destroy(&sbi->s_freeinodes_counter);
    percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
    for (i = 0; i < db_count; i++)
        brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
    kfree(sbi->s_group_desc);
    kfree(sbi->s_debts);
failed_mount:
    brelse(bh);
failed_sbi:
    sb->s_fs_info = NULL;
    kfree(sbi);
    return -EINVAL;
}
/*
 * Build a stack image in kernel memory.
 * Its contents are laid out in terms of user addresses,
 * so the image can be copied onto the user stack by copyout().
 */
void *setup_args_mem(struct runprogram_info *prog_info, vaddr_t *usr_stack, size_t *len) {
	unsigned int i;
	unsigned long argc = prog_info->argc;
	char **argv = prog_info->argv;
	/*
	 * Layout of the stack image built below, from the low end
	 * (ks_start) upward:
	 *
	 *	*(argv+0)      -> translated to a user stack address  <-- ks_start
	 *	*(argv+1)      -> translated to a user stack address
	 *	........
	 *	*(argv+argc-1) -> translated to a user stack address
	 *	NULL (argv terminator)
	 *	argument strings ("real args"), one per argv slot,
	 *	padded to word boundaries
	 *	.
	 *	.
	 *	.
	 */
	// start addr of the user stack copy in kernel
	*len = stack_size(prog_info);
	int *ks_start = (int *)kmalloc(*len);
	if (ks_start == NULL)
		return NULL;
	
	unsigned long arg_offset[argc];
	/*
	 * offset in terms of num of words
	 */
	// arg[0] is always there, it stores the program name
	arg_offset[0] = (unsigned int)argc + 1;
	// offset of argv[i]: offset of argv[i-1] + len of argv[i-1]
	for (i = 1; i < argc; i++) {
		int len_arg = (strlen(*(argv+i-1))+1)/4;
		if ((strlen(*(argv+i-1))+1)%4 != 0) {
			len_arg ++ ;
		}
		arg_offset[i] = arg_offset[i-1] + len_arg;
	}

	/*
	 * setup the pointer address to point to the addr on the user stack
	 */
	// the stack size in terms of words
	int s_size_w = (*len)/4;
	for (i = 0; i < argc; i++) {
		/* user address of argv[i]'s string: word offset relative to the new stack base */
		*(ks_start + i) = (int)((arg_offset[i] - s_size_w) + (int *)(*usr_stack));
	}
	
	*(ks_start + argc) = 0;	/* NULL terminator of the argv array */
	
	/*
	 * set up the actual contents in argvs
	 */
	for (i = 0; i < argc; i++) {
		char *argi = (char *)(arg_offset[i] + ks_start);
		char **x = (char **)(argv+i);
		int arglen = (strlen(*(argv+i))+1)/4;
		if ((strlen(*(argv+i))+1)%4 != 0) {
			arglen ++ ;
		}
		memmove((void *)argi, (void *)*x, 4*arglen);
	}
	/*
	 * ks_start will be passed to copyout();
	 * usr_stack will be passed to md_usermode().
	 * Both point to the beginning of the stack image:
	 * ks_start in kernel memory, usr_stack in the user address space.
	 */
	// move the user stack pointer down by the image size in bytes (*len)
	*usr_stack = *usr_stack - *len;
	return (void *)ks_start;
}
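The round-the-string-length-up-to-whole-words computation appears twice in setup_args_mem(). An illustrative helper, assuming the same 4-byte word size (shown only to make the intent explicit; the function above does not use it):

/* words needed to hold s, including its NUL terminator, padded to 4 bytes */
static inline unsigned int str_words(const char *s)
{
	return (strlen(s) + 1 + 3) / 4;
}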
static int btusb_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	unsigned int pipe;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
		if (!dr) {
			usb_free_urb(urb);
			return -ENOMEM;
		}

		dr->bRequestType = data->cmdreq_type;
		dr->bRequest     = 0;
		dr->wIndex       = 0;
		dr->wValue       = 0;
		dr->wLength      = __cpu_to_le16(skb->len);

		pipe = usb_sndctrlpipe(data->udev, 0x00);

		usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
				skb->data, skb->len, btusb_tx_complete, skb);

		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		if (!data->bulk_tx_ep)
			return -ENODEV;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		pipe = usb_sndbulkpipe(data->udev,
					data->bulk_tx_ep->bEndpointAddress);

		usb_fill_bulk_urb(urb, data->udev, pipe,
				skb->data, skb->len, btusb_tx_complete, skb);

		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
			return -ENODEV;

		urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		pipe = usb_sndisocpipe(data->udev,
					data->isoc_tx_ep->bEndpointAddress);

		usb_fill_int_urb(urb, data->udev, pipe,
				skb->data, skb->len, btusb_isoc_tx_complete,
				skb, data->isoc_tx_ep->bInterval);

		urb->transfer_flags  = URB_ISO_ASAP;

		__fill_isoc_descriptor(urb, skb->len,
				le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));

		hdev->stat.sco_tx++;
		goto skip_waking;

	default:
		return -EILSEQ;
	}

	err = inc_tx(data);
	if (err) {
		usb_anchor_urb(urb, &data->deferred);
		schedule_work(&data->waker);
		err = 0;
		goto done;
	}

skip_waking:
	usb_anchor_urb(urb, &data->tx_anchor);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		if (err != -EPERM && err != -ENODEV)
			BT_ERR("%s urb %p submission failed (%d)",
						hdev->name, urb, -err);
		kfree(urb->setup_packet);
		usb_unanchor_urb(urb);
	} else {
		usb_mark_last_busy(data->udev);
	}

done:
	usb_free_urb(urb);
	return err;
}
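For the command path above, dr is handed to usb_fill_control_urb() as the setup packet and freed explicitly only on submission failure; on success the completion callback is expected to release it. A simplified sketch of that side, assuming btusb_tx_complete() owns both the skb and the setup packet (the real callback also updates error statistics):

static void btusb_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;

	/* setup_packet is non-NULL only for the HCI command (control) urbs */
	kfree(urb->setup_packet);
	kfree_skb(skb);
}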
/**
 * map_extent_mft_record - load an extent inode and attach it to its base
 * @base_ni:	base ntfs inode
 * @mref:	mft reference of the extent inode to load (in little endian)
 * @ntfs_ino:	on successful return, pointer to the ntfs_inode structure
 *
 * Load the extent mft record @mref and attach it to its base inode @base_ni.
 * Return the mapped extent mft record if IS_ERR(result) is false. Otherwise
 * PTR_ERR(result) gives the negative error code.
 *
 * On successful return, @ntfs_ino contains a pointer to the ntfs_inode
 * structure of the mapped extent inode.
 */
MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
		ntfs_inode **ntfs_ino)
{
	MFT_RECORD *m;
	ntfs_inode *ni = NULL;
	ntfs_inode **extent_nis = NULL;
	int i;
	unsigned long mft_no = MREF_LE(mref);
	u16 seq_no = MSEQNO_LE(mref);
	BOOL destroy_ni = FALSE;

	ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
			mft_no, base_ni->mft_no);
	/* Make sure the base ntfs inode doesn't go away. */
	atomic_inc(&base_ni->count);
	/*
	 * Check if this extent inode has already been added to the base inode,
	 * in which case just return it. If not found, add it to the base
	 * inode before returning it.
	 */
	down(&base_ni->extent_lock);
	if (base_ni->nr_extents > 0) {
		extent_nis = base_ni->ext.extent_ntfs_inos;
		for (i = 0; i < base_ni->nr_extents; i++) {
			if (mft_no != extent_nis[i]->mft_no)
				continue;
			ni = extent_nis[i];
			/* Make sure the ntfs inode doesn't go away. */
			atomic_inc(&ni->count);
			break;
		}
	}
	if (likely(ni != NULL)) {
		up(&base_ni->extent_lock);
		atomic_dec(&base_ni->count);
		/* We found the record; just have to map and return it. */
		m = map_mft_record(ni);
		/* map_mft_record() has incremented this on success. */
		atomic_dec(&ni->count);
		if (likely(!IS_ERR(m))) {
			/* Verify the sequence number. */
			if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
				ntfs_debug("Done 1.");
				*ntfs_ino = ni;
				return m;
			}
			unmap_mft_record(ni);
			ntfs_error(base_ni->vol->sb, "Found stale extent mft "
					"reference! Corrupt file system. "
					"Run chkdsk.");
			return ERR_PTR(-EIO);
		}
map_err_out:
		ntfs_error(base_ni->vol->sb, "Failed to map extent "
				"mft record, error code %ld.", -PTR_ERR(m));
		return m;
	}
	/* Record wasn't there. Get a new ntfs inode and initialize it. */
	ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
	if (unlikely(!ni)) {
		up(&base_ni->extent_lock);
		atomic_dec(&base_ni->count);
		return ERR_PTR(-ENOMEM);
	}
	ni->vol = base_ni->vol;
	ni->seq_no = seq_no;
	ni->nr_extents = -1;
	ni->ext.base_ntfs_ino = base_ni;
	/* Now map the record. */
	m = map_mft_record(ni);
	if (unlikely(IS_ERR(m))) {
		up(&base_ni->extent_lock);
		atomic_dec(&base_ni->count);
		ntfs_clear_extent_inode(ni);
		goto map_err_out;
	}
	/* Verify the sequence number. */
	if (unlikely(le16_to_cpu(m->sequence_number) != seq_no)) {
		ntfs_error(base_ni->vol->sb, "Found stale extent mft "
				"reference! Corrupt file system. Run chkdsk.");
		destroy_ni = TRUE;
		m = ERR_PTR(-EIO);
		goto unm_err_out;
	}
	/* Attach extent inode to base inode, reallocating memory if needed. */
	if (!(base_ni->nr_extents & 3)) {
		ntfs_inode **tmp;
		int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);

		tmp = (ntfs_inode **)kmalloc(new_size, GFP_NOFS);
		if (unlikely(!tmp)) {
			ntfs_error(base_ni->vol->sb, "Failed to allocate "
					"internal buffer.");
			destroy_ni = TRUE;
			m = ERR_PTR(-ENOMEM);
			goto unm_err_out;
		}
		if (base_ni->ext.extent_ntfs_inos) {
			memcpy(tmp, base_ni->ext.extent_ntfs_inos, new_size -
					4 * sizeof(ntfs_inode *));
			kfree(base_ni->ext.extent_ntfs_inos);
		}
		base_ni->ext.extent_ntfs_inos = tmp;
	}
	base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
	up(&base_ni->extent_lock);
	atomic_dec(&base_ni->count);
	ntfs_debug("Done 2.");
	*ntfs_ino = ni;
	return m;
unm_err_out:
	unmap_mft_record(ni);
	up(&base_ni->extent_lock);
	atomic_dec(&base_ni->count);
	/*
	 * If the extent inode was not attached to the base inode we need to
	 * release it or we will leak memory.
	 */
	if (destroy_ni)
		ntfs_clear_extent_inode(ni);
	return m;
}
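Per the kernel-doc above, a successful map must eventually be balanced by unmap_mft_record() on the returned extent inode. A minimal caller sketch, with error handling reduced to the essentials:

	ntfs_inode *ext_ni;
	MFT_RECORD *m;

	m = map_extent_mft_record(base_ni, mref, &ext_ni);
	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... read or modify attributes in the extent mft record ... */
	unmap_mft_record(ext_ni);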
Example #11
0
int cifs_setxattr(struct dentry *direntry, const char *ea_name,
		  const void *ea_value, size_t value_size, int flags)
{
	int rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	struct super_block *sb;
	char *full_path;
	struct cifs_ntsd *pacl;

	if (direntry == NULL)
		return -EIO;
	if (direntry->d_inode == NULL)
		return -EIO;
	sb = direntry->d_inode->i_sb;
	if (sb == NULL)
		return -EIO;

	cifs_sb = CIFS_SB(sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	xid = GetXid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto set_ea_exit;
	}
	/* return dos attributes as pseudo xattr */
	/* return alt name if available as pseudo attr */

	/* if proc/fs/cifs/streamstoxattr is set then
	   search the server for EAs or streams to
	   return as xattrs */
	if (value_size > MAX_EA_VALUE_SIZE) {
		cFYI(1, "size of EA value too large");
		rc = -EOPNOTSUPP;
		goto set_ea_exit;
	}

	if (ea_name == NULL) {
		cFYI(1, "Null xattr names not supported");
	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto set_ea_exit;
		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
			cFYI(1, "attempt to set cifs inode metadata");

		ea_name += 5; /* skip past user. prefix */
		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
			(__u16)value_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto set_ea_exit;

		ea_name += 4; /* skip past os2. prefix */
		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
			(__u16)value_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
			strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
		pacl = kmalloc(value_size, GFP_KERNEL);
		if (!pacl) {
			cFYI(1, "%s: Can't allocate memory for ACL",
					__func__);
			rc = -ENOMEM;
		} else {
#ifdef CONFIG_CIFS_ACL
			memcpy(pacl, ea_value, value_size);
			rc = set_cifs_acl(pacl, value_size,
				direntry->d_inode, full_path);
			if (rc == 0) /* force revalidate of the inode */
				CIFS_I(direntry->d_inode)->time = 0;
#else
			cFYI(1, "Set CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
			kfree(pacl); /* free in both configurations to avoid a leak */
		}
	} else {
		int temp;
		temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
			strlen(POSIX_ACL_XATTR_ACCESS));
		if (temp == 0) {
#ifdef CONFIG_CIFS_POSIX
			if (sb->s_flags & MS_POSIXACL)
				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
					ea_value, (const int)value_size,
					ACL_TYPE_ACCESS, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			cFYI(1, "set POSIX ACL rc %d", rc);
#else
			cFYI(1, "set POSIX ACL not supported");
#endif
		} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
				   strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
#ifdef CONFIG_CIFS_POSIX
			if (sb->s_flags & MS_POSIXACL)
				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
					ea_value, (const int)value_size,
					ACL_TYPE_DEFAULT, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			cFYI(1, "set POSIX default ACL rc %d", rc);
#else
			cFYI(1, "set default POSIX ACL not supported");
#endif
		} else {
			cFYI(1, "illegal xattr request %s (only user namespace"
				" supported)", ea_name);
		  /* BB what if no namespace prefix? */
		  /* Should we just pass them to server, except for
		  system and perhaps security prefixes? */
		}
	}

set_ea_exit:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
#endif
	return rc;
}
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
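What rfc4106_set_hash_subkey() computes is GCM's hash subkey H = AES_K(0^128): with an all-zero IV the first CTR keystream block is the cipher applied to a zero counter block, so CTR-encrypting 16 zero bytes yields exactly H. An equivalent single-block sketch using the synchronous cipher API (a simplification for illustration, not the driver's code):

	struct crypto_cipher *aes = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(aes))
		return PTR_ERR(aes);
	crypto_cipher_setkey(aes, key, key_len);
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
	crypto_cipher_encrypt_one(aes, hash_subkey, hash_subkey);
	crypto_free_cipher(aes);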

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
						   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned buffer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes.
	 */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
		kfree(src);
	}
	return retval;
}
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes.
	 */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
Example #15
0
static int __devinit omapflash_probe(struct platform_device *pdev)
{
	int err;
	struct omapflash_info *info;
	struct flash_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res = pdev->resource;
	unsigned long size = res->end - res->start + 1;

	info = kmalloc(sizeof(struct omapflash_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	memset(info, 0, sizeof(struct omapflash_info));

	if (!request_mem_region(res->start, size, "flash")) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->map.virt		= ioremap(res->start, size);
	if (!info->map.virt) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}
	info->map.name		= pdev->dev.bus_id;
	info->map.phys		= res->start;
	info->map.size		= size;
	info->map.bankwidth	= pdata->width;
	info->map.set_vpp	= omap_set_vpp;

	simple_map_init(&info->map);
	info->mtd = do_map_probe(pdata->map_name, &info->map);
	if (!info->mtd) {
		err = -EIO;
		goto out_iounmap;
	}
	info->mtd->owner = THIS_MODULE;

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(info->mtd, info->parts, err);
	else if (err < 0 && pdata->parts)
		add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(info->mtd);

	platform_set_drvdata(pdev, info);

	return 0;

out_iounmap:
	iounmap(info->map.virt);
out_release_mem_region:
	release_mem_region(res->start, size);
out_free_info:
	kfree(info);

	return err;
}
Example #16
0
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
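
mempool_kmalloc is half of a pair: mempool_create() also takes a matching free routine, and the kernel's mempool_kfree is its mirror image (shown here for completeness). Below is a hedged sketch of how the pair is typically wired up; the pool depth and element size are arbitrary illustrations:

/* The matching free half: kfree the element, pool_data is unused. */
void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}

/* Sketch: a pool guaranteeing at least four 256-byte elements even
 * under memory pressure. */
static int example_pool_setup(void)
{
	mempool_t *pool;
	void *elem;

	pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
			      (void *)(size_t)256);
	if (!pool)
		return -ENOMEM;

	elem = mempool_alloc(pool, GFP_KERNEL);	/* may dip into reserve */
	mempool_free(elem, pool);
	mempool_destroy(pool);
	return 0;
}
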
Example #17
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto failed_unlock;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		goto failed_unlock;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	spin_lock_init(&sbi->s_lock);

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.  
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options((char *) data, sb))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext2_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext2_msg(sb, KERN_ERR,	"error: couldn't mount because of "
		       "unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
		       "unsupported optional features (%x)",
		       le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			ext2_msg(sb, KERN_ERR,
				"error: unsupported blocksize for xip");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			ext2_msg(sb, KERN_ERR, "error: couldn't read"
				"superblock on 2nd try");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			ext2_msg(sb, KERN_ERR, "error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
	sb->s_max_links = EXT2_LINK_MAX;

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		ext2_msg(sb, KERN_ERR,
			"error: fragsize %lu != blocksize %lu"
			"(not supported yet)",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 				le32_to_cpu(es->s_first_data_block) - 1)
 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount_group_desc;
	}
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			ext2_msg(sb, KERN_ERR,
				"error: unable to read group descriptors");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per-filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
#endif

	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext2_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting ext3 filesystem as ext2");
	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;
	ext2_write_super(sb);
	return 0;

cantfind_ext2:
	if (!silent)
		ext2_msg(sb, KERN_ERR,
			"error: can't find an ext2 filesystem on dev %s.",
			sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
failed_unlock:
	return ret;
}
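
The superblock-location arithmetic in ext2_fill_super deserves a worked example: ext2 addresses the superblock in 1 KiB units, so on a device with a larger block size the location must be split into a logical block number and a byte offset within that block. A standalone sketch:

#include <stdio.h>

#define BLOCK_SIZE 1024	/* ext2's base unit, 1 KiB */

/* Sketch: map a 1 KiB-based superblock location onto the device
 * blocksize, mirroring the computation in ext2_fill_super. */
static void locate_sb(unsigned long sb_block, unsigned long blocksize,
		      unsigned long *logic_block, unsigned long *offset)
{
	*logic_block = (sb_block * BLOCK_SIZE) / blocksize;
	*offset = (sb_block * BLOCK_SIZE) % blocksize;
}

int main(void)
{
	unsigned long blk, off;

	/* Default superblock at 1 KiB on 4 KiB device blocks:
	 * logical block 0, offset 1024. */
	locate_sb(1, 4096, &blk, &off);
	printf("logical block %lu, offset %lu\n", blk, off);
	return 0;
}
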
Example #18
0
static int kgsl_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	int status = 0;
	int i;
	const struct firmware *fw = NULL;
	unsigned int *fw_ptr = NULL;
	size_t fw_word_size = 0;
	struct kgsl_yamato_device *yamato_device = KGSL_YAMATO_DEVICE(device);

	if (yamato_device->pfp_fw == NULL) {
		if (device->chip_id == KGSL_CHIPID_LEIA_REV470) {
			status = request_firmware(&fw, LEIA_PFP_470_FW,
				device->dev);
			if (status != 0) {
				KGSL_DRV_ERR("request_firmware for %s \
					 failed with error %d\n",
					LEIA_PFP_470_FW, status);
				return status;
			}
		} else {
			status = request_firmware(&fw, YAMATO_PFP_FW,
				device->dev);
			if (status != 0) {
				KGSL_DRV_ERR("request_firmware for %s \
					 failed with error %d\n",
					YAMATO_PFP_FW, status);
				return status;
			}
		}
		/* This firmware must come in 1-word chunks. */
		if ((fw->size % sizeof(uint32_t)) != 0) {
			KGSL_DRV_ERR("bad firmware size %zu.\n", fw->size);
			status = -EINVAL;
			goto error_release_fw;
		}
		fw_ptr = (unsigned int *)fw->data;
		fw_word_size = fw->size/sizeof(uint32_t);
		yamato_device->pfp_fw_size = fw_word_size;

		/* keep a copy of fw to be reloaded  later */
		yamato_device->pfp_fw = (unsigned int *)
						kmalloc(fw->size, GFP_KERNEL);
		if (yamato_device->pfp_fw == NULL) {
			KGSL_DRV_ERR("ERROR: couldn't kmalloc fw size= %d.\n",
								fw->size);
			status = -EINVAL;
			goto error_release_fw;
		}
		memcpy(yamato_device->pfp_fw, fw->data, fw->size);

	} else {
		fw_ptr = yamato_device->pfp_fw;
		fw_word_size = yamato_device->pfp_fw_size;
	}

	KGSL_DRV_INFO("loading pfp ucode version: %d\n", fw_ptr[0]);

	kgsl_yamato_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < fw_word_size; i++)
		kgsl_yamato_regwrite(device, REG_CP_PFP_UCODE_DATA, fw_ptr[i]);

error_release_fw:
	if (fw)
		release_firmware(fw);
	return status;
}
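
The loader above follows a reusable shape: request the image, validate that it is a whole number of 32-bit words, then cache a private copy so later reloads skip request_firmware(). A hedged sketch of just the validate-and-cache step; the helper name and out-parameters are invented for illustration, and in the driver the copy and its word count live in the device struct:

/* Sketch: validate a word-oriented firmware image and keep a copy. */
static int fw_validate_and_cache(const struct firmware *fw,
				 unsigned int **cached, size_t *cached_words)
{
	if (fw->size % sizeof(uint32_t))	/* whole 32-bit words only */
		return -EINVAL;

	*cached = kmalloc(fw->size, GFP_KERNEL);
	if (*cached == NULL)
		return -ENOMEM;

	memcpy(*cached, fw->data, fw->size);
	*cached_words = fw->size / sizeof(uint32_t);
	return 0;
}
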
Example #19
0
static int __devinit wl1271_probe(struct spi_device *spi)
{
	struct wl12xx_platform_data *pdata;
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int ret, i;
	static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};

	pdata = spi->dev.platform_data;
	if (!pdata) {
		wl1271_error("no platform data");
		return -ENODEV;
	}

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		return -ENOMEM;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->hw = hw;
	dev_set_drvdata(&spi->dev, wl);
	wl->spi = spi;

	skb_queue_head_init(&wl->tx_queue);

	INIT_WORK(&wl->filter_work, wl1271_filter_work);
	wl->channel = WL1271_DEFAULT_CHANNEL;
	wl->scanning = false;
	wl->default_key = 0;
	wl->listen_int = 1;
	wl->rx_counter = 0;
	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
	wl->elp = false;
	wl->psm = 0;
	wl->psm_requested = false;
	wl->tx_queue_stopped = false;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;

	/* We use the default power on sleep time until we know which chip
	 * we're using */
	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	/*
	 * In case our MAC address is not correctly set,
	 * we use a random but Nokia MAC.
	 */
	memcpy(wl->mac_addr, nokia_oui, 3);
	get_random_bytes(wl->mac_addr + 3, 3);

	wl->state = WL1271_STATE_OFF;
	mutex_init(&wl->mutex);

	wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
	if (!wl->rx_descriptor) {
		wl1271_error("could not allocate memory for rx descriptor");
		ret = -ENOMEM;
		goto out_free;
	}

	/* This is the only SPI value that we need to set here, the rest
	 * comes from the board-peripherals file */
	spi->bits_per_word = 32;

	ret = spi_setup(spi);
	if (ret < 0) {
		wl1271_error("spi_setup failed");
		goto out_free;
	}

	wl->set_power = pdata->set_power;
	if (!wl->set_power) {
		wl1271_error("set power function missing in platform data");
		ret = -ENODEV;
		goto out_free;
	}

	wl->irq = spi->irq;
	if (wl->irq < 0) {
		wl1271_error("irq missing in platform data");
		ret = -ENODEV;
		goto out_free;
	}

	ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
	if (ret < 0) {
		wl1271_error("request_irq() failed: %d", ret);
		goto out_free;
	}

	set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);

	disable_irq(wl->irq);

	ret = platform_device_register(&wl1271_device);
	if (ret) {
		wl1271_error("couldn't register platform device");
		goto out_irq;
	}
	dev_set_drvdata(&wl1271_device.dev, wl);

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_platform;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_platform;

	wl1271_debugfs_init(wl);

	wl1271_notice("initialized");

	return 0;

 out_platform:
	platform_device_unregister(&wl1271_device);

 out_irq:
	free_irq(wl->irq, wl);

 out_free:
	kfree(wl->rx_descriptor);
	wl->rx_descriptor = NULL;

	ieee80211_free_hw(hw);

	return ret;
}
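
wl1271_probe uses the standard probe-unwind idiom: each acquired resource gets a label near the end of the function, and every failure path jumps to the label that releases exactly what has been acquired so far, in reverse order. A minimal compilable sketch of the shape; the acquire_*()/release_*() helpers are hypothetical stand-ins for real resource calls (request_irq, platform_device_register, and so on):

/* Sketch: the goto-unwind idiom used throughout kernel probe paths. */
static int acquire_a(void) { return 0; }	/* stubs: pretend success */
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto out_a;

	ret = acquire_c();
	if (ret)
		goto out_b;

	return 0;	/* success: everything stays acquired */

out_b:
	release_b();
out_a:
	release_a();
	return ret;
}
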
Example #20
0
static usbd_status
usb_block_allocmem(bus_dma_tag_t tag, size_t size, size_t align,
		   usb_dma_block_t **dmap)
{
        usb_dma_block_t *p;

	DPRINTFN(5, ("usb_block_allocmem: size=%lu align=%lu\n",
		     (u_long)size, (u_long)align));

	crit_enter();
	/* First check the free list. */
	for (p = LIST_FIRST(&usb_blk_freelist); p; p = LIST_NEXT(p, next)) {
		if (p->tag == tag && p->size >= size && p->align >= align) {
			LIST_REMOVE(p, next);
			usb_blk_nfree--;
			crit_exit();
			*dmap = p;
			DPRINTFN(6,("usb_block_allocmem: free list size=%lu\n",
				    (u_long)p->size));
			logmemory(blkalloc2, p, NULL, size, align);
			return (USBD_NORMAL_COMPLETION);
		}
	}
	crit_exit();

	DPRINTFN(6, ("usb_block_allocmem: no free\n"));
	p = kmalloc(sizeof *p, M_USB, M_INTWAIT);
	logmemory(blkalloc, p, NULL, size, align);

	if (bus_dma_tag_create(tag, align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, NELEM(p->segs), size, BUS_DMA_ALLOCNOW, &p->tag) == ENOMEM) {
		goto free;
	}

	p->size = size;
	p->align = align;
	if (bus_dmamem_alloc(p->tag, &p->kaddr,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT, &p->map))
		goto tagfree;

	if (bus_dmamap_load(p->tag, p->map, p->kaddr, p->size,
	    usbmem_callback, p, 0))
		goto memfree;

	/* XXX - override the tag, ok since we never free it */
	p->tag = tag;
	*dmap = p;
	return (USBD_NORMAL_COMPLETION);

	/*
	 * XXX - do we need to _unload? is the order of _free and _destroy
	 * correct?
	 */
memfree:
	bus_dmamem_free(p->tag, p->kaddr, p->map);
tagfree:
	bus_dma_tag_destroy(p->tag);
free:
	kfree(p, M_USB);
	return (USBD_NOMEM);
}
Example #21
0
int add_mtd_partitions(struct mtd_info *master, 
		       struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	u_int32_t cur_offset = 0;
	int i;

	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {

		/* allocate the partition structure */
		slave = kmalloc (sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			printk ("memory allocation error while creating partitions for \"%s\"\n",
				master->name);
			del_mtd_partitions(master);
			return -ENOMEM;
		}
		memset(slave, 0, sizeof(*slave));
		list_add(&slave->list, &mtd_partitions);

		/* set up the MTD object for this partition */
		slave->mtd.type = master->type;
		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
		slave->mtd.size = parts[i].size;
		slave->mtd.oobblock = master->oobblock;
		slave->mtd.oobsize = master->oobsize;
		slave->mtd.ecctype = master->ecctype;
		slave->mtd.eccsize = master->eccsize;

		slave->mtd.name = parts[i].name;
		slave->mtd.bank_size = master->bank_size;
		slave->mtd.module = master->module;

		slave->mtd.read = part_read;
		slave->mtd.write = part_write;

		if(master->point && master->unpoint){
			slave->mtd.point = part_point;
			slave->mtd.unpoint = part_unpoint;
		}
		
		if (master->read_ecc)
			slave->mtd.read_ecc = part_read_ecc;
		if (master->write_ecc)
			slave->mtd.write_ecc = part_write_ecc;
		if (master->read_oob)
			slave->mtd.read_oob = part_read_oob;
		if (master->write_oob)
			slave->mtd.write_oob = part_write_oob;
		if(master->read_user_prot_reg)
			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
		if(master->read_fact_prot_reg)
			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
		if(master->write_user_prot_reg)
			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
		if (master->sync)
			slave->mtd.sync = part_sync;
		if (!i && master->suspend && master->resume) {
				slave->mtd.suspend = part_suspend;
				slave->mtd.resume = part_resume;
		}
		if (master->writev)
			slave->mtd.writev = part_writev;
		if (master->readv)
			slave->mtd.readv = part_readv;
		if (master->writev_ecc)
			slave->mtd.writev_ecc = part_writev_ecc;
		if (master->readv_ecc)
			slave->mtd.readv_ecc = part_readv_ecc;
		if (master->lock)
			slave->mtd.lock = part_lock;
		if (master->unlock)
			slave->mtd.unlock = part_unlock;
		slave->mtd.erase = part_erase;
		slave->master = master;
		slave->offset = parts[i].offset;
		slave->index = i;

		if (slave->offset == MTDPART_OFS_APPEND)
			slave->offset = cur_offset;
		if (slave->offset == MTDPART_OFS_NXTBLK) {
			u_int32_t emask = master->erasesize-1;
			slave->offset = (cur_offset + emask) & ~emask;
			if (slave->offset != cur_offset) {
				printk(KERN_NOTICE "Moving partition %d: "
				       "0x%08x -> 0x%08x\n", i,
				       cur_offset, slave->offset);
			}
		}
		if (slave->mtd.size == MTDPART_SIZ_FULL)
			slave->mtd.size = master->size - slave->offset;
		cur_offset = slave->offset + slave->mtd.size;
	
		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset, 
			slave->offset + slave->mtd.size, slave->mtd.name);

		/* let's do some sanity checks */
		if (slave->offset >= master->size) {
				/* let's register it anyway to preserve ordering */
			slave->offset = 0;
			slave->mtd.size = 0;
			printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
				parts[i].name);
		}
		if (slave->offset + slave->mtd.size > master->size) {
			slave->mtd.size = master->size - slave->offset;
			printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
				parts[i].name, master->name, slave->mtd.size);
		}
		if (master->numeraseregions>1) {
			/* Deal with variable erase size stuff */
			int i;
			struct mtd_erase_region_info *regions = master->eraseregions;
			
			/* Find the first erase regions which is part of this partition. */
			for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
				;

			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
				if (slave->mtd.erasesize < regions[i].erasesize) {
					slave->mtd.erasesize = regions[i].erasesize;
				}
			}
		} else {
			/* Single erase size */
			slave->mtd.erasesize = master->erasesize;
		}

		if ((slave->mtd.flags & MTD_WRITEABLE) && 
		    (slave->offset % slave->mtd.erasesize)) {
			/* Doesn't start on a boundary of major erase size */
			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
			slave->mtd.flags &= ~MTD_WRITEABLE;
			printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
				parts[i].name);
		}
		if ((slave->mtd.flags & MTD_WRITEABLE) && 
		    (slave->mtd.size % slave->mtd.erasesize)) {
			slave->mtd.flags &= ~MTD_WRITEABLE;
			printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
				parts[i].name);
		}

		if (parts[i].mtdp) {
			/* store the object pointer (caller may or may not
			   register it) */
			*parts[i].mtdp = &slave->mtd;
			slave->registered = 0;
		} else {
			/* register our partition */
			add_mtd_device(&slave->mtd);
			slave->registered = 1;
		}
	}

	return 0;
}
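
The MTDPART_OFS_NXTBLK case above uses the classic power-of-two round-up: with mask = erasesize - 1, (offset + mask) & ~mask advances offset to the next erase-block boundary. A tiny standalone check:

#include <stdio.h>
#include <stdint.h>

/* Sketch: round 'off' up to the next multiple of the power-of-two 'align'. */
static uint32_t roundup_pow2(uint32_t off, uint32_t align)
{
	uint32_t mask = align - 1;
	return (off + mask) & ~mask;
}

int main(void)
{
	/* 0x2100 rounded to a 0x1000 (4 KiB) erase block -> 0x3000 */
	printf("0x%08x\n", roundup_pow2(0x2100, 0x1000));
	return 0;
}
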
Example #22
0
int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
{
	struct device *dev = mfd->fbi->dev;
	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
	struct mdp3_session_data *mdp3_session = NULL;
	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
	int rc;
	int splash_mismatch = 0;

	pr_debug("mdp3_ctrl_init\n");
	rc = mdp3_parse_dt_splash(mfd);
	if (rc)
		splash_mismatch = 1;

	mdp3_interface->on_fnc = mdp3_ctrl_on;
	mdp3_interface->off_fnc = mdp3_ctrl_off;
	mdp3_interface->do_histogram = NULL;
	mdp3_interface->cursor_update = NULL;
	mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
	mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
	mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff;
	mdp3_interface->lut_update = mdp3_ctrl_lut_update;

	mdp3_session = kmalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
	if (!mdp3_session) {
		pr_err("fail to allocate mdp3 private data structure");
		return -ENOMEM;
	}
	memset(mdp3_session, 0, sizeof(struct mdp3_session_data));
	mutex_init(&mdp3_session->lock);
	INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off);
	INIT_WORK(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done);
	atomic_set(&mdp3_session->vsync_countdown, 0);
	mutex_init(&mdp3_session->histo_lock);
	mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
	if (!mdp3_session->dma) {
		rc = -ENODEV;
		goto init_done;
	}

	rc = mdp3_dma_init(mdp3_session->dma);
	if (rc) {
		pr_err("fail to init dma\n");
		goto init_done;
	}

	intf_type = mdp3_ctrl_get_intf_type(mfd);
	mdp3_session->intf = mdp3_get_display_intf(intf_type);
	if (!mdp3_session->intf) {
		rc = -ENODEV;
		goto init_done;
	}
	rc = mdp3_intf_init(mdp3_session->intf);
	if (rc) {
		pr_err("fail to init interface\n");
		goto init_done;
	}

	mdp3_session->dma->output_config.out_sel = intf_type;
	mdp3_session->mfd = mfd;
	mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
	mdp3_session->status = mdp3_session->intf->active;
	mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
	mdp3_bufq_init(&mdp3_session->bufq_in);
	mdp3_bufq_init(&mdp3_session->bufq_out);
	mdp3_session->histo_status = 0;
	mdp3_session->lut_sel = 0;
	BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head);

	init_timer(&mdp3_session->vsync_timer);
	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
	mdp3_session->vsync_timer.data = (u32)mdp3_session;
	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
	mfd->mdp.private1 = mdp3_session;
	init_completion(&mdp3_session->dma_completion);
	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;

	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
	if (rc) {
		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
		goto init_done;
	}

	mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd, NULL,
							"vsync_event");
	if (!mdp3_session->vsync_event_sd) {
		pr_err("vsync_event sysfs lookup failed\n");
		rc = -ENODEV;
		goto init_done;
	}

	rc = mdp3_create_sysfs_link(dev);
	if (rc)
		pr_warn("problem creating link to mdp sysfs\n");

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");

	if (mdp3_get_cont_spash_en()) {
		mdp3_session->clk_on = 1;
		mdp3_ctrl_notifier_register(mdp3_session,
			&mdp3_session->mfd->mdp_sync_pt_data.notifier);
	}

	if (splash_mismatch) {
		pr_err("splash memory mismatch, stop splash\n");
		mdp3_ctrl_off(mfd);
	}

	mdp3_session->vsync_before_commit = true;
init_done:
	if (IS_ERR_VALUE(rc))
		kfree(mdp3_session);

	return rc;
}
Example #23
0
static int bfusb_load_firmware(struct bfusb_data *data,
			       const unsigned char *firmware, int count)
{
	unsigned char *buf;
	int err, pipe, len, size, sent = 0;

	BT_DBG("bfusb %p udev %p", data, data->udev);

	BT_INFO("BlueFRITZ! USB loading firmware");

	buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL);
	if (!buf) {
		BT_ERR("Can't allocate memory chunk for firmware");
		return -ENOMEM;
	}

	pipe = usb_sndctrlpipe(data->udev, 0);

	if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION,
				0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) {
		BT_ERR("Can't change to loading configuration");
		kfree(buf);
		return -EBUSY;
	}

	data->udev->toggle[0] = data->udev->toggle[1] = 0;

	pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);

	while (count) {
		size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE + 3);

		memcpy(buf, firmware + sent, size);

		err = usb_bulk_msg(data->udev, pipe, buf, size,
					&len, BFUSB_BLOCK_TIMEOUT);

		if (err || (len != size)) {
			BT_ERR("Error in firmware loading");
			goto error;
		}

		sent  += size;
		count -= size;
	}

	err = usb_bulk_msg(data->udev, pipe, NULL, 0,
					&len, BFUSB_BLOCK_TIMEOUT);
	if (err < 0) {
		BT_ERR("Error in null packet request");
		goto error;
	}

	pipe = usb_sndctrlpipe(data->udev, 0);

	err = usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION,
				0, 2, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (err < 0) {
		BT_ERR("Can't change to running configuration");
		goto error;
	}

	data->udev->toggle[0] = data->udev->toggle[1] = 0;

	BT_INFO("BlueFRITZ! USB device ready");

	kfree(buf);
	return 0;

error:
	kfree(buf);

	pipe = usb_sndctrlpipe(data->udev, 0);

	usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION,
				0, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);

	return err;
}
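
The firmware download loop is the generic chunked-transfer shape: clamp each block to the endpoint's maximum, verify the device accepted exactly what was sent, and advance the cursor. A hedged userspace sketch with send_block() standing in for usb_bulk_msg():

#include <errno.h>

/* Stub standing in for the real transfer primitive; pretends success. */
static int send_block(const unsigned char *buf, int size)
{
	(void)buf;
	return size;
}

/* Sketch: push 'count' bytes in blocks of at most 'max' bytes. */
static int send_chunked(const unsigned char *data, int count, int max)
{
	int sent = 0;

	while (count) {
		int size = count < max ? count : max;

		if (send_block(data + sent, size) != size)
			return -EIO;	/* short or failed transfer */

		sent  += size;
		count -= size;
	}
	return 0;
}
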
Example #24
0
static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
{
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	struct jffs2_raw_inode *ri;
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	int namelen;
	uint32_t alloclen;
	int ret, targetlen = strlen(target);

	/* FIXME: If you care. We'd need to use frags for the target
	   if it grows much more than this */
	if (targetlen > 254)
		return -ENAMETOOLONG;

	ri = jffs2_alloc_raw_inode();

	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	namelen = dentry->d_name.len;
	ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);

	if (ret) {
		jffs2_free_raw_inode(ri);
		return ret;
	}

	inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);

	if (IS_ERR(inode)) {
		jffs2_free_raw_inode(ri);
		jffs2_complete_reservation(c);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_symlink_inode_operations;

	f = JFFS2_INODE_INFO(inode);

	inode->i_size = targetlen;
	ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size);
	ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->compr = JFFS2_COMPR_NONE;
	ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL);

	jffs2_free_raw_inode(ri);

	if (IS_ERR(fn)) {
		/* Eeek. Wave bye bye */
		mutex_unlock(&f->sem);
		jffs2_complete_reservation(c);
		ret = PTR_ERR(fn);
		goto fail;
	}

	/* We use f->target field to store the target path. */
	f->target = kmalloc(targetlen + 1, GFP_KERNEL);
	if (!f->target) {
		printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
		mutex_unlock(&f->sem);
		jffs2_complete_reservation(c);
		ret = -ENOMEM;
		goto fail;
	}

	memcpy(f->target, target, targetlen + 1);
	D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target));

	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	*/
	f->metadata = fn;
	mutex_unlock(&f->sem);

	jffs2_complete_reservation(c);

	ret = jffs2_init_security(inode, dir_i);
	if (ret)
		goto fail;

	ret = jffs2_init_acl_post(inode);
	if (ret)
		goto fail;

	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
	if (ret)
		goto fail;

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		ret = -ENOMEM;
		goto fail;
	}

	dir_f = JFFS2_INODE_INFO(dir_i);
	mutex_lock(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_i->i_ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	rd->ino = cpu_to_je32(inode->i_ino);
	rd->mctime = cpu_to_je32(get_seconds());
	rd->nsize = namelen;
	rd->type = DT_LNK;
	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));

	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		jffs2_free_raw_dirent(rd);
		mutex_unlock(&dir_f->sem);
		ret = PTR_ERR(fd);
		goto fail;
	}

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));

	jffs2_free_raw_dirent(rd);

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	mutex_unlock(&dir_f->sem);
	jffs2_complete_reservation(c);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;

 fail:
	iget_failed(inode);
	return ret;
}
Example #25
0
static int isight_firmware_load(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	int llen, len, req, ret = 0;
	const struct firmware *firmware;
	unsigned char *buf = kmalloc(50, GFP_KERNEL);
	unsigned char data[4];
	const u8 *ptr;

	if (!buf)
		return -ENOMEM;

	if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
		ret = -ENODEV;
		goto out;
	}

	ptr = firmware->data;

	buf[0] = 0x01;
	if (usb_control_msg
	    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
	     300) != 1) {
		printk(KERN_ERR
		       "Failed to initialise isight firmware loader\n");
		ret = -ENODEV;
		goto out;
	}

	while (ptr+4 <= firmware->data+firmware->size) {
		memcpy(data, ptr, 4);
		len = (data[0] << 8 | data[1]);
		req = (data[2] << 8 | data[3]);
		ptr += 4;

		if (len == 0x8001)
			break;	/* success */
		else if (len == 0)
			continue;

		for (; len > 0; req += 50) {
			llen = min(len, 50);
			len -= llen;
			if (ptr+llen > firmware->data+firmware->size) {
				printk(KERN_ERR
				       "Malformed isight firmware\n");
				ret = -ENODEV;
				goto out;
			}
			memcpy(buf, ptr, llen);

			ptr += llen;

			if (usb_control_msg
			    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, req, 0,
			     buf, llen, 300) != llen) {
				printk(KERN_ERR
				       "Failed to load isight firmware\n");
				ret = -ENODEV;
				goto out;
			}

		}
	}

	buf[0] = 0x00;
	if (usb_control_msg
	    (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
	     300) != 1) {
		printk(KERN_ERR "isight firmware loading completion failed\n");
		ret = -ENODEV;
	}

out:
	kfree(buf);
	release_firmware(firmware);
	return ret;
}
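
Each record in the iSight image begins with two big-endian 16-bit fields, a length and a request value, decoded above as (data[0] << 8 | data[1]). A standalone sketch of that header parse, with sample bytes chosen to show the 0x8001 terminator:

#include <stdint.h>
#include <stdio.h>

/* Sketch: decode the 4-byte record header used by the isight image:
 * big-endian length followed by big-endian request value. */
static void parse_record_hdr(const uint8_t hdr[4],
			     unsigned *len, unsigned *req)
{
	*len = (hdr[0] << 8) | hdr[1];
	*req = (hdr[2] << 8) | hdr[3];
}

int main(void)
{
	const uint8_t hdr[4] = { 0x80, 0x01, 0x16, 0x00 };
	unsigned len, req;

	parse_record_hdr(hdr, &len, &req);
	/* len == 0x8001 is the success marker in the loop above */
	printf("len=0x%04x req=0x%04x\n", len, req);
	return 0;
}
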
Example #26
0
static int __devinit bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct bfin_mac_local *lp;
	int rc, i;

	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
	if (!ndev) {
		dev_err(&pdev->dev, "Cannot allocate net device!\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	lp = netdev_priv(ndev);

	/* Grab the MAC address in the MAC */
	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac */
	/* TODO: how to probe? which is the revision register? */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}

	/* set the GPIO pins to Ethernet mode */
	rc = peripheral_request_list(pin_req, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
		rc = -EFAULT;
		goto out_err_setup_pin_mux;
	}

	/*
	 * Is it valid? (Did bootloader initialize it?)
	 * Grab the MAC from the board somehow
	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
	 */
	if (!is_valid_ether_addr(ndev->dev_addr))
		bfin_get_ether_addr(ndev->dev_addr);

	/* If still not valid, get a random one */
	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	setup_mac_addr(ndev->dev_addr);

	/* MDIO bus initialization */
	lp->mii_bus = mdiobus_alloc();
	if (lp->mii_bus == NULL) {
		rc = -ENOMEM;
		goto out_err_mdiobus_alloc;
	}

	lp->mii_bus->priv = ndev;
	lp->mii_bus->read = bfin_mdiobus_read;
	lp->mii_bus->write = bfin_mdiobus_write;
	lp->mii_bus->reset = bfin_mdiobus_reset;
	lp->mii_bus->name = "bfin_mac_mdio";
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "0");
	lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!lp->mii_bus->irq) {
		rc = -ENOMEM;
		goto out_err_mdiobus_register;
	}
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		lp->mii_bus->irq[i] = PHY_POLL;

	rc = mdiobus_register(lp->mii_bus);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
		goto out_err_mdiobus_register;
	}

	rc = mii_probe(ndev);
	if (rc) {
		dev_err(&pdev->dev, "MII Probe failed!\n");
		goto out_err_mii_probe;
	}

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	ndev->netdev_ops = &bfin_mac_netdev_ops;
	ndev->ethtool_ops = &bfin_mac_ethtool_ops;

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
			IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
		rc = -EBUSY;
		goto out_err_request_irq;
	}

	rc = register_netdev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device!\n");
		goto out_err_reg_ndev;
	}

	/* now, print out the card info, in a short format.. */
	dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);

	return 0;

out_err_reg_ndev:
	free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
out_err_mii_probe:
	mdiobus_unregister(lp->mii_bus);
out_err_mdiobus_register:
	mdiobus_free(lp->mii_bus);
out_err_mdiobus_alloc:
	peripheral_free_list(pin_req);
out_err_setup_pin_mux:
out_err_probe_mac:
	platform_set_drvdata(pdev, NULL);
	free_netdev(ndev);

	return rc;
}
Example #27
0
int ceva_memory_alloc(void)
{
	int i;
	int *buf_ptr = 0;
	int buf_count = 0;
	int ceva_ptr[CEVA_LOOP_COUNT];
	int ceva_count = 0;
	unsigned int addr, phy_addr;


	printk("[%s] start\n", __FUNCTION__);

	for (i = 0; ceva_count < CEVA_LOOP_COUNT; i++) {
		addr = (unsigned int)kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
		if (addr == 0) {
			printk("[%s][%d] kmalloc fail at loop %d, size = %d \n", __FUNCTION__, __LINE__, i, MAX_KMALLOC_SIZE);
			break;
		}
		phy_addr = (unsigned int)__pa(addr);

		/* Check if located in memory for ceva */
		if (((phy_addr >= CEVA_ADDR_START) && (phy_addr <= CEVA_ADDR_END)) ||
			((phy_addr + MAX_KMALLOC_SIZE - 1 >= CEVA_ADDR_START) && (phy_addr + MAX_KMALLOC_SIZE - 1 <= CEVA_ADDR_END))) {
			//printk("[%s][%d] ceva_ptr[%d] %08x\n", __FUNCTION__, __LINE__, ceva_count, phy_addr);
			ceva_ptr[ceva_count++] = addr;
		}
		else {
			if (buf_ptr == 0) {
				buf_ptr = (int*)addr;
			}
			else {
				buf_ptr[buf_count++] = addr;
			}
			//printk("[%s][%d] buf_ptr(%d)  %08x\n", __FUNCTION__, __LINE__, buf_count, phy_addr);
		}
	}

	if (ceva_count < CEVA_LOOP_COUNT) {
		printk("Ceva memory not full reserved!\n");
	}
	
	if (buf_ptr) {
		for (i = 0; i < buf_count; i++) {
			kfree((void*)buf_ptr[i]);
		}
		kfree(buf_ptr);
	}
	
#if 0
	buf_index = kmalloc(PAGE_SIZE, GFP_KERNEL);

	for (i = 0 ; i < 4096 ; i ++)
	{
		buf_index[i] = (int)kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
		if (buf_index[i] == 0) {
			printk("[%s][%d] kmalloc fail at %d, size = %d \n", __FUNCTION__, __LINE__, i, MAX_KMALLOC_SIZE);
			return 0;
		}

		//printk("[%s][%d][%d] ptr=0x%x, pa=0x%x\n", __FUNCTION__, __LINE__, i, buf_index[i], __pa(buf_index[i]));
		// check and keep memory for ceva
		if ( ( __pa(buf_index[i]) >= 0xe0000) && ( __pa(buf_index[i]) <= 0x200000) ) {
			int timeout = 0x1000;

			//printk("[%s][%d] keep buffer [%d]\n", __FUNCTION__, __LINE__, i);
			kfree((int *)buf_index[i]);
			i--;

			//for (j = 0 ; j < CEVA_LOOP_COUNT ; j ++) {
			j=0;
			do {
				ceva_ptr[j] = (int)kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
				if (ceva_ptr[j] == 0) {
					printk("[%s][%d] kmalloc fail at %d\n", __FUNCTION__, __LINE__, j);
					return 0;
				}
				//printk("[%s][%d][%d] ceva_ptr ptr=0x%x, pa=0x%x\n", __FUNCTION__, __LINE__, j, ceva_ptr[j], __pa(ceva_ptr[j]));
				if ((j == 0) && (__pa(ceva_ptr[j]) != 0xe0000)) {
					i++;
					buf_index[i] = ceva_ptr[j];
					continue;
				}
				j++ ;
				if (timeout-- == 0) {
					printk("ERROR !!! ceva memory allocate fail\n");
				}

			} while (j < CEVA_LOOP_COUNT);
			if ( (__pa(ceva_ptr[0]) != 0xe0000) && (__pa(ceva_ptr[8]) != 0x1e0000) ) {
				printk("[%s][%d][%d] ceva_ptr ptr=0x%x, pa=0x%x\n", __FUNCTION__, __LINE__, 0, ceva_ptr[0], (unsigned int)__pa(ceva_ptr[0]));
				printk("[%s][%d][%d] ceva_ptr ptr=0x%x, pa=0x%x\n", __FUNCTION__, __LINE__, 8, ceva_ptr[8], (unsigned int)__pa(ceva_ptr[8]));
			}

			break;
		}
	}
	//printk("[%s][%d][%d]\n", __FUNCTION__, __LINE__, i);
	
	for ( ; i >= 0 ; i --)
	{
		//printk("[%s][%d] start release i=[%d], buf_index[i]=0x%x\n", __FUNCTION__, __LINE__, i, buf_index[i]);
		if (buf_index[i]) 
			 kfree((int *)buf_index[i]);
	}
	//printk("[%s][%d] end release i=[%d]\n", __FUNCTION__, __LINE__, i);
#endif

#if 0
	for (i = 0 ; i < 10240 ; i ++)
	{
		buf_index[i] = kmalloc(0x1000, GFP_KERNEL);
		if (buf_index[i] == NULL) {
			printk("[%s][%d] kmalloc fail at %d, size = %d \n", __FUNCTION__, __LINE__, i, MAX_KMALLOC_SIZE);
			return 0;
		}
		printk("[%s][%d][%d] ptr=0x%x, pa=0x%x\n", __FUNCTION__, __LINE__, i, buf_index[i], __pa(buf_index[i]));
		if ( ( __pa(buf_index[i]) >= 0xe0000) && ( __pa(buf_index[i]) <= 0x200000) ) {
			printk("!!!!!!!!!!!!!!!!!!!!!!!!!! [%s][%d] still alloc ceva [%d]\n", __FUNCTION__, __LINE__, i);
			return 0;
		}
	}
#endif
	
#if 0
	if (buf_index)
		kfree(buf_index);		
#endif

	printk("[%s] finish\n", __FUNCTION__);
	return 0;
}
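
Stripped of the debug scaffolding, ceva_memory_alloc is a greedy capture loop: keep allocating fixed-size blocks, retain the ones whose physical address lands in the target window, park the rest, and free the parked ones at the end. A hedged userspace sketch of that control flow; the predicate stands in for the driver's __pa() range test, and all names are illustrative:

#include <stdlib.h>

#define WANTED 16
#define MAX_TRIES 4096

/* Placeholder for the __pa()-range test in the driver. */
static int in_target_range(void *p)
{
	return (((size_t)p >> 12) % 7) == 0;
}

/* Sketch: allocate until WANTED blocks satisfy the predicate, holding
 * (and finally releasing) the ones that don't. */
static int capture_blocks(void *kept[WANTED])
{
	void *held[MAX_TRIES];
	int nheld = 0, nkept = 0, i;

	while (nkept < WANTED && nheld + nkept < MAX_TRIES) {
		void *p = malloc(4096);
		if (!p)
			break;
		if (in_target_range(p))
			kept[nkept++] = p;
		else
			held[nheld++] = p;
	}
	for (i = 0; i < nheld; i++)	/* give back the decoys */
		free(held[i]);
	return nkept;
}

int main(void)
{
	void *kept[WANTED];
	return capture_blocks(kept) == WANTED ? 0 : 1;
}
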
Example #28
0
static int
ngc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
	 struct mbuf *control, struct thread *td)
{
	struct ngpcb *const pcbp = sotongpcb(so);
	struct ngsock *const priv = NG_NODE_PRIVATE(pcbp->sockdata->node);
	struct sockaddr_ng *const sap = (struct sockaddr_ng *) addr;
	struct ng_mesg *msg;
	struct mbuf *m0;
	item_p item;
	char *path = NULL;
	int len, error = 0;
	struct ng_apply_info apply;

#ifdef	NOTYET
	if (control && (error = ng_internalize(control, td))) {
		if (pcbp->sockdata == NULL) {
			error = ENOTCONN;
			goto release;
		}
	}
#else	/* NOTYET */
	if (control) {
		error = EINVAL;
		goto release;
	}
#endif	/* NOTYET */

	/* Require destination as there may be >= 1 hooks on this node. */
	if (addr == NULL) {
		error = EDESTADDRREQ;
		goto release;
	}

	/*
	 * Allocate an expendable buffer for the path, chop off
	 * the sockaddr header, and make sure it's NUL terminated.
	 */
	len = sap->sg_len - 2;
	path = kmalloc(len + 1, M_NETGRAPH_PATH, M_WAITOK);
	bcopy(sap->sg_data, path, len);
	path[len] = '\0';

	/*
	 * Move the actual message out of mbufs into a linear buffer.
	 * Start by adding up the size of the data. (could use mh_len?)
	 */
	for (len = 0, m0 = m; m0 != NULL; m0 = m0->m_next)
		len += m0->m_len;

	/*
	 * Move the data into a linear buffer as well.
	 * Messages are not delivered in mbufs.
	 */
	msg = kmalloc(len + 1, M_NETGRAPH_MSG, M_WAITOK);
	m_copydata(m, 0, len, (char *)msg);

	if (msg->header.version != NG_VERSION) {
		kfree(msg, M_NETGRAPH_MSG);
		error = EINVAL;
		goto release;
	}

	/*
	 * Hack alert!
	 * We look into the message, and if it mkpeers a node of unknown
	 * type, we try to load that type. We must do this now, in the
	 * syscall thread, because if the message gets queued and applied
	 * later we will panic.
	 */
	if (msg->header.typecookie == NGM_GENERIC_COOKIE &&
	    msg->header.cmd == NGM_MKPEER) {
		struct ngm_mkpeer *const mkp = (struct ngm_mkpeer *) msg->data;
		struct ng_type *type;

		if ((type = ng_findtype(mkp->type)) == NULL) {
			char filename[NG_TYPESIZ + 3];
			int fileid;

			/* Not found, try to load it as a loadable module. */
			snprintf(filename, sizeof(filename), "ng_%s",
			    mkp->type);
			error = kern_kldload(curthread, filename, &fileid);
			if (error != 0) {
				kfree(msg, M_NETGRAPH_MSG);
				goto release;
			}

			/* See if type has been loaded successfully. */
			if ((type = ng_findtype(mkp->type)) == NULL) {
				kfree(msg, M_NETGRAPH_MSG);
				(void)kern_kldunload(curthread, fileid,
				    LINKER_UNLOAD_NORMAL);
				error = ENXIO;
				goto release;
			}
		}
	}

	item = ng_package_msg(msg, NG_WAITOK);
	if ((error = ng_address_path((pcbp->sockdata->node), item, path, 0))
	    != 0) {
#ifdef TRACE_MESSAGES
		printf("ng_address_path: errx=%d\n", error);
#endif
		goto release;
	}

#ifdef TRACE_MESSAGES
	printf("[%x]:<---------[socket]: c=<%d>cmd=%x(%s) f=%x #%d (%s)\n",
		item->el_dest->nd_ID,
		msg->header.typecookie,
		msg->header.cmd,
		msg->header.cmdstr,
		msg->header.flags,
		msg->header.token,
		item->el_dest->nd_type->name);
#endif
	SAVE_LINE(item);
	/*
	 * We do not want to return from syscall until the item
	 * is processed by destination node. We register callback
	 * on the item, which will update priv->error when item
	 * was applied.
	 * If ng_snd_item() has queued item, we sleep until
	 * callback wakes us up.
	 */
	bzero(&apply, sizeof(apply));
	apply.apply = ng_socket_item_applied;
	apply.context = priv;
	item->apply = &apply;
	priv->error = -1;

	error = ng_snd_item(item, 0);

	mtx_lock(&priv->mtx);
	if (priv->error == -1)
		msleep(priv, &priv->mtx, 0, "ngsock", 0);
	mtx_unlock(&priv->mtx);
	KASSERT(priv->error != -1,
	    ("ng_socket: priv->error wasn't updated"));
	error = priv->error;

release:
	if (path != NULL)
		kfree(path, M_NETGRAPH_PATH);
	if (control != NULL)
		m_freem(control);
	if (m != NULL)
		m_freem(m);
	return (error);
}
Example #29
0
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create ("ehci_qtd", 
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create ("ehci_qh", 
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_qh),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create ("ehci_itd", 
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create ("ehci_sitd", 
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, 0);
	if (ehci->periodic == NULL) {
		goto fail;
	}
	for (i = 0; i < ehci->periodic_size; i++)
		ehci->periodic [i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	ehci->pshadow = kmalloc (ehci->periodic_size * sizeof (void *), flags);
	if (ehci->pshadow == NULL) {
		goto fail;
	}
	memset (ehci->pshadow, 0, ehci->periodic_size * sizeof (void *));

	return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
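
Once the pools above exist, per-object allocation goes through dma_pool_alloc(), which returns both the CPU pointer and the bus address the controller will use. A sketch of the round trip for one qTD; this is a sketch only, with field names following the mainline ehci_qtd usage and error handling trimmed:

/* Sketch: allocate one qTD from the pool created in ehci_mem_init,
 * record its bus address for the hardware. Freed later with
 * dma_pool_free(ehci->qtd_pool, qtd, qtd->qtd_dma). */
static struct ehci_qtd *example_qtd_alloc(struct ehci_hcd *ehci, gfp_t flags)
{
	dma_addr_t dma;
	struct ehci_qtd *qtd;

	qtd = dma_pool_alloc(ehci->qtd_pool, flags, &dma);
	if (!qtd)
		return NULL;

	memset(qtd, 0, sizeof(*qtd));
	qtd->qtd_dma = dma;	/* the address the controller sees */
	return qtd;
}
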
Example #30
0
static int tcc_msensor_open(struct inode *inode, struct file *filp)
{
    int ret;
    unsigned char old_ctrl;
//    int num = MINOR(inode->i_rdev);

    msensor_dbg("tcc_msensor_open\n");
//    msensor_dbg(KERN_INFO "%s -> minor : %d, (cnt:%d) \n", __FUNCTION__, num, msensor_used_count+1);
    if (msensor_used_count == 0) {

        if(machine_is_m801_88()) { // M801_88 board
            gpio_request(TCC_GPF(8), NULL);
            tcc_gpio_config(TCC_GPF(8), GPIO_FN(0));
            gpio_direction_input(TCC_GPF(8));
        }

#ifdef CONFIG_I2C
        // Initialize I2C driver for AK8975C
        ret = i2c_add_driver(&msensor_i2c_driver);
        if(ret < 0)
        {
            msensor_dbg("%s() [Error] failed i2c_add_driver() = %d\n", __func__, ret);
            return ret;
        }
        ret = ak8975c_i2c_register();
        if(ret < 0)
        {
            msensor_dbg("%s() [Error] Failed register i2c client driver for ak8975c, return is %d\n", __func__, ret);
            return ret;
        }
#endif

        old_ctrl = MSENSOR_READ_DAT(0x00);
        if (old_ctrl == 0xFF)
        {
            msensor_dbg("%s: No such device or address\n", __func__);
#ifdef CONFIG_I2C
            i2c_unregister_device(msensor_i2c_client);
            i2c_del_driver(&msensor_i2c_driver);
            msensor_i2c_client = NULL;
#endif
            return -ENXIO;
        }
        msensor_dbg("%s: identification : %x\n", __func__, old_ctrl);

        old_ctrl = MSENSOR_READ_DAT(0x01);
        msensor_dbg("%s: ASIC revition ID : %x\n", __func__, old_ctrl);

        msensor_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
        if (msensor_timer == NULL)
        {
            msensor_dbg("%s: mem alloc fail\n", __func__);
#ifdef CONFIG_I2C
            i2c_unregister_device(msensor_i2c_client);
            i2c_del_driver(&msensor_i2c_driver);
            msensor_i2c_client = NULL;
#endif
            return -ENOMEM;
        }
        memset(msensor_timer, 0, sizeof(struct timer_list));
        msensor_dbg("%s: msensor_duration = %d\n", __func__, msensor_duration);
        msensor_timer_registertimer( msensor_timer, msensor_duration );
    }
    msensor_used_count++;
    return 0;
}