/*
 * kasan_module_alloc() - set up shadow memory for a module mapping.
 * @addr: start of the module's vmalloc area (must map to a page-aligned
 *        shadow address).
 * @size: size of the module area in bytes.
 *
 * Allocates and maps zeroed shadow pages covering @addr..@addr+@size and
 * tags the module's vm_area with VM_KASAN so the shadow can be torn down
 * together with the module area later.
 *
 * Returns 0 on success, -EINVAL if the shadow start is not page aligned,
 * -ENOMEM if the shadow mapping cannot be allocated.
 */
int kasan_module_alloc(void *addr, size_t size)
{
	unsigned long shadow_begin = (unsigned long)kasan_mem_to_shadow(addr);
	size_t shadow_bytes = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
				       PAGE_SIZE);
	void *shadow;

	if (WARN_ON(!PAGE_ALIGNED(shadow_begin)))
		return -EINVAL;

	shadow = __vmalloc_node_range(shadow_bytes, 1, shadow_begin,
				      shadow_begin + shadow_bytes,
				      GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				      VM_NO_GUARD, NUMA_NO_NODE,
				      __builtin_return_address(0));
	if (!shadow)
		return -ENOMEM;

	/* Mark the module area as shadow-backed. */
	find_vm_area(addr)->flags |= VM_KASAN;
	/* Only reference is inside vmalloc bookkeeping - not a real leak. */
	kmemleak_ignore(shadow);

	return 0;
}
/*
 * kasan_mem_notifier() - memory-hotplug callback keeping KASAN shadow in
 * sync with onlined/offlined memory blocks.
 *
 * MEM_GOING_ONLINE: allocate and map shadow for the incoming range.
 * MEM_CANCEL_ONLINE / MEM_OFFLINE: free that shadow again.
 *
 * Fix: previously only MEM_OFFLINE freed the shadow.  When onlining is
 * aborted after MEM_GOING_ONLINE succeeded, the chain delivers
 * MEM_CANCEL_ONLINE instead, and the shadow allocated above was leaked.
 * Handle MEM_CANCEL_ONLINE the same way as MEM_OFFLINE.
 */
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	/* Hotplugged ranges must cover whole shadow pages. */
	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
		/* Onlining failed: undo the MEM_GOING_ONLINE allocation. */
		/* fallthrough */
	case MEM_OFFLINE:
		vfree((void *)shadow_start);
		break;
	}

	return NOTIFY_OK;
}
static int config_xx(struct usf_xx_type *usf_xx, struct us_xx_info_type *config) { int rc = 0; uint16_t data_map_size = 0; uint16_t min_map_size = 0; if ((usf_xx == NULL) || (config == NULL)) return -EINVAL; if ((config->buf_size == 0) || (config->buf_size > USF_MAX_BUF_SIZE) || (config->buf_num == 0) || (config->buf_num > USF_MAX_BUF_NUM)) { pr_err("%s: wrong params: buf_size=%d; buf_num=%d\n", __func__, config->buf_size, config->buf_num); return -EINVAL; } data_map_size = sizeof(usf_xx->encdec_cfg.cfg_common.data_map); min_map_size = min(data_map_size, config->port_cnt); if (config->client_name != NULL) { if (strncpy_from_user(usf_xx->client_name, config->client_name, sizeof(usf_xx->client_name) - 1) < 0) { pr_err("%s: get client name failed\n", __func__); return -EINVAL; } } pr_debug("%s: name=%s; buf_size:%d; dev_id:0x%x; sample_rate:%d\n", __func__, usf_xx->client_name, config->buf_size, config->dev_id, config->sample_rate); pr_debug("%s: buf_num:%d; format:%d; port_cnt:%d; data_size=%d\n", __func__, config->buf_num, config->stream_format, config->port_cnt, config->params_data_size); pr_debug("%s: id[0]=%d, id[1]=%d, id[2]=%d, id[3]=%d, id[4]=%d,\n", __func__, config->port_id[0], config->port_id[1], config->port_id[2], config->port_id[3], config->port_id[4]); pr_debug("id[5]=%d, id[6]=%d, id[7]=%d\n", config->port_id[5], config->port_id[6], config->port_id[7]); /* q6usm allocation & configuration */ usf_xx->buffer_size = config->buf_size; usf_xx->buffer_count = config->buf_num; usf_xx->encdec_cfg.cfg_common.bits_per_sample = config->bits_per_sample; usf_xx->encdec_cfg.cfg_common.sample_rate = config->sample_rate; /* AFE port e.g. 
AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX */ usf_xx->encdec_cfg.cfg_common.dev_id = config->dev_id; usf_xx->encdec_cfg.cfg_common.ch_cfg = config->port_cnt; memcpy((void *)&usf_xx->encdec_cfg.cfg_common.data_map, (void *)config->port_id, min_map_size); if (rc) { pr_err("%s: ports offsets copy failure\n", __func__); return -EINVAL; } usf_xx->encdec_cfg.format_id = config->stream_format; usf_xx->encdec_cfg.params_size = config->params_data_size; usf_xx->user_upd_info_na = 1; /* it's used in US_GET_TX_UPDATE */ if (config->params_data_size > 0) { /* transparent data copy */ usf_xx->encdec_cfg.params = kzalloc(config->params_data_size, GFP_KERNEL); /* False memory leak here - pointer in packed struct * * is undetected by kmemleak tool */ kmemleak_ignore(usf_xx->encdec_cfg.params); if (usf_xx->encdec_cfg.params == NULL) { pr_err("%s: params memory alloc[%d] failure\n", __func__, config->params_data_size); return -ENOMEM; } rc = copy_from_user(usf_xx->encdec_cfg.params, config->params_data, config->params_data_size); if (rc) { pr_err("%s: transparent data copy failure\n", __func__); kfree(usf_xx->encdec_cfg.params); usf_xx->encdec_cfg.params = NULL; return -EFAULT; } pr_debug("%s: params_size[%d]; params[%d,%d,%d,%d, %d]\n", __func__, config->params_data_size, usf_xx->encdec_cfg.params[0], usf_xx->encdec_cfg.params[1], usf_xx->encdec_cfg.params[2], usf_xx->encdec_cfg.params[3], usf_xx->encdec_cfg.params[4] ); } usf_xx->usc = q6usm_us_client_alloc(usf_xx->cb, (void *)usf_xx); if (!usf_xx->usc) { pr_err("%s: Could not allocate q6usm client\n", __func__); rc = -EFAULT; } return rc; }
/** * platform_device_register_full - add a platform-level device with * resources and platform-specific data * * @pdevinfo: data used to create device * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo) { int ret = -ENOMEM; struct platform_device *pdev; pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { /* * This memory isn't freed when the device is put, * I don't have a nice idea for that though. Conceptually * dma_mask in struct device should not be a pointer. * See http://thread.gmane.org/gmane.linux.kernel.pci/9081 */ pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); if (!pdev->dev.dma_mask) goto err; kmemleak_ignore(pdev->dev.dma_mask); *pdev->dev.dma_mask = pdevinfo->dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } ret = platform_device_add_resources(pdev, pdevinfo->res, pdevinfo->num_res); if (ret) goto err; ret = platform_device_add_data(pdev, pdevinfo->data, pdevinfo->size_data); if (ret) goto err; if (pdevinfo->properties) { ret = platform_device_add_properties(pdev, pdevinfo->properties); if (ret) goto err; } ret = platform_device_add(pdev); if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); platform_device_put(pdev); return ERR_PTR(ret); } return pdev; }