static int diag_create_log_mask_table(void)
{
	struct diag_log_mask_t *mask = NULL;
	uint8_t i;
	int err = 0;

	mutex_lock(&log_mask.lock);
	mask = (struct diag_log_mask_t *)(log_mask.ptr);
	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
		mask->equip_id = i;
		mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
		mask->num_items_tools = mask->num_items;
		if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
			mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
		else
			mask->range = MAX_ITEMS_PER_EQUIP_ID;
		mask->range_tools = mask->range;
		mask->ptr = kzalloc(mask->range, GFP_KERNEL);
		if (!mask->ptr) {
			err = -ENOMEM;
			break;
		}
		kmemleak_not_leak(mask->ptr);
	}
	mutex_unlock(&log_mask.lock);
	return err;
}
static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
			    int update_buf_len)
{
	if (!mask_info || mask_len < 0 || update_buf_len < 0)
		return -EINVAL;

	mask_info->status = DIAG_CTRL_MASK_INVALID;
	mask_info->mask_len = mask_len;
	mask_info->update_buf_len = update_buf_len;
	if (mask_len > 0) {
		mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
		if (!mask_info->ptr)
			return -ENOMEM;
		kmemleak_not_leak(mask_info->ptr);
	}
	if (update_buf_len > 0) {
		mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
		if (!mask_info->update_buf) {
			kfree(mask_info->ptr);
			return -ENOMEM;
		}
		kmemleak_not_leak(mask_info->update_buf);
	}
	mutex_init(&mask_info->lock);
	return 0;
}
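/*
 * Hedged counterpart sketch (not part of the source above): an init like
 * __diag_mask_init() implies a teardown that frees both buffers under the
 * same lock. The name __diag_mask_exit and the exact locking shown here are
 * assumptions; the shipped driver's cleanup path may differ.
 */
static void __diag_mask_exit(struct diag_mask_info *mask_info)
{
	if (!mask_info)
		return;
	mutex_lock(&mask_info->lock);
	kfree(mask_info->ptr);
	mask_info->ptr = NULL;
	kfree(mask_info->update_buf);
	mask_info->update_buf = NULL;
	mutex_unlock(&mask_info->lock);
}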
void diagfwd_cntl_init(void)
{
	driver->polling_reg_flag = 0;
	driver->diag_cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
	if (driver->buf_in_cntl == NULL) {
		driver->buf_in_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
		if (driver->buf_in_cntl == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_in_cntl);
	}
	if (driver->buf_in_qdsp_cntl == NULL) {
		driver->buf_in_qdsp_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
		if (driver->buf_in_qdsp_cntl == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_in_qdsp_cntl);
	}
	if (driver->buf_in_wcnss_cntl == NULL) {
		driver->buf_in_wcnss_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
		if (driver->buf_in_wcnss_cntl == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_in_wcnss_cntl);
	}
	platform_driver_register(&msm_smd_ch1_cntl_driver);
	platform_driver_register(&diag_smd_lite_cntl_driver);
	return;
err:
	pr_err("diag: Could not initialize diag buffers\n");
	kfree(driver->buf_in_cntl);
	kfree(driver->buf_in_qdsp_cntl);
	kfree(driver->buf_in_wcnss_cntl);
	if (driver->diag_cntl_wq)
		destroy_workqueue(driver->diag_cntl_wq);
}
void diagmem_init(struct diagchar_dev *driver, int index)
{
	struct diag_mempool_t *mempool = NULL;

	if (!driver)
		return;

	if (index < 0 || index >= NUM_MEMORY_POOLS) {
		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
		return;
	}

	mempool = &diag_mempools[index];
	if (mempool->pool) {
		pr_debug("diag: mempool %s is already initialized\n",
			 mempool->name);
		return;
	}
	if (mempool->itemsize <= 0 || mempool->poolsize <= 0) {
		pr_err("diag: Unable to initialize %s mempool, itemsize: %d poolsize: %d\n",
		       mempool->name, mempool->itemsize, mempool->poolsize);
		return;
	}

	mempool->pool = mempool_create_kmalloc_pool(mempool->poolsize,
						    mempool->itemsize);
	if (!mempool->pool)
		pr_err("diag: cannot allocate %s mempool\n", mempool->name);
	else
		kmemleak_not_leak(mempool->pool);

	spin_lock_init(&mempool->lock);
}
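/*
 * Hedged counterpart sketch for diagmem_init() above: tearing a pool down
 * again with mempool_destroy() and clearing the pointer so a later init can
 * re-create it. The real diagmem_exit() in the diag driver may do more; this
 * is only the destroy step implied by the init path.
 */
void diagmem_exit(struct diagchar_dev *driver, int index)
{
	struct diag_mempool_t *mempool = NULL;

	if (!driver || index < 0 || index >= NUM_MEMORY_POOLS)
		return;

	mempool = &diag_mempools[index];
	if (mempool->pool) {
		mempool_destroy(mempool->pool);
		mempool->pool = NULL;
	}
}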
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);
	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
			   struct device_node *of_node)
{
	int size;

	if (!irq_count)
		return -EINVAL;

	size = BITS_TO_LONGS(irq_count) * sizeof(long);
	pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size);
	bmp->bitmap_from_slab = slab_is_available();
	if (bmp->bitmap_from_slab)
		bmp->bitmap = kzalloc(size, GFP_KERNEL);
	else {
		bmp->bitmap = memblock_virt_alloc(size, 0);
		/* the bitmap won't be freed from memblock allocator */
		kmemleak_not_leak(bmp->bitmap);
	}

	if (!bmp->bitmap) {
		pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n");
		return -ENOMEM;
	}

	/* We zalloc'ed the bitmap, so all irqs are free by default */
	spin_lock_init(&bmp->lock);
	bmp->of_node = of_node_get(of_node);
	bmp->irq_count = irq_count;

	return 0;
}
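/*
 * Hedged usage sketch for msi_bitmap_alloc() above, assuming the companion
 * helpers msi_bitmap_alloc_hwirqs(), msi_bitmap_free_hwirqs() and
 * msi_bitmap_free() from arch/powerpc/sysdev/msi_bitmap.c are available.
 * example_msi_setup is a hypothetical caller, not from the source.
 */
static int example_msi_setup(struct device_node *np)
{
	struct msi_bitmap bmp;
	int hwirq, rc;

	rc = msi_bitmap_alloc(&bmp, 64, np);
	if (rc)
		return rc;

	hwirq = msi_bitmap_alloc_hwirqs(&bmp, 1);	/* grab one MSI */
	if (hwirq < 0) {
		msi_bitmap_free(&bmp);
		return hwirq;
	}

	/* ... wire the interrupt up here ... */

	msi_bitmap_free_hwirqs(&bmp, hwirq, 1);
	msi_bitmap_free(&bmp);
	return 0;
}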
static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);
	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}
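/*
 * Hedged lookup sketch that motivates the biased pointer stored above:
 * because section->page_ext holds (base - pfn * entry_size), a lookup can
 * index directly by the absolute pfn without subtracting the section's
 * first pfn. Modeled on mm/page_ext.c's lookup_page_ext(); exact details
 * vary by kernel version.
 */
struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_ext)
		return NULL;
	return (void *)section->page_ext + get_entry_size() * pfn;
}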
int diag_masks_init(void)
{
	int err = 0;

	err = diag_msg_mask_init();
	if (err)
		goto fail;

	err = diag_build_time_mask_init();
	if (err)
		goto fail;

	err = diag_log_mask_init();
	if (err)
		goto fail;

	err = diag_event_mask_init();
	if (err)
		goto fail;

	if (driver->buf_feature_mask_update == NULL) {
		driver->buf_feature_mask_update =
			kzalloc(sizeof(struct diag_ctrl_feature_mask) +
				FEATURE_MASK_LEN, GFP_KERNEL);
		if (driver->buf_feature_mask_update == NULL)
			goto fail;
		kmemleak_not_leak(driver->buf_feature_mask_update);
	}

	return 0;
fail:
	pr_err("diag: Could not initialize diag mask buffers\n");
	diag_masks_exit();
	return -ENOMEM;
}
static __init int sysctl_core_init(void)
{
	static struct ctl_table empty[1];

	kmemleak_not_leak(register_sysctl_paths(net_core_path, empty));
	register_net_sysctl_rotable(net_core_path, net_core_table);
	return register_pernet_subsys(&sysctl_core_ops);
}
int diag_mux_init(void)
{
#ifdef CONFIG_LGE_DM_APP
	int j = 0;
#endif

	logger = kzalloc(NUM_MUX_PROC * sizeof(struct diag_logger_t),
			 GFP_KERNEL);
	if (!logger)
		return -ENOMEM;
	kmemleak_not_leak(logger);

	usb_logger.mode = DIAG_USB_MODE;
	usb_logger.log_ops = &usb_log_ops;

	md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
	md_logger.log_ops = &md_log_ops;
	diag_md_init();

#ifdef CONFIG_LGE_DM_APP
	lge_dm_tty = kzalloc(sizeof(struct dm_tty), GFP_KERNEL);
	if (lge_dm_tty == NULL) {
		printk(KERN_DEBUG "diag: diag_mux_init failed to allocate lge_dm_tty\n");
	} else {
		lge_dm_tty->num_tbl_entries = driver->poolsize;
		lge_dm_tty->tbl = kzalloc(lge_dm_tty->num_tbl_entries *
					  sizeof(struct diag_buf_tbl_t),
					  GFP_KERNEL);
		if (lge_dm_tty->tbl) {
			for (j = 0; j < lge_dm_tty->num_tbl_entries; j++) {
				lge_dm_tty->tbl[j].buf = NULL;
				lge_dm_tty->tbl[j].len = 0;
				lge_dm_tty->tbl[j].ctx = 0;
				spin_lock_init(&(lge_dm_tty->tbl[j].lock));
			}
		} else {
			kfree(lge_dm_tty->tbl);
			lge_dm_tty->num_tbl_entries = 0;
			lge_dm_tty->ops = NULL;
		}
	}
#endif

	/*
	 * Set USB logging as the default logger. This is the mode
	 * Diag should be in when it initializes.
	 */
	logger = &usb_logger;
	return 0;
}
static int lookup_server(struct diag_socket_info *info)
{
	int ret = 0;
	struct server_lookup_args *args = NULL;
	struct sockaddr_msm_ipc *srv_addr = NULL;

	if (!info)
		return -EINVAL;

	args = kzalloc((sizeof(struct server_lookup_args) +
			sizeof(struct msm_ipc_server_info)), GFP_KERNEL);
	if (!args)
		return -ENOMEM;
	kmemleak_not_leak(args);

	args->lookup_mask = 0xFFFFFFFF;
	args->port_name.service = info->svc_id;
	args->port_name.instance = info->ins_id;
	args->num_entries_in_array = 1;
	args->num_entries_found = 0;

	ret = kernel_sock_ioctl(info->hdl, IPC_ROUTER_IOCTL_LOOKUP_SERVER,
				(unsigned long)args);
	if (ret < 0) {
		pr_err("diag: In %s, cannot find service for %s\n", __func__,
		       info->name);
		kfree(args);
		return -EFAULT;
	}

	srv_addr = &info->remote_addr;
	srv_addr->family = AF_MSM_IPC;
	srv_addr->address.addrtype = MSM_IPC_ADDR_ID;
	srv_addr->address.addr.port_addr.node_id = args->srv_info[0].node_id;
	srv_addr->address.addr.port_addr.port_id = args->srv_info[0].port_id;
	ret = args->num_entries_found;
	kfree(args);
	if (ret < 1)
		return -EIO;

	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s found server node: %d port: %d",
		 info->name, srv_addr->address.addr.port_addr.node_id,
		 srv_addr->address.addr.port_addr.port_id);
	return 0;
}
void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
{
	void *buf = NULL;
	int i = 0;
	unsigned long flags;
	struct diag_mempool_t *mempool = NULL;

	if (!driver)
		return NULL;

	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
		mempool = &diag_mempools[i];
		if (pool_type != mempool->id)
			continue;
		if (!mempool->pool) {
			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
					   mempool->name);
			break;
		}
		if (size == 0 || size > mempool->itemsize) {
			pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
					   mempool->name, size);
			break;
		}
		spin_lock_irqsave(&mempool->lock, flags);
		if (mempool->count < mempool->poolsize) {
			atomic_add(1, (atomic_t *)&mempool->count);
			buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
			kmemleak_not_leak(buf);
		}
		spin_unlock_irqrestore(&mempool->lock, flags);
		if (!buf) {
			pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
					     mempool->name, size,
					     mempool->itemsize,
					     mempool->count,
					     mempool->poolsize);
		}
		break;
	}

	return buf;
}
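/*
 * Hedged counterpart sketch, modeled on the allocation path above: a free
 * must return the buffer to the same pool and decrement the same count the
 * allocator incremented. The shipped diagmem_free() may differ in detail;
 * this only mirrors the idioms visible in diagmem_alloc().
 */
void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
{
	struct diag_mempool_t *mempool = NULL;
	unsigned long flags;
	int i;

	if (!driver || !buf)
		return;

	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
		mempool = &diag_mempools[i];
		if (pool_type != mempool->id || !mempool->pool)
			continue;
		spin_lock_irqsave(&mempool->lock, flags);
		mempool_free(buf, mempool->pool);
		atomic_add(-1, (atomic_t *)&mempool->count);
		spin_unlock_irqrestore(&mempool->lock, flags);
		break;
	}
}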
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
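/*
 * Hedged lookup sketch matching the (base - pfn) bias above: adding the
 * absolute pfn back recovers the entry for that page. Modeled on
 * mm/page_cgroup.c's sparsemem lookup_page_cgroup(); details may vary by
 * kernel version.
 */
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}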
int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
				     struct diag_ssid_range_t *range)
{
	if (!msg_mask || !range)
		return -EIO;
	if (range->ssid_last < range->ssid_first)
		return -EINVAL;
	msg_mask->ssid_first = range->ssid_first;
	msg_mask->ssid_last = range->ssid_last;
	msg_mask->ssid_last_tools = range->ssid_last;
	msg_mask->range = msg_mask->ssid_last - msg_mask->ssid_first + 1;
	if (msg_mask->range < MAX_SSID_PER_RANGE)
		msg_mask->range = MAX_SSID_PER_RANGE;
	msg_mask->range_tools = msg_mask->range;
	if (msg_mask->range > 0) {
		msg_mask->ptr = kzalloc(msg_mask->range * sizeof(uint32_t),
					GFP_KERNEL);
		if (!msg_mask->ptr)
			return -ENOMEM;
		kmemleak_not_leak(msg_mask->ptr);
	}
	return 0;
}
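/*
 * Hedged caller sketch: how a table of SSID ranges would be turned into
 * mask entries with the helper above. example_create_msg_mask_table and
 * its parameters are hypothetical names for illustration, not from the
 * source; the real table-building loop in the driver may differ.
 */
static int example_create_msg_mask_table(struct diag_msg_mask_t *mask,
					 struct diag_ssid_range_t *tbl,
					 int count)
{
	int i, err;

	for (i = 0; i < count; i++, mask++) {
		err = diag_create_msg_mask_table_entry(mask, &tbl[i]);
		if (err)
			return err;
	}
	return 0;
}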
void diag_masks_init(void)
{
	driver->event_status = DIAG_CTRL_MASK_INVALID;
	driver->msg_status = DIAG_CTRL_MASK_INVALID;
	driver->log_status = DIAG_CTRL_MASK_INVALID;

	if (driver->event_mask == NULL) {
		driver->event_mask = kzalloc(sizeof(struct diag_ctrl_event_mask),
					     GFP_KERNEL);
		if (driver->event_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->event_mask);
	}
	if (driver->msg_mask == NULL) {
		driver->msg_mask = kzalloc(sizeof(struct diag_ctrl_msg_mask),
					   GFP_KERNEL);
		if (driver->msg_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->msg_mask);
	}
	if (driver->log_mask == NULL) {
		driver->log_mask = kzalloc(sizeof(struct diag_ctrl_log_mask),
					   GFP_KERNEL);
		if (driver->log_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->log_mask);
	}
	if (driver->buf_msg_mask_update == NULL) {
		driver->buf_msg_mask_update = kzalloc(APPS_BUF_SIZE,
						      GFP_KERNEL);
		if (driver->buf_msg_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_msg_mask_update);
	}
	if (driver->buf_log_mask_update == NULL) {
		driver->buf_log_mask_update = kzalloc(APPS_BUF_SIZE,
						      GFP_KERNEL);
		if (driver->buf_log_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_log_mask_update);
	}
	if (driver->buf_event_mask_update == NULL) {
		driver->buf_event_mask_update = kzalloc(APPS_BUF_SIZE,
							GFP_KERNEL);
		if (driver->buf_event_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_event_mask_update);
	}
	if (driver->msg_masks == NULL) {
		driver->msg_masks = kzalloc(MSG_MASK_SIZE, GFP_KERNEL);
		if (driver->msg_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->msg_masks);
	}
	if (driver->buf_feature_mask_update == NULL) {
		driver->buf_feature_mask_update =
			kzalloc(sizeof(struct diag_ctrl_feature_mask) +
				FEATURE_MASK_LEN_BYTES, GFP_KERNEL);
		if (driver->buf_feature_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_feature_mask_update);
	}
	if (driver->feature_mask == NULL) {
		driver->feature_mask =
			kzalloc(sizeof(struct diag_ctrl_feature_mask),
				GFP_KERNEL);
		if (driver->feature_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->feature_mask);
	}
	diag_create_msg_mask_table();
	diag_event_num_bytes = 0;
	if (driver->log_masks == NULL) {
		driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL);
		if (driver->log_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->log_masks);
	}
	diag_log_mask_init();
	if (driver->event_masks == NULL) {
		driver->event_masks = kzalloc(EVENT_MASK_SIZE, GFP_KERNEL);
		if (driver->event_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->event_masks);
	}
	return;
err:
	pr_err("diag: Could not initialize diag mask buffers\n");
	kfree(driver->event_mask);
	kfree(driver->log_mask);
	kfree(driver->msg_mask);
	kfree(driver->msg_masks);
	kfree(driver->log_masks);
	kfree(driver->event_masks);
	kfree(driver->feature_mask);
	kfree(driver->buf_feature_mask_update);
}
static void __init common_log_register_log_buf(void)
{
	char **log_bufp;
	uint32_t *log_buf_lenp;
	uint32_t *first_idxp;
	struct msm_client_dump dump_log_buf, dump_first_idx;
	struct msm_dump_entry entry_log_buf, entry_first_idx;
	struct msm_dump_data *dump_data;

	log_bufp = (char **)kallsyms_lookup_name("log_buf");
	log_buf_lenp = (uint32_t *)kallsyms_lookup_name("log_buf_len");
	if (!log_bufp || !log_buf_lenp) {
		pr_err("Unable to find log_buf by kallsyms!\n");
		return;
	}
	first_idxp = (uint32_t *)kallsyms_lookup_name("log_first_idx");
	if (MSM_DUMP_MAJOR(msm_dump_table_version()) == 1) {
		dump_log_buf.id = MSM_LOG_BUF;
		dump_log_buf.start_addr = virt_to_phys(*log_bufp);
		dump_log_buf.end_addr = virt_to_phys(*log_bufp +
						     *log_buf_lenp);
		if (msm_dump_tbl_register(&dump_log_buf))
			pr_err("Unable to register %d.\n", dump_log_buf.id);
		dump_first_idx.id = MSM_LOG_BUF_FIRST_IDX;
		if (first_idxp) {
			dump_first_idx.start_addr = virt_to_phys(first_idxp);
			if (msm_dump_tbl_register(&dump_first_idx))
				pr_err("Unable to register %d.\n",
				       dump_first_idx.id);
		}
	} else {
		dump_data = kzalloc(sizeof(struct msm_dump_data), GFP_KERNEL);
		if (!dump_data) {
			pr_err("Unable to alloc data space.\n");
			return;
		}
		dump_data->len = *log_buf_lenp;
		dump_data->addr = virt_to_phys(*log_bufp);
		entry_log_buf.id = MSM_DUMP_DATA_LOG_BUF;
		entry_log_buf.addr = virt_to_phys(dump_data);
		if (msm_dump_data_register(MSM_DUMP_TABLE_APPS,
					   &entry_log_buf)) {
			kfree(dump_data);
			pr_err("Unable to register %d.\n", entry_log_buf.id);
		} else
			kmemleak_not_leak(dump_data);

		if (first_idxp) {
			dump_data = kzalloc(sizeof(struct msm_dump_data),
					    GFP_KERNEL);
			if (!dump_data) {
				pr_err("Unable to alloc data space.\n");
				return;
			}
			dump_data->addr = virt_to_phys(first_idxp);
			entry_first_idx.id = MSM_DUMP_DATA_LOG_BUF_FIRST_IDX;
			entry_first_idx.addr = virt_to_phys(dump_data);
			if (msm_dump_data_register(MSM_DUMP_TABLE_APPS,
						   &entry_first_idx)) {
				kfree(dump_data);
				pr_err("Unable to register %d.\n",
				       entry_first_idx.id);
			} else
				kmemleak_not_leak(dump_data);
		}
	}
}
void diag_masks_init(void)
{
	if (driver->event_mask == NULL) {
		driver->event_mask = kzalloc(sizeof(struct diag_ctrl_event_mask),
					     GFP_KERNEL);
		if (driver->event_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->event_mask);
	}
	if (driver->msg_mask == NULL) {
		driver->msg_mask = kzalloc(sizeof(struct diag_ctrl_msg_mask),
					   GFP_KERNEL);
		if (driver->msg_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->msg_mask);
	}
	if (driver->log_mask == NULL) {
		driver->log_mask = kzalloc(sizeof(struct diag_ctrl_log_mask),
					   GFP_KERNEL);
		if (driver->log_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->log_mask);
	}
	if (driver->buf_msg_mask_update == NULL) {
		driver->buf_msg_mask_update = kzalloc(APPS_BUF_SIZE,
						      GFP_KERNEL);
		if (driver->buf_msg_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_msg_mask_update);
	}
	if (driver->buf_log_mask_update == NULL) {
		driver->buf_log_mask_update = kzalloc(APPS_BUF_SIZE,
						      GFP_KERNEL);
		if (driver->buf_log_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_log_mask_update);
	}
	if (driver->buf_event_mask_update == NULL) {
		driver->buf_event_mask_update = kzalloc(APPS_BUF_SIZE,
							GFP_KERNEL);
		if (driver->buf_event_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_event_mask_update);
	}
	if (driver->msg_masks == NULL) {
		driver->msg_masks = kzalloc(MSG_MASK_SIZE, GFP_KERNEL);
		if (driver->msg_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->msg_masks);
	}
	diag_create_msg_mask_table();
	diag_event_num_bytes = 0;
	if (driver->log_masks == NULL) {
		driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL);
		if (driver->log_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->log_masks);
	}
	driver->log_masks_length = (sizeof(struct mask_info)) * MAX_EQUIP_ID;
	if (driver->event_masks == NULL) {
		driver->event_masks = kzalloc(EVENT_MASK_SIZE, GFP_KERNEL);
		if (driver->event_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->event_masks);
	}
#ifdef CONFIG_DIAG_OVER_USB
	INIT_WORK(&(driver->diag_modem_mask_update_work),
		  diag_modem_mask_update_fn);
	INIT_WORK(&(driver->diag_lpass_mask_update_work),
		  diag_lpass_mask_update_fn);
	INIT_WORK(&(driver->diag_wcnss_mask_update_work),
		  diag_wcnss_mask_update_fn);
#endif
	return;
err:
	pr_err("diag: Could not initialize diag mask buffers\n");
	kfree(driver->event_mask);
	kfree(driver->log_mask);
	kfree(driver->msg_mask);
	kfree(driver->msg_masks);
	kfree(driver->log_masks);
	kfree(driver->event_masks);
}
void diagfwd_init(void)
{
	int success;
	int i;

	wrap_enabled = 0;
	wrap_count = 0;
	diag_debug_buf_idx = 0;
	driver->read_len_legacy = 0;
	driver->use_device_tree = has_device_tree();
	mutex_init(&driver->diag_cntl_mutex);

	success = diag_smd_constructor(&driver->smd_data[MODEM_DATA],
				       MODEM_DATA, SMD_DATA_TYPE);
	if (!success)
		goto err;

	success = diag_smd_constructor(&driver->smd_data[LPASS_DATA],
				       LPASS_DATA, SMD_DATA_TYPE);
	if (!success)
		goto err;

	success = diag_smd_constructor(&driver->smd_data[WCNSS_DATA],
				       WCNSS_DATA, SMD_DATA_TYPE);
	if (!success)
		goto err;

	if (driver->usb_buf_out == NULL &&
	    (driver->usb_buf_out = kzalloc(USB_MAX_OUT_BUF,
					   GFP_KERNEL)) == NULL)
		goto err;
	kmemleak_not_leak(driver->usb_buf_out);
	if (driver->hdlc_buf == NULL &&
	    (driver->hdlc_buf = kzalloc(HDLC_MAX, GFP_KERNEL)) == NULL)
		goto err;
	kmemleak_not_leak(driver->hdlc_buf);
	if (driver->user_space_data == NULL)
		driver->user_space_data = kzalloc(USER_SPACE_DATA, GFP_KERNEL);
	if (driver->user_space_data == NULL)
		goto err;
	kmemleak_not_leak(driver->user_space_data);
	if (driver->client_map == NULL &&
	    (driver->client_map = kzalloc((driver->num_clients) *
					  sizeof(struct diag_client_map),
					  GFP_KERNEL)) == NULL)
		goto err;
	kmemleak_not_leak(driver->client_map);
	if (driver->buf_tbl == NULL)
		driver->buf_tbl = kzalloc(buf_tbl_size *
					  sizeof(struct diag_write_device),
					  GFP_KERNEL);
	if (driver->buf_tbl == NULL)
		goto err;
	kmemleak_not_leak(driver->buf_tbl);
	if (driver->data_ready == NULL &&
	    (driver->data_ready = kzalloc(driver->num_clients * sizeof(int),
					  GFP_KERNEL)) == NULL)
		goto err;
	kmemleak_not_leak(driver->data_ready);
	if (driver->table == NULL &&
	    (driver->table = kzalloc(diag_max_reg *
				     sizeof(struct diag_master_table),
				     GFP_KERNEL)) == NULL)
		goto err;
	kmemleak_not_leak(driver->table);

	if (driver->usb_read_ptr == NULL) {
		driver->usb_read_ptr = kzalloc(sizeof(struct diag_request),
					       GFP_KERNEL);
		if (driver->usb_read_ptr == NULL)
			goto err;
		kmemleak_not_leak(driver->usb_read_ptr);
	}
	if (driver->pkt_buf == NULL &&
	    (driver->pkt_buf = kzalloc(PKT_SIZE, GFP_KERNEL)) == NULL)
		goto err;
	kmemleak_not_leak(driver->pkt_buf);
	if (driver->apps_rsp_buf == NULL) {
		driver->apps_rsp_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL);
		if (driver->apps_rsp_buf == NULL)
			goto err;
		kmemleak_not_leak(driver->apps_rsp_buf);
	}
	driver->diag_wq = create_singlethread_workqueue("diag_wq");
#ifdef CONFIG_DIAG_OVER_USB
	INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn);
	INIT_WORK(&(driver->diag_read_work), diag_read_work_fn);
	driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver,
					  diag_usb_legacy_notifier);
	if (IS_ERR(driver->legacy_ch)) {
		printk(KERN_ERR "Unable to open USB diag legacy channel\n");
		goto err;
	}
#endif
	platform_driver_register(&msm_smd_ch1_driver);
	platform_driver_register(&diag_smd_lite_driver);

	return;
err:
	pr_err("diag: Could not initialize diag buffers\n");

	for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++)
		diag_smd_destructor(&driver->smd_data[i]);

	kfree(driver->buf_msg_mask_update);
	kfree(driver->buf_log_mask_update);
	kfree(driver->buf_event_mask_update);
	kfree(driver->usb_buf_out);
	kfree(driver->hdlc_buf);
	kfree(driver->client_map);
	kfree(driver->buf_tbl);
	kfree(driver->data_ready);
	kfree(driver->table);
	kfree(driver->pkt_buf);
	kfree(driver->usb_read_ptr);
	kfree(driver->apps_rsp_buf);
	kfree(driver->user_space_data);
	if (driver->diag_wq)
		destroy_workqueue(driver->diag_wq);
}
static int __init init_memory_dump(void)
{
	struct msm_dump_table *table;
	struct msm_dump_entry entry;
	struct device_node *np;
	void __iomem *imem_base;
	int ret;

	np = of_find_compatible_node(NULL, NULL,
				     "qcom,msm-imem-mem_dump_table");
	if (!np) {
		pr_err("mem dump base table DT node does not exist\n");
		return -ENODEV;
	}

	imem_base = of_iomap(np, 0);
	if (!imem_base) {
		pr_err("mem dump base table imem offset mapping failed\n");
		return -ENOMEM;
	}

	memdump.table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
	if (!memdump.table) {
		pr_err("mem dump base table allocation failed\n");
		ret = -ENOMEM;
		goto err0;
	}
	memdump.table->version = MSM_DUMP_TABLE_VERSION;
	memdump.table_phys = virt_to_phys(memdump.table);
	writel_relaxed(memdump.table_phys, imem_base);
	/* Ensure write to imem_base is complete before unmapping */
	mb();
	pr_info("MSM Memory Dump base table set up\n");
	iounmap(imem_base);

	table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
	if (!table) {
		pr_err("mem dump apps data table allocation failed\n");
		ret = -ENOMEM;
		goto err1;
	}
	kmemleak_not_leak(table);
	table->version = MSM_DUMP_TABLE_VERSION;

	entry.id = MSM_DUMP_TABLE_APPS;
	entry.addr = virt_to_phys(table);
	ret = msm_dump_table_register(&entry);
	if (ret) {
		pr_info("mem dump apps data table register failed\n");
		goto err2;
	}
	pr_info("MSM Memory Dump apps data table set up\n");

	return 0;
err2:
	kfree(table);
err1:
	kfree(memdump.table);
	return ret;
err0:
	iounmap(imem_base);
	return ret;
}
int diag_smd_constructor(struct diag_smd_info *smd_info, int peripheral,
			 int type)
{
	smd_info->peripheral = peripheral;
	smd_info->type = type;

	switch (peripheral) {
	case MODEM_DATA:
		smd_info->peripheral_mask = DIAG_CON_MPSS;
		break;
	case LPASS_DATA:
		smd_info->peripheral_mask = DIAG_CON_LPASS;
		break;
	case WCNSS_DATA:
		smd_info->peripheral_mask = DIAG_CON_WCNSS;
		break;
	default:
		pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
		       __func__, peripheral);
		goto err;
	}

	smd_info->ch = 0;
	smd_info->ch_save = 0;

	if (smd_info->buf_in_1 == NULL) {
		smd_info->buf_in_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
		if (smd_info->buf_in_1 == NULL)
			goto err;
		kmemleak_not_leak(smd_info->buf_in_1);
	}

	if (smd_info->write_ptr_1 == NULL) {
		smd_info->write_ptr_1 = kzalloc(sizeof(struct diag_request),
						GFP_KERNEL);
		if (smd_info->write_ptr_1 == NULL)
			goto err;
		kmemleak_not_leak(smd_info->write_ptr_1);
	}

	/* The smd data type needs two buffers */
	if (smd_info->type == SMD_DATA_TYPE) {
		if (smd_info->buf_in_2 == NULL) {
			smd_info->buf_in_2 = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
			if (smd_info->buf_in_2 == NULL)
				goto err;
			kmemleak_not_leak(smd_info->buf_in_2);
		}
		if (smd_info->write_ptr_2 == NULL) {
			smd_info->write_ptr_2 =
				kzalloc(sizeof(struct diag_request),
					GFP_KERNEL);
			if (smd_info->write_ptr_2 == NULL)
				goto err;
			kmemleak_not_leak(smd_info->write_ptr_2);
		}
	}

	INIT_WORK(&(smd_info->diag_read_smd_work), diag_read_smd_work_fn);

	/*
	 * The update function assigned to the diag_notify_update_smd_work
	 * work_struct is meant to be used for updating that is not to
	 * be done in the context of the smd notify function. The
	 * notify_context variable can be used for passing additional
	 * information to the update function.
	 */
	smd_info->notify_context = 0;
	if (type == SMD_DATA_TYPE)
		INIT_WORK(&(smd_info->diag_notify_update_smd_work),
			  diag_clean_reg_fn);
	else if (type == SMD_CNTL_TYPE)
		INIT_WORK(&(smd_info->diag_notify_update_smd_work),
			  diag_mask_update_fn);
	else if (type == SMD_DCI_TYPE)
		INIT_WORK(&(smd_info->diag_notify_update_smd_work),
			  diag_update_smd_dci_work_fn);
	else {
		pr_err("diag: In %s, unknown type, type: %d\n",
		       __func__, type);
		goto err;
	}

	/*
	 * Set function ptr for function to call to process the data that
	 * was just read from the smd channel
	 */
	if (type == SMD_DATA_TYPE)
		smd_info->process_smd_read_data = diag_process_smd_read_data;
	else if (type == SMD_CNTL_TYPE)
		smd_info->process_smd_read_data =
			diag_process_smd_cntl_read_data;
	else if (type == SMD_DCI_TYPE)
		smd_info->process_smd_read_data =
			diag_process_smd_dci_read_data;
	else {
		pr_err("diag: In %s, unknown type, type: %d\n",
		       __func__, type);
		goto err;
	}

	return 1;
err:
	kfree(smd_info->buf_in_1);
	kfree(smd_info->buf_in_2);
	kfree(smd_info->write_ptr_1);
	kfree(smd_info->write_ptr_2);
	return 0;
}
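/*
 * Hedged destructor sketch: the constructor's error path above already
 * shows which buffers must be released on teardown. Only that buffer
 * cleanup is sketched here; the real diag_smd_destructor() in the driver
 * also tears down the channel itself (closing smd, releasing wakeup
 * sources, etc.), which is omitted as an assumption.
 */
void diag_smd_destructor(struct diag_smd_info *smd_info)
{
	if (!smd_info)
		return;
	kfree(smd_info->buf_in_1);
	kfree(smd_info->buf_in_2);
	kfree(smd_info->write_ptr_1);
	kfree(smd_info->write_ptr_2);
}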