static gboolean
port_dispatch (GSource *source, GSourceFunc callback, gpointer user_data)
{
    node_t *f;
    uint_t nget = 0;
    uint_t total = 0;

    FK_W ("%s 0x%p fd %d\n", __func__, source, PGPFD(source)->fd);

    G_LOCK (fen_lock);
    do {
        nget = 1;
        if (port_getn (PGPFD(source)->fd, pevents, PE_ALLOC, &nget,
              &zero_wait) == 0) {
            uint_t i;
            for (i = 0; i < nget; i++) {
                f = (node_t *)pevents[i].portev_user;
                if (pevents[i].portev_source == PORT_SOURCE_FILE) {
                    NODE_CLE_STATE (f, NODE_STATE_ASSOCIATED);
                    NODE_SET_STATE (f, NODE_STATE_HAS_EVENTS);
                    if (HAS_NO_EXCEPTION_EVENTS (pevents[i].portev_events)) {
                        /* If the events do not show that the file was
                         * deleted, update its timestamp now to avoid
                         * missing events next time. */
                        if (node_lstat (f) != 0 /* || port_add (f) != 0 */) {
                            /* The file has been deleted. */
                            pevents[i].portev_events |= FILE_DELETE;
                        }
                    }
                    /* Queue the event and wait for processing. */
                    g_queue_push_tail (g_eventq,
                      node_event_new (pevents[i].portev_events, (gpointer)f));
                } else {
                    FK_W ("[kernel] unknown portev_source %d\n",
                      pevents[i].portev_source);
                }
            }
            total += nget;
        } else {
            FK_W ("[kernel] port_getn %s\n", g_strerror (errno));
            break;
        }
    } while (nget == PE_ALLOC);
    G_UNLOCK (fen_lock);

    if (total > 0 && callback) {
        FK_W ("[kernel] got %u events in total\n", total);
        return callback (user_data);
    }
    return TRUE;
}
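/*
 * A minimal sketch (not part of the original file) of how port_dispatch is
 * expected to be wired into GLib. The psource_t layout, the psource_funcs
 * table, and this psource_new() body are assumptions for illustration; the
 * real file defines PGPFD(), presumably as a cast to the embedded GPollFD.
 * The Solaris event-port fd is wrapped in a GPollFD, registered with the
 * source, and the source is attached to the default main context so GLib
 * polls the fd and calls port_dispatch when events are pending.
 */
typedef struct {
    GSource source;   /* must come first so PGPFD() can cast GSource* */
    GPollFD gpfd;     /* wraps the fd returned by port_create() */
} psource_t;

static gboolean
port_prepare (GSource *source, gint *timeout)
{
    *timeout = -1;    /* no timeout; wait for fd readiness */
    return FALSE;
}

static gboolean
port_check (GSource *source)
{
    return (PGPFD(source)->revents & G_IO_IN) != 0;
}

static GSourceFuncs psource_funcs = {
    port_prepare,
    port_check,
    port_dispatch,    /* drains the event port, see above */
    NULL,             /* finalize */
};

static GSource *
psource_new (void)
{
    GSource *source = g_source_new (&psource_funcs, sizeof (psource_t));

    /* Error handling omitted in this sketch: port_create() returns -1
     * on failure. */
    PGPFD(source)->fd = port_create ();
    PGPFD(source)->events = G_IO_IN;
    g_source_add_poll (source, PGPFD(source));
    g_source_attach (source, NULL);   /* default main context */
    return source;
}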
/*
 * port_add:
 * @f: the node to associate with an event port.
 *
 * Unsafe; the caller must hold fen_lock.
 * port_add associates a GSource with @f->source.
 */
gint
port_add (node_t *f)
{
    GSource *source;

    g_assert (f);
    g_assert (NODE_HAS_FLAG (f, NODE_FLAG_STAT_UPDATED));

    source = f->source;
    FK_W ("%s [0x%p] %s\n", __func__, f, NODE_NAME(f));

    /* if (!NODE_HAS_FLAG(f, NODE_FLAG_STAT_DONE)) { */
    /*     if (NODE_STAT(f) != 0) { */
    /*         return errno; */
    /*     } */
    /* } */

    /* Try to reuse f->source. The underlying port may be serving other
     * requests (e.g. if f was deleted a long time ago), so if the port
     * is full we try to open a new one. */
    if (!source) {
start_over:
        /* Try the next visible source. */
        if (pn_visible_list) {
            source = (GSource *)pn_visible_list->data;
        } else if ((source = psource_new ()) != NULL) {
            g_assert (g_list_find (pn_visible_list, source) == NULL);
            pn_visible_list = g_list_prepend (pn_visible_list, source);
        }
    }

    if (port_associate (PGPFD(source)->fd, PORT_SOURCE_FILE,
          (uintptr_t)FILE_OBJECT(f), CONCERNED_EVENTS, (void *)f) == 0) {
        f->source = source;
        NODE_SET_STATE (f, NODE_STATE_ASSOCIATED);
        NODE_CLE_FLAG (f, NODE_FLAG_STAT_UPDATED);
        FK_W ("PORT_ASSOCIATE 0x%p OK\n", f);
        return 0;
    } else if (errno == EAGAIN) {
        /* The port is full: drop it from the visible list and retry with
         * another (possibly newly created) port. */
        pn_visible_list = g_list_remove (pn_visible_list, source);
        goto start_over;
    } else if (errno == ENOENT) {
        /* The file no longer exists. */
    } else if (errno == ENOTSUP) {
        /* The file system is not supported. Monitoring it currently makes
         * no sense, so clear the stat info and return 0 to ignore this
         * node. If the need arises, a polling fallback could be added. */
        NODE_CLE_FLAG (f, NODE_FLAG_STAT_UPDATED);
        return 0;
    } else {
        FK_W ("PORT_ASSOCIATE 0x%p %s\n", f, g_strerror (errno));
    }

    /* Whether or not the association succeeded, the stat info is now
     * out of date, so clear it. */
    NODE_CLE_FLAG (f, NODE_FLAG_STAT_UPDATED);
    return errno;
}
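/*
 * Illustrative caller (an assumption, not from the original file): port_add
 * must run with fen_lock held and with NODE_FLAG_STAT_UPDATED set, so a
 * typical (re)association path refreshes the stat info first. That
 * node_lstat() sets NODE_FLAG_STAT_UPDATED on success is assumed here.
 */
static gint
node_reassociate (node_t *f)
{
    gint ret;

    G_LOCK (fen_lock);
    /* Refresh the timestamp; port_add asserts NODE_FLAG_STAT_UPDATED. */
    if (node_lstat (f) != 0) {
        ret = errno;
        G_UNLOCK (fen_lock);
        return ret;
    }
    ret = port_add (f);
    G_UNLOCK (fen_lock);
    return ret;
}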
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 */
int node_allocate(struct proc_object *hprocessor,
		  const struct dsp_uuid *node_uuid,
		  const struct dsp_cbdata *pargs,
		  const struct dsp_nodeattrin *attr_in,
		  struct node_res_object **noderes,
		  struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager handle */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);

	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;
	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If the processor is in an error state, don't attempt to send it
	 * a message. */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support node dynamic heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user-allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* Check for a page-aligned heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x\n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
						task_arg_obj.
						udsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/* Allocate dsp_streamconnect array for device, task, and
		 * dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
				node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintaining stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
					ptask_args->strm_in_def == NULL)) ||
			    (MAX_OUTPUTS(pnode) > 0 && (pnode->outputs == NULL ||
					ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
					   GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/* Get the shared mem mgr for this node's dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * with default attributes */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
							   GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */
		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare the value read from the node properties; if it matches
	 * STACKSEGLABEL, read the address of STACKSEGLABEL, calculate the
	 * GPP address, read the value at that address, and override the
	 * stack_seg value in the task args. */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *)pnode);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);
	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
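/*
 * Illustrative caller sketch (not from the original driver): the function
 * name and attribute values below are assumptions. A typical allocation
 * path passes a processor handle, the node's UUID from the DCD database,
 * optional create arguments, and optional attribute overrides; on success
 * the returned node_res_object is used for the subsequent node_create() /
 * node_run() calls.
 */
static int example_allocate_node(struct proc_object *hprocessor,
				 const struct dsp_uuid *node_uuid,
				 struct process_context *pr_ctxt)
{
	struct node_res_object *node_res;
	struct dsp_nodeattrin attr_in = {
		.utimeout = 10000,	/* assumed timeout override */
		.prio = 5,		/* assumed priority in [min_pri, max_pri] */
		/* pgpp_virt_addr left NULL: no user node heap is mapped */
	};
	int status;

	/* pargs may be NULL when the node takes no inline create payload. */
	status = node_allocate(hprocessor, node_uuid, NULL, &attr_in,
			       &node_res, pr_ctxt);
	if (status)
		pr_err("%s: node_allocate failed: 0x%x\n", __func__, status);
	return status;
}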