/*
 * Exported interface to unregister a LDC endpoint with
 * the channel nexus.
 *
 * Returns 0 on success, EINVAL if the channel id is not on the
 * nexus channel list, or ENXIO if Tx/Rx handlers are still attached.
 */
static int
cnex_unreg_chan(dev_info_t *dip, uint64_t id)
{
	cnex_ldc_t *cldcp, *prev_cldcp;
	cnex_soft_state_t *cnex_ssp;
	int instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* find and remove channel from list */
	mutex_enter(&cnex_ssp->clist_lock);
	prev_cldcp = NULL;
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		prev_cldcp = cldcp;
		cldcp = cldcp->next;
	}

	if (cldcp == NULL) {
		/* fix: id is uint64_t; %d mismatched the width (was %d) */
		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	/* refuse to unregister while interrupt handlers are installed */
	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}

	/* unlink from the singly-linked channel list */
	if (prev_cldcp)
		prev_cldcp->next = cldcp->next;
	else
		cnex_ssp->clist = cldcp->next;

	mutex_exit(&cnex_ssp->clist_lock);

	/* destroy mutex */
	mutex_destroy(&cldcp->lock);

	/* free channel */
	kmem_free(cldcp, sizeof (*cldcp));

	return (0);
}
/*
 * vdds_match_niu_node -- callback function to verify a node is the
 * NIU Hybrid node.  Walk callback: matches a "network" node whose
 * reg addr_hi equals the HV cookie in the walk argument; holds the
 * branch and records the dip on a match.
 */
static int
vdds_match_niu_node(dev_info_t *dip, void *arg)
{
	vdds_cb_arg_t *warg = (vdds_cb_arg_t *)arg;
	char *name;
	vdds_reg_t *reg_p;
	uint_t reglen;
	int rv;
	uint32_t addr_hi;

	name = ddi_node_name(dip);
	if (strcmp(name, "network") != 0) {
		return (DDI_WALK_CONTINUE);
	}
	/*
	 * Fix: the address-of operators had been corrupted into a
	 * '®' character ("®_p"/"®len"); restored to &reg_p/&reglen.
	 */
	rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "reg", (int **)&reg_p, &reglen);
	if (rv != DDI_PROP_SUCCESS) {
		DWARN(NULL, "Failed to get reg property dip=0x%p", dip);
		return (DDI_WALK_CONTINUE);
	}

	addr_hi = reg_p->addr_hi;
	DBG1(NULL, "addr_hi = 0x%x dip=0x%p", addr_hi, dip);
	ddi_prop_free(reg_p);
	if (addr_hi == HVCOOKIE(warg->cookie)) {
		warg->dip = dip;
		/* hold the branch so the node can't detach under us */
		if (!e_ddi_branch_held(dip))
			e_ddi_branch_hold(dip);
		DBG1(NULL, "Found dip = 0x%p", dip);
		return (DDI_WALK_TERMINATE);
	}
	return (DDI_WALK_CONTINUE);
}
/*
 * Broadcast an IP-activity message for the given resource group and
 * priority over this agent's socket.
 *
 * Returns true on success, false if the Broadcast call reported a
 * GError (the error is logged and freed here).
 *
 * NOTE(review): 'mesg' is not freed on either path here — presumably
 * Broadcast takes ownership of the message; confirm against the
 * agent API, otherwise this leaks.  Also the declared return type is
 * int while true/false are returned — works, but bool would be clearer.
 */
int IpCmdAgent::send_broadcast(enum eResGrp rg, enum ePRIO prio)
{
	GError *gerror = NULL;
	IpcomMessage *mesg = genIpActivityMessage(rg, prio);

	_agent->Broadcast(_agent, getSocket(), mesg, &gerror);
	if (gerror) {
		/* log and release the error before reporting failure */
		DWARN("%s\n", gerror->message);
		g_error_free(gerror);
		return false;
	}
	return true;
}
/*
 * Internal function to disable an interrupt and wait
 * for any pending interrupts to finish.
 *
 * Returns 0 on success (including wait timeout) or ENXIO if the
 * hypervisor calls fail.
 */
static int
cnex_intr_dis_wait(cnex_soft_state_t *ssp, cnex_intr_t *iinfo)
{
	int status;
	int state;
	int attempt = 0;

	/* Mark the interrupt invalid so no new ones are delivered. */
	status = hvldc_intr_setvalid(ssp->cfghdl, iinfo->ino,
	    HV_INTR_NOTVALID);
	if (status) {
		DWARN("cnex_intr_dis_wait: ino=0x%llx, can't set valid\n",
		    iinfo->ino);
		return (ENXIO);
	}

	/*
	 * Make a best effort to wait for pending interrupts
	 * to finish. There is not much we can do if we timeout.
	 */
	for (;;) {
		status = hvldc_intr_getstate(ssp->cfghdl, iinfo->ino, &state);
		if (status) {
			DWARN("cnex_intr_dis_wait: ino=0x%llx, can't get "
			    "state\n", iinfo->ino);
			return (ENXIO);
		}
		/* done as soon as the interrupt is no longer in flight */
		if (state != HV_INTR_DELIVERED_STATE)
			break;
		drv_usecwait(cnex_wait_usecs);
		/* give up when panicking or out of retries */
		if (panicstr || ++attempt > cnex_wait_retries)
			break;
	}
	return (0);
}
/*
 * vdds_match_niu_nexus -- callback function to verify a node is the
 * NIU nexus node.  Walk callback: matches an "niu" node whose reg
 * handle equals the NIU config handle of the walk argument's cookie;
 * holds the branch and records the dip on a match.
 */
static int
vdds_match_niu_nexus(dev_info_t *dip, void *arg)
{
	vdds_cb_arg_t *warg = (vdds_cb_arg_t *)arg;
	vdds_reg_t *reg_p;
	char *name;
	uint64_t hdl;
	uint_t reglen;
	int rv;

	if (dip == ddi_root_node()) {
		return (DDI_WALK_CONTINUE);
	}
	name = ddi_node_name(dip);
	if (strcmp(name, "niu") != 0) {
		return (DDI_WALK_CONTINUE);
	}
	/*
	 * Fix: the address-of operators had been corrupted into a
	 * '®' character ("®_p"/"®len"); restored to &reg_p/&reglen.
	 */
	rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "reg", (int **)&reg_p, &reglen);
	if (rv != DDI_PROP_SUCCESS) {
		DWARN(NULL, "Failed to get reg property dip=0x%p", dip);
		return (DDI_WALK_CONTINUE);
	}

	/* low 28 bits of addr_hi hold the nexus config handle */
	hdl = reg_p->addr_hi & 0x0FFFFFFF;
	ddi_prop_free(reg_p);
	DBG2(NULL, "Handle = 0x%lx dip=0x%p", hdl, dip);
	if (hdl == NIUCFGHDL(warg->cookie)) {
		/* Hold before returning */
		if (!e_ddi_branch_held(dip))
			e_ddi_branch_hold(dip);
		warg->dip = dip;
		DBG2(NULL, "Found dip = 0x%p", dip);
		return (DDI_WALK_TERMINATE);
	}
	return (DDI_WALK_CONTINUE);
}
/*
 * Internal function to replace the CPU used by an interrupt
 * during interrupt redistribution.
 *
 * Quiesces the interrupt if it is live, retargets it at a CPU chosen
 * by intr_dist_cpuid(), records the device weight, and restores the
 * previous valid setting.
 */
static int
cnex_intr_new_cpu(cnex_soft_state_t *ssp, cnex_intr_t *iinfo)
{
	int err;
	int valid_state;

	/* Read the current valid/invalid setting of the interrupt. */
	err = hvldc_intr_getvalid(ssp->cfghdl, iinfo->ino, &valid_state);
	if (err) {
		DWARN("cnex_intr_new_cpu: rx ino=0x%llx, can't get valid\n",
		    iinfo->ino);
		return (err);
	}

	/* Quiesce the interrupt before retargeting if it is live. */
	if (valid_state == HV_INTR_VALID) {
		err = cnex_intr_dis_wait(ssp, iinfo);
		if (err)
			return (err);
	}

	/* Choose a new target CPU and retarget the interrupt. */
	iinfo->cpuid = intr_dist_cpuid();
	(void) hvldc_intr_settarget(ssp->cfghdl, iinfo->ino, iinfo->cpuid);
	intr_dist_cpuid_add_device_weight(iinfo->cpuid, iinfo->dip,
	    iinfo->weight);

	/* Restore the valid setting if the interrupt was live before. */
	if (valid_state == HV_INTR_VALID) {
		(void) hvldc_intr_setvalid(ssp->cfghdl, iinfo->ino,
		    HV_INTR_VALID);
	}

	return (0);
}
/* * Add Tx/Rx interrupt handler for the channel */ static int cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype, uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2) { int rv, idx, pil; cnex_ldc_t *cldcp; cnex_intr_t *iinfo; cnex_soft_state_t *cnex_ssp; int instance; /* Get device instance and structure */ instance = ddi_get_instance(dip); cnex_ssp = ddi_get_soft_state(cnex_state, instance); /* get channel info */ mutex_enter(&cnex_ssp->clist_lock); cldcp = cnex_ssp->clist; while (cldcp) { if (cldcp->id == id) break; cldcp = cldcp->next; } if (cldcp == NULL) { DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id); mutex_exit(&cnex_ssp->clist_lock); return (EINVAL); } mutex_exit(&cnex_ssp->clist_lock); /* get channel lock */ mutex_enter(&cldcp->lock); /* get interrupt type */ if (itype == CNEX_TX_INTR) { iinfo = &(cldcp->tx); } else if (itype == CNEX_RX_INTR) { iinfo = &(cldcp->rx); } else { DWARN("cnex_add_intr: invalid interrupt type\n", id); mutex_exit(&cldcp->lock); return (EINVAL); } /* check if a handler is already added */ if (iinfo->hdlr != 0) { DWARN("cnex_add_intr: interrupt handler exists\n"); mutex_exit(&cldcp->lock); return (EINVAL); } /* save interrupt handler info */ iinfo->hdlr = hdlr; iinfo->arg1 = arg1; iinfo->arg2 = arg2; /* save data for DTrace probes used by intrstat(1m) */ iinfo->dip = cldcp->dip; iinfo->id = cldcp->id; iinfo->icookie = MINVINTR_COOKIE + iinfo->ino; /* * Verify that the ino does not generate a cookie which * is outside the (MINVINTR_COOKIE, MAXIVNUM) range of the * system interrupt table. 
*/ if (iinfo->icookie >= MAXIVNUM || iinfo->icookie < MINVINTR_COOKIE) { DWARN("cnex_add_intr: invalid cookie %x ino %x\n", iinfo->icookie, iinfo->ino); mutex_exit(&cldcp->lock); return (EINVAL); } D1("cnex_add_intr: add hdlr, cfghdl=0x%llx, ino=0x%llx, " "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie); /* Pick a PIL on the basis of the channel's devclass */ for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) { if (cldcp->devclass == cnex_class_to_intr[idx].devclass) { pil = cnex_class_to_intr[idx].pil; break; } } /* add interrupt to solaris ivec table */ if (add_ivintr(iinfo->icookie, pil, (intrfunc)cnex_intr_wrapper, (caddr_t)iinfo, NULL, NULL) != 0) { DWARN("cnex_add_intr: add_ivintr fail cookie %x ino %x\n", iinfo->icookie, iinfo->ino); mutex_exit(&cldcp->lock); return (EINVAL); } /* set the cookie in the HV */ rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie); /* pick next CPU in the domain for this channel */ iinfo->cpuid = intr_dist_cpuid(); /* set the target CPU and then enable interrupts */ rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, iinfo->cpuid); if (rv) { DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n", iinfo->ino); goto hv_error; } rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_IDLE_STATE); if (rv) { DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n", iinfo->ino); goto hv_error; } rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID); if (rv) { DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n", iinfo->ino); goto hv_error; } intr_dist_cpuid_add_device_weight(iinfo->cpuid, iinfo->dip, iinfo->weight); mutex_exit(&cldcp->lock); return (0); hv_error: (void) rem_ivintr(iinfo->icookie, pil); mutex_exit(&cldcp->lock); return (ENXIO); }
/*
 * vdds_create_niu_node -- Create NIU Hybrid node. The NIU nexus
 * node also created if it doesn't exist already.
 *
 * Returns the dip of the NIU network node (held via e_ddi_hold_devi),
 * or NULL if the platform is not HIO capable or node creation fails.
 * All node lookup/creation is serialized under vdds_dev_lock.
 */
dev_info_t *
vdds_create_niu_node(uint64_t cookie, uint64_t macaddr, uint32_t max_frame_size)
{
	dev_info_t *nexus_dip;
	dev_info_t *niu_dip;
	vdds_cb_arg_t cba;

	DBG1(NULL, "Called");

	if (vdds_hv_hio_capable == B_FALSE) {
		return (NULL);
	}
	mutex_enter(&vdds_dev_lock);
	/* Check if the nexus node exists already */
	nexus_dip = vdds_find_node(cookie, ddi_root_node(),
	    vdds_match_niu_nexus);
	if (nexus_dip == NULL) {
		/*
		 * NIU nexus node not found, so create it now.
		 */
		cba.dip = NULL;
		cba.cookie = cookie;
		cba.macaddr = macaddr;
		cba.max_frame_size = max_frame_size;
		nexus_dip = vdds_create_new_node(&cba, NULL,
		    vdds_new_nexus_node);
		if (nexus_dip == NULL) {
			mutex_exit(&vdds_dev_lock);
			return (NULL);
		}
	}
	DBG2(NULL, "nexus_dip = 0x%p", nexus_dip);

	/* Check if NIU node exists already before creating one */
	niu_dip = vdds_find_node(cookie, nexus_dip,
	    vdds_match_niu_node);
	if (niu_dip == NULL) {
		cba.dip = NULL;
		cba.cookie = cookie;
		cba.macaddr = macaddr;
		cba.max_frame_size = max_frame_size;
		niu_dip = vdds_create_new_node(&cba, nexus_dip,
		    vdds_new_niu_node);
		/*
		 * Hold the niu_dip to prevent it from
		 * detaching.
		 */
		if (niu_dip != NULL) {
			e_ddi_hold_devi(niu_dip);
		} else {
			DWARN(NULL, "niumx/network node creation failed");
		}
	} else {
		DWARN(NULL, "niumx/network node already exists(dip=0x%p)",
		    niu_dip);
	}
	/*
	 * release the hold that was done in find/create;
	 * the e_ddi_hold_devi() above (if taken) remains in effect.
	 */
	if ((niu_dip != NULL) && (e_ddi_branch_held(niu_dip)))
		e_ddi_branch_rele(niu_dip);
	if (e_ddi_branch_held(nexus_dip))
		e_ddi_branch_rele(nexus_dip);
	mutex_exit(&vdds_dev_lock);
	DBG1(NULL, "returning niu_dip=0x%p", niu_dip);
	return (niu_dip);
}
/*
 * vdds_process_dds_msg -- Process a DDS message.
 *
 * Dispatches ADD_SHARE/DEL_SHARE requests to the vdds taskq and ACKs
 * or NACKs the sender.  All task-flag manipulation happens under
 * vdds->lock; the NACK for a wrong dds_class is sent without it.
 */
void
vdds_process_dds_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;
	int rv;

	DBG1(vdds, "DDS message received...");

	if (dmsg->dds_class != DDS_VNET_NIU) {
		DBG2(vdds, "Invalid class send NACK");
		(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
		return;
	}
	mutex_enter(&vdds->lock);
	switch (dmsg->dds_subclass) {
	case DDS_VNET_ADD_SHARE:
		DBG2(vdds, "DDS_VNET_ADD_SHARE message...");
		if ((vdds->task_flags != 0) || (vdds->hio_dip != NULL)) {
			/*
			 * Either a task is already pending or
			 * a hybrid device already exists.
			 */
			DWARN(vdds, "NACK: Already pending DDS task");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			mutex_exit(&vdds->lock);
			return;
		}
		/* stash the message for the taskq worker before dispatch */
		vdds->task_flags = VNET_DDS_TASK_ADD_SHARE;
		bcopy(dmsg, &vnetp->vdds_info.dmsg, sizeof (vio_dds_msg_t));
		DBG2(vdds, "Dispatching task for ADD_SHARE");
		rv = ddi_taskq_dispatch(vdds->dds_taskqp,
		    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
		if (rv != 0) {
			/* Send NACK */
			DBG2(vdds, "NACK: Failed to dispatch task");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			vdds->task_flags = 0;
		}
		break;
	case DDS_VNET_DEL_SHARE:
		DBG2(vdds, "DDS_VNET_DEL_SHARE message...");
		if (vdds->task_flags == VNET_DDS_TASK_ADD_SHARE) {
			/*
			 * ADD_SHARE task still pending, simply clear
			 * task flags and ACK.
			 */
			DBG2(vdds, "ACK:ADD_SHARE task still pending");
			vdds->task_flags = 0;
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_TRUE);
			mutex_exit(&vdds->lock);
			return;
		}
		if ((vdds->task_flags == 0) && (vdds->hio_dip == NULL)) {
			/* Send NACK */
			DBG2(vdds, "NACK:No HIO device exists");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			mutex_exit(&vdds->lock);
			return;
		}
		vdds->task_flags = VNET_DDS_TASK_DEL_SHARE;
		bcopy(dmsg, &vdds->dmsg, sizeof (vio_dds_msg_t));
		DBG2(vdds, "Dispatching DEL_SHARE task");
		rv = ddi_taskq_dispatch(vdds->dds_taskqp,
		    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
		if (rv != 0) {
			/* Send NACK */
			DBG2(vdds, "NACK: failed to dispatch task");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			vdds->task_flags = 0;
		}
		break;
	case DDS_VNET_REL_SHARE:
		/* reply to a REL_SHARE we sent; nothing to do but log it */
		DBG2(vdds, "Reply for REL_SHARE reply=%d",
		    dmsg->tag.vio_subtype);
		break;
	default:
		DWARN(vdds, "Discarding Unknown DDS message");
		break;
	}
	mutex_exit(&vdds->lock);
}
/*
 * vdds_get_interrupts -- A function that binds ino's to channels and
 * then provides them to create interrupts property.
 *
 * Fills intrs[0..3] with the Rx channel inos and intrs[4..7] with the
 * Tx channel inos; unpopulated channels get VDDS_MAX_INTR_NUM.  On
 * success *nintr is 8 and 0 is returned; EIO on any HV failure.
 */
static int
vdds_get_interrupts(uint64_t cookie, int ino_range, int *intrs, int *nintr)
{
	uint32_t hvcookie = HVCOOKIE(cookie);
	uint32_t ino = VDDS_INO_RANGE_START(ino_range);
	uint64_t txmap;
	uint64_t rxmap;
	uint64_t chan;
	int slot = 0;
	int rv;

	*nintr = 0;
	rv = vdds_hv_niu_vr_get_txmap(hvcookie, &txmap);
	if (rv != H_EOK) {
		DWARN(NULL, "Failed to get txmap for hvcookie=0x%X rv=%d\n",
		    hvcookie, rv);
		return (EIO);
	}
	rv = vdds_hv_niu_vr_get_rxmap(hvcookie, &rxmap);
	if (rv != H_EOK) {
		DWARN(NULL, "Failed to get rxmap for hvcookie=0x%X, rv=%d\n",
		    hvcookie, rv);
		return (EIO);
	}

	/* Rx channels: bind an ino to each channel present in the map. */
	for (chan = 0; chan < 4; chan++) {
		if (rxmap & (((uint64_t)0x1) << chan)) {
			rv = vdds_hv_niu_vrrx_set_ino(hvcookie, chan, ino);
			if (rv != H_EOK) {
				DWARN(NULL, "Failed to get Rx ino for "
				    "hvcookie=0x%X vch_idx=0x%lx rv=%d\n",
				    hvcookie, chan, rv);
				return (EIO);
			}
			DWARN(NULL,
			    "hvcookie=0x%X RX vch_idx=0x%lx ino=0x%X\n",
			    hvcookie, chan, ino);
			intrs[slot] = ino;
			ino++;
		} else {
			intrs[slot] = VDDS_MAX_INTR_NUM;
		}
		slot++;
		*nintr += 1;
	}

	/* Tx channels: same scheme, continuing with the next inos. */
	for (chan = 0; chan < 4; chan++) {
		if (txmap & (((uint64_t)0x1) << chan)) {
			rv = vdds_hv_niu_vrtx_set_ino(hvcookie, chan, ino);
			if (rv != H_EOK) {
				DWARN(NULL, "Failed to get Tx ino for "
				    "hvcookie=0x%X vch_idx=0x%lx rv=%d\n",
				    hvcookie, chan, rv);
				return (EIO);
			}
			DWARN(NULL,
			    "hvcookie=0x%X TX vch_idx=0x%lx ino=0x%X\n",
			    hvcookie, chan, ino);
			intrs[slot] = ino;
			ino++;
		} else {
			intrs[slot] = VDDS_MAX_INTR_NUM;
		}
		slot++;
		*nintr += 1;
	}
	return (0);
}
/*
 * Map a PC to a (directory, basename, line) triple using a DWARF2
 * .debug_line section at linetab/linetabsize.  Returns true and fills
 * the out parameters on a hit; false when the table is exhausted.
 *
 * Brute-force: the whole line table is re-scanned on every call.
 * Only DWARF version <= 2 units are interpreted; others are skipped.
 */
bool
db_dwarf_line_at_pc(const char *linetab, size_t linetabsize, uintptr_t pc,
    const char **outdirname, const char **outbasename, int *outline)
{
	struct dwbuf table = { .buf = linetab, .len = linetabsize };

	/*
	 * For simplicity, we simply brute force search through the entire
	 * line table each time.
	 */
	uint32_t unitsize;
	struct dwbuf unit;
next:
	/* Line tables are a sequence of compilation unit entries. */
	if (!read_u32(&table, &unitsize) || unitsize >= 0xfffffff0 ||
	    !read_buf(&table, &unit, unitsize))
		return (false);

	uint16_t version;
	uint32_t header_size;
	if (!read_u16(&unit, &version) || version > 2 ||
	    !read_u32(&unit, &header_size))
		goto next;

	struct dwbuf headerstart = unit;
	uint8_t min_insn_length, default_is_stmt, line_range, opcode_base;
	int8_t line_base;
	if (!read_u8(&unit, &min_insn_length) ||
	    !read_u8(&unit, &default_is_stmt) ||
	    !read_s8(&unit, &line_base) ||
	    !read_u8(&unit, &line_range) ||
	    !read_u8(&unit, &opcode_base))
		goto next;

	/*
	 * Directory and file names are next in the header, but for now we
	 * skip directly to the line number program.
	 */
	struct dwbuf names = unit;
	unit = headerstart;
	if (!skip_bytes(&unit, header_size))
		return (false);

	/* VM registers. */
	uint64_t address = 0, file = 1, line = 1, column = 0;
	uint8_t is_stmt = default_is_stmt;
	bool basic_block = false, end_sequence = false;
	bool prologue_end = false, epilogue_begin = false;

	/* Last line table entry emitted, if any. */
	bool have_last = false;
	uint64_t last_line = 0, last_file = 0;

	/* Time to run the line program. */
	uint8_t opcode;
	while (read_u8(&unit, &opcode)) {
		bool emit = false, reset_basic_block = false;
		if (opcode >= opcode_base) {
			/*
			 * "Special" opcodes: advance address and line
			 * together.  NOTE(review): min_insn_length is not
			 * factored into the address advance here (nor in
			 * DW_LNS_const_add_pc below) — correct only when
			 * it is 1; confirm for the targets in use.
			 */
			uint8_t diff = opcode - opcode_base;
			address += diff / line_range;
			line += line_base + diff % line_range;
			emit = true;
		} else if (opcode == 0) {
			/* "Extended" opcodes: length-prefixed payload. */
			uint64_t extsize;
			struct dwbuf extra;
			if (!read_uleb128(&unit, &extsize) ||
			    !read_buf(&unit, &extra, extsize) ||
			    !read_u8(&extra, &opcode))
				goto next;
			switch (opcode) {
			case DW_LNE_end_sequence:
				emit = true;
				end_sequence = true;
				break;
			case DW_LNE_set_address:
				/* payload width determines address size */
				switch (extra.len) {
				case 4: {
					uint32_t address32;
					if (!read_u32(&extra, &address32))
						goto next;
					address = address32;
					break;
				}
				case 8:
					if (!read_u64(&extra, &address))
						goto next;
					break;
				default:
					DWARN("unexpected address length: %zu",
					    extra.len);
					goto next;
				}
				break;
			case DW_LNE_define_file:
				/* XXX: hope this isn't needed */
			default:
				DWARN("unknown extended opcode: %d", opcode);
				goto next;
			}
		} else {
			/* "Standard" opcodes. */
			switch (opcode) {
			case DW_LNS_copy:
				emit = true;
				reset_basic_block = true;
				break;
			case DW_LNS_advance_pc: {
				uint64_t delta;
				if (!read_uleb128(&unit, &delta))
					goto next;
				address += delta * min_insn_length;
				break;
			}
			case DW_LNS_advance_line: {
				int64_t delta;
				if (!read_sleb128(&unit, &delta))
					goto next;
				line += delta;
				break;
			}
			case DW_LNS_set_file:
				if (!read_uleb128(&unit, &file))
					goto next;
				break;
			case DW_LNS_set_column:
				if (!read_uleb128(&unit, &column))
					goto next;
				break;
			case DW_LNS_negate_stmt:
				is_stmt = !is_stmt;
				break;
			case DW_LNS_set_basic_block:
				basic_block = true;
				break;
			case DW_LNS_const_add_pc:
				/* advance as if by special opcode 255 */
				address += (255 - opcode_base) / line_range;
				break;
			case DW_LNS_set_prologue_end:
				prologue_end = true;
				break;
			case DW_LNS_set_epilogue_begin:
				epilogue_begin = true;
				break;
			default:
				DWARN("unknown standard opcode: %d", opcode);
				goto next;
			}
		}
		if (emit) {
			if (address > pc) {
				/* Found an entry after our target PC. */
				if (!have_last) {
					/* Give up on this program. */
					break;
				}
				/*
				 * Return the last entry.
				 * NOTE(review): last_file is saved below but
				 * 'file' (the current register) is passed to
				 * read_filename here — verify this is the
				 * intended file for the previous row.
				 */
				*outline = last_line;
				return (read_filename(&names, outdirname,
				    outbasename, opcode_base, file));
			}
			last_file = file;
			last_line = line;
			have_last = true;
		}
		if (reset_basic_block)
			basic_block = false;
	}

	/* Program ended without covering pc; try the next unit. */
	goto next;
}
/*
 * Clear pending Tx/Rx interrupt
 *
 * Returns the interrupt for the given channel/direction to the HV
 * idle state.  EINVAL for an unknown channel, bad type, or missing
 * handler; ENXIO if the hypervisor call fails.
 */
static int
cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	cnex_soft_state_t *ssp;
	cnex_ldc_t *chanp;
	cnex_intr_t *intrp;
	int status;

	/* Look up the soft state for this nexus instance. */
	ssp = ddi_get_soft_state(cnex_state, ddi_get_instance(dip));

	/* Locate the channel on the nexus channel list. */
	mutex_enter(&ssp->clist_lock);
	for (chanp = ssp->clist; chanp != NULL; chanp = chanp->next) {
		if (chanp->id == id)
			break;
	}
	if (chanp == NULL) {
		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&ssp->clist_lock);

	mutex_enter(&chanp->lock);

	/* Select the Tx or Rx interrupt record. */
	switch (itype) {
	case CNEX_TX_INTR:
		intrp = &(chanp->tx);
		break;
	case CNEX_RX_INTR:
		intrp = &(chanp->rx);
		break;
	default:
		DWARN("cnex_clr_intr: invalid interrupt type\n");
		mutex_exit(&chanp->lock);
		return (EINVAL);
	}

	D1("%s: interrupt ino=0x%x\n", __func__, intrp->ino);

	/* A handler must be registered before the interrupt is cleared. */
	if (intrp->hdlr == 0) {
		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
		mutex_exit(&chanp->lock);
		return (EINVAL);
	}

	/* Return the interrupt to the idle state in the hypervisor. */
	status = hvldc_intr_setstate(ssp->cfghdl, intrp->ino,
	    HV_INTR_IDLE_STATE);
	if (status) {
		DWARN("cnex_clr_intr: cannot clear interrupt state\n");
		mutex_exit(&chanp->lock);
		return (ENXIO);
	}

	mutex_exit(&chanp->lock);
	return (0);
}
/*
 * Remove Tx/Rx interrupt handler for the channel
 *
 * Invalidates the interrupt in the HV, verifies nothing is still
 * pending (EAGAIN if so), removes the device weight and the ivec
 * table entry, and clears the interrupt record.
 */
static int
cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int rv, idx, pil;
	cnex_ldc_t *cldcp;
	cnex_intr_t *iinfo;
	cnex_soft_state_t *cnex_ssp;
	int instance, istate;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get rid of the channel intr handler */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_rem_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* first invalidate so no new interrupts can be delivered */
	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_NOTVALID);
	if (rv) {
		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/*
	 * Check if there are pending interrupts. If interrupts are
	 * pending return EAGAIN.
	 */
	rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
	if (rv) {
		DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/* if interrupts are still pending print warning */
	if (istate != HV_INTR_IDLE_STATE) {
		/*
		 * NOTE: the interrupt stays invalidated here; the caller
		 * is expected to retry on EAGAIN.
		 */
		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EAGAIN);
	}

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_intr[idx].devclass) {
			pil = cnex_class_to_intr[idx].pil;
			break;
		}
	}

	intr_dist_cpuid_rem_device_weight(iinfo->cpuid, iinfo->dip);

	/* remove interrupt */
	(void) rem_ivintr(iinfo->icookie, pil);

	/* clear interrupt info */
	bzero(iinfo, sizeof (*iinfo));

	mutex_exit(&cldcp->lock);

	return (0);
}
/*
 * ldc callback
 *
 * Handles LDC UP/RESET/DOWN/READ events for a vldc port, updates the
 * port state under the minor-node lock, and wakes any pollers with
 * the appropriate poll events.
 */
static uint_t
i_vldc_cb(uint64_t event, caddr_t arg)
{
	int rv;
	vldc_port_t *vport = (vldc_port_t *)arg;
	ldc_status_t old_status;
	short pollevents = 0;

	ASSERT(vport != NULL);
	ASSERT(vport->minorp != NULL);

	D1("i_vldc_cb: vldc@%d:%d callback invoked, channel=0x%lx, "
	    "event=0x%lx\n", vport->inst, vport->number, vport->ldc_id, event);

	/* ensure the port can't be destroyed while we are handling the cb */
	mutex_enter(&vport->minorp->lock);

	if (vport->status == VLDC_PORT_CLOSED) {
		/*
		 * Fix: this early return previously left minorp->lock
		 * held, deadlocking any later callback or close.
		 */
		mutex_exit(&vport->minorp->lock);
		return (LDC_SUCCESS);
	}

	old_status = vport->ldc_status;
	rv = ldc_status(vport->ldc_handle, &vport->ldc_status);
	if (rv != 0) {
		DWARN("i_vldc_cb: vldc@%d:%d could not get ldc status, "
		    "rv=%d\n", vport->inst, vport->number, rv);
		mutex_exit(&vport->minorp->lock);
		return (LDC_SUCCESS);
	}

	if (event & LDC_EVT_UP) {
		pollevents |= POLLOUT;
		vport->hanged_up = B_FALSE;

	} else if (event & LDC_EVT_RESET) {
		/*
		 * Mark the port in reset, if it is not CLOSED and
		 * the channel was previously in LDC_UP state. This
		 * implies that the port cannot be used until it has
		 * been closed and reopened.
		 */
		if (old_status == LDC_UP) {
			vport->status = VLDC_PORT_RESET;
			vport->hanged_up = B_TRUE;
			pollevents = POLLHUP;
		} else {
			/* channel was never up; try to bring it up now */
			rv = ldc_up(vport->ldc_handle);
			if (rv) {
				DWARN("i_vldc_cb: vldc@%d:%d cannot bring "
				    "channel UP rv=%d\n", vport->inst,
				    vport->number, rv);
				mutex_exit(&vport->minorp->lock);
				return (LDC_SUCCESS);
			}
			rv = ldc_status(vport->ldc_handle, &vport->ldc_status);
			if (rv != 0) {
				DWARN("i_vldc_cb: vldc@%d:%d could not get "
				    "ldc status, rv=%d\n", vport->inst,
				    vport->number, rv);
				mutex_exit(&vport->minorp->lock);
				return (LDC_SUCCESS);
			}
			if (vport->ldc_status == LDC_UP) {
				pollevents |= POLLOUT;
				vport->hanged_up = B_FALSE;
			}
		}

	} else if (event & LDC_EVT_DOWN) {
		/*
		 * The other side went away - mark port in RESET state
		 */
		vport->status = VLDC_PORT_RESET;
		vport->hanged_up = B_TRUE;
		pollevents = POLLHUP;
	}

	if (event & LDC_EVT_READ)
		pollevents |= POLLIN;

	mutex_exit(&vport->minorp->lock);

	/* wake pollers outside the lock */
	if (pollevents != 0) {
		D1("i_vldc_cb: port@%d pollwakeup=0x%x\n",
		    vport->number, pollevents);
		pollwakeup(&vport->poll, pollevents);
	}

	return (LDC_SUCCESS);
}
/*
 * close a vldc port
 *
 * Caller must hold the port's minor-node lock.  For READY/RESET ports
 * the LDC channel is closed first, retrying while the LDC callback is
 * still active (i_vldc_ldc_close returning EAGAIN); the cv wait drops
 * the lock so the callback can finish.  OPEN ports fall through to
 * buffer teardown.
 */
static int
i_vldc_close_port(vldc_t *vldcp, uint_t portno)
{
	vldc_port_t *vport;
	vldc_minor_t *vminor;
	int rv = DDI_SUCCESS;

	vport = &(vldcp->port[portno]);

	ASSERT(MUTEX_HELD(&vport->minorp->lock));

	D1("i_vldc_close_port: vldc@%d:%d: closing port\n",
	    vport->inst, vport->minorp->portno);

	vminor = vport->minorp;

	switch (vport->status) {
	case VLDC_PORT_CLOSED:
		/* nothing to do */
		DWARN("i_vldc_close_port: port %d in an unexpected "
		    "state (%d)\n", portno, vport->status);
		return (DDI_SUCCESS);

	case VLDC_PORT_READY:
	case VLDC_PORT_RESET:
		do {
			rv = i_vldc_ldc_close(vport);
			if (rv != EAGAIN)
				break;

			/*
			 * EAGAIN indicates that ldc_close() failed because
			 * ldc callback thread is active for the channel.
			 * cv_timedwait() is used to release vminor->lock and
			 * allow ldc callback thread to complete.
			 * after waking up, check if the port has been closed
			 * by another thread in the meantime.
			 */
			(void) cv_reltimedwait(&vminor->cv, &vminor->lock,
			    drv_usectohz(vldc_close_delay), TR_CLOCK_TICK);
			rv = 0;
		} while (vport->status != VLDC_PORT_CLOSED);

		/*
		 * Return now if the close failed or another thread
		 * already finished closing the port while we waited.
		 */
		if ((rv != 0) || (vport->status == VLDC_PORT_CLOSED))
			return (rv);
		break;

	case VLDC_PORT_OPEN:
		break;

	default:
		DWARN("i_vldc_close_port: port %d in an unexpected "
		    "state (%d)\n", portno, vport->status);
		ASSERT(0);	/* fail quickly to help diagnosis */
		return (EINVAL);
	}

	ASSERT(vport->status == VLDC_PORT_OPEN);

	/* free memory */
	kmem_free(vport->send_buf, vport->mtu);
	kmem_free(vport->recv_buf, vport->mtu);

	/* the hvctl service port also carries a cookie buffer */
	if (strcmp(vminor->sname, VLDC_HVCTL_SVCNAME) == 0)
		kmem_free(vport->cookie_buf, vldc_max_cookie);

	vport->status = VLDC_PORT_CLOSED;

	return (rv);
}
/*
 * Exported interface to register a LDC endpoint with
 * the channel nexus
 *
 * Looks up the channel's Tx/Rx inos in the machine description,
 * resolves the owning device's dip, and links a new cnex_ldc_t onto
 * the nexus channel list.  The list lock is dropped while the MD is
 * scanned, so the duplicate-id check is repeated before insertion.
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int idx;
	cnex_ldc_t *cldcp;
	cnex_ldc_t *new_cldcp;
	int listsz, num_nodes, num_channels;
	md_t *mdp = NULL;
	mde_cookie_t rootnode, *listp = NULL;
	uint64_t tmp_id;
	uint64_t rxino = (uint64_t)-1;
	uint64_t txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int status, instance;
	dev_info_t *chan_dip = NULL;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel_endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: invalid channel id\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {

		/* Get the channel ID */
		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		/* only the endpoint matching the requested id is used */
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
		ASSERT(chan_dip != NULL);
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	new_cldcp = kmem_zalloc(sizeof (*new_cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&new_cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	new_cldcp->id = id;
	new_cldcp->tx.ino = txino;
	new_cldcp->rx.ino = rxino;
	new_cldcp->devclass = devclass;
	new_cldcp->tx.weight = CNEX_TX_INTR_WEIGHT;
	new_cldcp->rx.weight = cnex_class_weight(devclass);
	new_cldcp->dip = chan_dip;

	/*
	 * Add channel to nexus channel list.
	 * Check again to see if channel is already registered since
	 * clist_lock was dropped above.
	 */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			mutex_destroy(&new_cldcp->lock);
			kmem_free(new_cldcp, sizeof (*new_cldcp));
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	new_cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = new_cldcp;
	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}
/*
 * Attach entry point for the channel nexus: allocates per-instance
 * soft state, reads the "reg" property to derive the sun4v config
 * handle, registers with the LDC module, and creates the devctl
 * minor node.
 */
/*ARGSUSED*/
static int
cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int rv, instance, reglen;
	cnex_regspec_t *reg_p;
	ldc_cnex_t cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the instance specific soft state structure.
	 * Save the devi for this instance in the soft_state data.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	cnex_ssp->devi = devi;
	cnex_ssp->clist = NULL;

	/*
	 * Fix: restored '&reg_p'/'&reglen' (corrupted to '®' in the
	 * source) and free the soft state on failure instead of
	 * leaking it.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* get the sun4v config handle for this device */
	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
	kmem_free(reg_p, reglen);

	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);

	/* init channel list mutex */
	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);

	/* Register with LDC module */
	cinfo.dip = devi;
	cinfo.reg_chan = cnex_reg_chan;
	cinfo.unreg_chan = cnex_unreg_chan;
	cinfo.add_intr = cnex_add_intr;
	cinfo.rem_intr = cnex_rem_intr;
	cinfo.clr_intr = cnex_clr_intr;

	/*
	 * LDC register will fail if an nexus instance had already
	 * registered with the LDC framework
	 */
	rv = ldc_register(&cinfo);
	if (rv) {
		DWARN("cnex_attach: unable to register with LDC\n");
		/*
		 * Fix: destroy the mutex BEFORE freeing the soft state
		 * that embeds it (the old order was a use-after-free).
		 */
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(devi, NULL);
		/* same ordering fix as above */
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* Add interrupt redistribution callback. */
	intr_dist_add_weighted(cnex_intr_redist, cnex_ssp);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}
/*
 * cnex_find_chan_dip -- Find the dip of a device that is corresponding
 * to the specific channel. Below are the details on how the dip
 * is derived.
 *
 * - In the MD, the cfg-handle is expected to be unique for
 * virtual-device nodes that have the same 'name' property value.
 * This value is expected to be the same as that of "reg" property
 * of the corresponding OBP device node.
 *
 * - The value of the 'name' property of a virtual-device node
 * in the MD is expected to be the same for the corresponding
 * OBP device node.
 *
 * - Find the virtual-device node corresponding to a channel-endpoint
 * by walking backwards. Then obtain the values for the 'name' and
 * 'cfg-handle' properties.
 *
 * - Walk all the children of the cnex, find a matching dip which
 * has the same 'name' and 'reg' property values.
 *
 * - The channels that have no corresponding device driver are
 * treated as if they correspond to the cnex driver,
 * that is, return cnex dip for them. This means, the
 * cnex acts as an umbrella device driver. Note, this is
 * for 'intrstat' statistics purposes only. As a result of this,
 * the 'intrstat' shows cnex as the device that is servicing the
 * interrupts corresponding to these channels.
 *
 * For now, only one such case is known, that is, the channels that
 * are used by the "domain-services".
 */
static dev_info_t *
cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde)
{
	int		listsz;
	int		num_nodes;
	int		num_devs;
	uint64_t	cfghdl;
	char		*md_name;
	mde_cookie_t	*listp;
	dev_info_t	*cdip = NULL;	/* stays NULL if no match is found */

	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	/* scratch list sized for every node in the MD */
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	/* Walk "back" arcs from the endpoint to its virtual-device node. */
	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "back"), listp);
	ASSERT(num_devs <= 1);
	if (num_devs <= 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "No virtual-device found\n", chan_id);
		goto fdip_exit;
	}
	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "name property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
	    "name property value = %s\n", chan_id, md_name);

	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
		    "cfg-handle property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
	    " property value = 0x%x\n", chan_id, cfghdl);

	/*
	 * Scan cnex's children for a dip whose "name" matches the MD name
	 * and whose first "reg" cell matches the MD cfg-handle.
	 */
	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {
		int *cnex_regspec;
		uint32_t reglen;
		char	*dev_name;

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name", &dev_name)
		    != DDI_PROP_SUCCESS) {
			DWARN("cnex_find_chan_dip: name property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (strcmp(md_name, dev_name) != 0) {
			ddi_prop_free(dev_name);
			continue;
		}
		ddi_prop_free(dev_name);
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg", &cnex_regspec,
		    &reglen) != DDI_SUCCESS) {
			DWARN("cnex_find_chan_dip: reg property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		/* compare the first "reg" cell against the MD cfg-handle */
		if (*cnex_regspec == cfghdl) {
			D1("cnex_find_chan_dip:channel(0x%llx): found "
			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
			    ddi_driver_name(cdip));
			ddi_prop_free(cnex_regspec);
			break;
		}
		ddi_prop_free(cnex_regspec);
	}

fdip_exit:
	if (cdip == NULL) {
		/*
		 * If a virtual-device node exists but no dip found,
		 * then for now print a DEBUG error message only.
		 */
		if (num_devs > 0) {
			DERR("cnex_find_chan_dip:channel(0x%llx): "
			    "No device found\n", chan_id);
		}

		/* If no dip was found, return cnex device's dip. */
		cdip = dip;
	}

	kmem_free(listp, listsz);
	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
	    chan_id, cdip);
	return (cdip);
}
/*
 * vdds_new_niu_node -- callback function to create a new NIU Hybrid node.
 *
 * Populates the freshly created child dip with the properties an NIU
 * Hybrid leaf needs (name, niutype, compatible, device_type, reg,
 * local-mac-address, interrupts, max-frame-size) and claims a slot in
 * the parent's "ranges" property that maps the child's "reg".
 *
 * Returns DDI_WALK_TERMINATE on success (cba->dip set to the new node),
 * DDI_WALK_ERROR on any failure.
 */
static int
vdds_new_niu_node(dev_info_t *dip, void *arg, uint_t flags)
{
	vdds_cb_arg_t *cba = (vdds_cb_arg_t *)arg;
	char *compat[] = { "SUNW,niusl" };
	uint8_t macaddrbytes[ETHERADDRL];
	int interrupts[VDDS_MAX_VRINTRS];
	vdds_ranges_t *prng;
	vdds_ranges_t *prp;
	vdds_reg_t reg;
	dev_info_t *pdip;
	uint64_t start;
	uint64_t size;
	int prnglen;
	int nintr = 0;
	int nrng;
	int rnum;
	int rv;

	DBG1(NULL, "Called dip=0x%p flags=0x%X", dip, flags);
	pdip = ddi_get_parent(dip);
	if (pdip == NULL) {
		DWARN(NULL, "Failed to get parent dip(dip=0x%p)", dip);
		return (DDI_WALK_ERROR);
	}

	/* create "network" property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, "name",
	    "network") != DDI_SUCCESS) {
		DERR(NULL, "Failed to create name property(dip=0x%p)", dip);
		return (DDI_WALK_ERROR);
	}

	/*
	 * create "niutype" property, it is set to n2niu to
	 * indicate NIU Hybrid node.
	 */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, "niutype",
	    "n2niu") != DDI_SUCCESS) {
		/* FIX: message previously said "niuopmode" */
		DERR(NULL, "Failed to create niutype property(dip=0x%p)",
		    dip);
		return (DDI_WALK_ERROR);
	}

	/* create "compatible" property */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, dip, "compatible",
	    compat, 1) != DDI_SUCCESS) {
		DERR(NULL, "Failed to create compatible property(dip=0x%p)",
		    dip);
		return (DDI_WALK_ERROR);
	}

	/* create "device_type" property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, "device_type",
	    "network") != DDI_SUCCESS) {
		DERR(NULL, "Failed to create device_type property(dip=0x%p)",
		    dip);
		return (DDI_WALK_ERROR);
	}

	/* create "reg" property */
	if (vdds_hv_niu_vr_getinfo(HVCOOKIE(cba->cookie),
	    &start, &size) != H_EOK) {
		DERR(NULL, "Failed to get vrinfo for cookie(0x%lX)",
		    cba->cookie);
		return (DDI_WALK_ERROR);
	}
	reg.addr_hi = HVCOOKIE(cba->cookie);
	reg.addr_lo = 0;
	reg.size_hi = 0;
	reg.size_lo = size;
	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "reg",
	    (int *)&reg, sizeof (reg) / sizeof (int)) != DDI_SUCCESS) {
		DERR(NULL, "Failed to create reg property(dip=0x%p)", dip);
		return (DDI_WALK_ERROR);
	}

	/*
	 * Modify the parent's ranges property to map the "reg" property
	 * of the new child.
	 */
	if ((rv = ddi_getlongprop(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "ranges", (caddr_t)&prng, &prnglen)) != DDI_SUCCESS) {
		DERR(NULL,
		    "Failed to get parent's ranges property(pdip=0x%p) rv=%d",
		    pdip, rv);
		return (DDI_WALK_ERROR);
	}
	nrng = prnglen / (sizeof (vdds_ranges_t));
	/*
	 * First scan all ranges to see if a range corresponding
	 * to this virtual NIU exists already.
	 */
	for (rnum = 0; rnum < nrng; rnum++) {
		prp = &prng[rnum];
		if (prp->child_hi == HVCOOKIE(cba->cookie)) {
			break;
		}
	}
	if (rnum == nrng) {
		/* Now to try to find an empty range */
		for (rnum = 0; rnum < nrng; rnum++) {
			prp = &prng[rnum];
			if (prp->child_hi == 0) {
				break;
			}
		}
	}
	if (rnum == nrng) {
		DERR(NULL, "No free ranges entry found");
		/* FIX: previously leaked prng on this path */
		kmem_free((void *)prng, prnglen);
		return (DDI_WALK_ERROR);
	}

	/*
	 * child_hi will have HV cookie as HV cookie is more like
	 * a port in the HybridIO.
	 */
	prp->child_hi = HVCOOKIE(cba->cookie);
	prp->child_lo = 0;
	prp->parent_hi = 0x80000000 | (start >> 32);
	prp->parent_lo = start & 0x00000000FFFFFFFF;
	prp->size_hi = (size >> 32);
	prp->size_lo = size & 0x00000000FFFFFFFF;

	/* element count derived from the struct, not a magic "6" */
	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, pdip, "ranges",
	    (int *)prng, nrng * (sizeof (vdds_ranges_t) / sizeof (int)))
	    != DDI_SUCCESS) {
		DERR(NULL, "Failed to update parent ranges prop(pdip=0x%p)",
		    pdip);
		/* FIX: previously leaked prng on this path */
		kmem_free((void *)prng, prnglen);
		return (DDI_WALK_ERROR);
	}
	kmem_free((void *)prng, prnglen);

	vnet_macaddr_ultostr(cba->macaddr, macaddrbytes);

	/*
	 * create "local-mac-address" property, this will be same as
	 * the vnet's mac-address.
	 */
	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip,
	    "local-mac-address", macaddrbytes, ETHERADDRL) != DDI_SUCCESS) {
		DERR(NULL, "Failed to update mac-addresses property(dip=0x%p)",
		    dip);
		return (DDI_WALK_ERROR);
	}

	rv = vdds_get_interrupts(cba->cookie, rnum, interrupts, &nintr);
	if (rv != 0) {
		DERR(NULL, "Failed to get interrupts for cookie=0x%lx",
		    cba->cookie);
		return (DDI_WALK_ERROR);
	}

	/* create "interrupts" property */
	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "interrupts",
	    interrupts, nintr) != DDI_SUCCESS) {
		DERR(NULL, "Failed to update interrupts property(dip=0x%p)",
		    dip);
		return (DDI_WALK_ERROR);
	}

	/* create "max_frame_size" property */
	if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, "max-frame-size",
	    cba->max_frame_size) != DDI_SUCCESS) {
		DERR(NULL, "Failed to update max-frame-size property(dip=0x%p)",
		    dip);
		return (DDI_WALK_ERROR);
	}

	cba->dip = dip;
	DBG1(NULL, "Returning dip=0x%p", dip);
	return (DDI_WALK_TERMINATE);
}
/*
 * Re-spawn a monitored process from its recorded state (executable,
 * argv, environment, cwd/root, uid/gid, session/group membership and
 * the files attached to fds 0-2).
 *
 * Returns 0 on success (child forked), -1 if the initial fork fails.
 * The child never returns from this function: it either execs or
 * calls _exit().
 */
int
armour_proc_recover (armour_proc *proc, void *data)
{
    pid_t pid;
    sigset_t set;
    int i, fd;

    (void)data;

    pid = fork ();
    switch (pid) {
    case -1:
        DWARN ("fork");
        return -1;
    case 0:
        if (proc->flags & ARPROC_SETSID) {
            /* double-fork so the grandchild can become a session leader */
            pid = fork ();
            switch (pid) {
            case -1:
                /*
                 * FIX: we are in the child here; returning -1 would
                 * resume the caller's logic in a duplicate process.
                 */
                DWARN ("fork");
                _exit (127);
            case 0:
                setsid (); // TODO check return value
                break;
            default:
                _exit (0);
            }
        } else {
            setpgid (0, 0);
        }
        /*
         * remove signal mask
         */
        sigprocmask (SIG_BLOCK, NULL, &set);
        sigprocmask (SIG_UNBLOCK, &set, NULL);
        /*
         * attach file desc. 0, 1 and 2 to whatever files it used
         */
        for (i = 0; i < 3; i++) {
            fd = open (proc->file[i], O_RDWR);
            if (fd < 0) {
                /* FIX: don't dup2() a failed open; best effort, move on */
                DWARN ("open");
                continue;
            }
            if (fd != i) {
                dup2 (fd, i);
                close (fd);     /* FIX: original leaked the temporary fd */
            }
        }
        /*
         * change working and root directory
         *
         * NOTE(review): chdir() runs before chroot(), so proc->cwd is
         * resolved in the original namespace -- confirm this is the
         * intended semantics before reordering.
         */
        if (chdir (proc->cwd) < 0)
            DWARN ("chdir");
        if (chroot (proc->root) < 0)
            DWARN ("chroot");
        /*
         * set gid then uid; FIX: abort the child if either fails so we
         * never exec with unexpected (elevated) privileges.
         */
        if (setgid (proc->gid) < 0 || setuid (proc->uid) < 0) {
            DWARN ("setgid/setuid");
            _exit (126);
        }

        execve (proc->exe, proc->cmdline, proc->environ);
        DWARN ("execve");
        _exit (127); /* exec error! */
    default:
        break;
    }
    return 0;
}