/* ARGSUSED3 */
static int
px_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	px_t	*px_p = PX_DEV_TO_SOFTSTATE(*devp);
	int	minor = getminor(*devp);
	int	minor_num;
	int	err;

	/* Only character-device opens are valid for this node. */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/* No soft state means the instance was never attached. */
	if (px_p == NULL)
		return (ENXIO);

	DBG(DBG_OPEN, px_p->px_dip, "devp=%x: flags=%x\n", devp, flags);

	/*
	 * Track the open/exclusive state of the device under px_mutex.
	 */
	mutex_enter(&px_p->px_mutex);

	minor_num = PCI_MINOR_NUM_TO_PCI_DEVNUM(minor);
	if (minor_num != PCI_TOOL_REG_MINOR_NUM &&
	    minor_num != PCI_TOOL_INTR_MINOR_NUM) {
		/*
		 * Not a pcitool minor: devctl and hotplug related ioctls
		 * are handled by the PCIe framework.
		 */
		err = pcie_open(px_p->px_dip, devp, flags, otyp, credp);
		if (err != 0) {
			mutex_exit(&px_p->px_mutex);
			return (err);
		}
	}

	if (flags & FEXCL) {
		/* Exclusive open requires the device to be fully closed. */
		if (px_p->px_soft_state != PCI_SOFT_STATE_CLOSED)
			goto busy;
		px_p->px_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	} else {
		/* Shared open is refused while an exclusive open is live. */
		if (px_p->px_soft_state == PCI_SOFT_STATE_OPEN_EXCL)
			goto busy;
		px_p->px_soft_state = PCI_SOFT_STATE_OPEN;
	}

	mutex_exit(&px_p->px_mutex);
	return (0);

busy:
	mutex_exit(&px_p->px_mutex);
	DBG(DBG_OPEN, px_p->px_dip, "busy\n");
	return (EBUSY);
}
/* ARGSUSED */
static int
ppb_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	int		instance = PCI_MINOR_NUM_TO_INSTANCE(getminor(*devp));
	ppb_devstate_t	*ppb_p = ddi_get_soft_state(ppb_state, instance);
	int		err;

	/* Only character-device opens are valid for this node. */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/* No soft state means the instance was never attached. */
	if (ppb_p == NULL)
		return (ENXIO);

	mutex_enter(&ppb_p->ppb_mutex);

	/*
	 * Ioctls will be handled by SPARC PCI Express framework for all
	 * PCIe platforms
	 */
	if (ppb_p->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		err = pcie_open(ppb_p->dip, devp, flags, otyp, credp);
		mutex_exit(&ppb_p->ppb_mutex);
		return (err);
	}

	/* Hotplug-capable non-PCIe bridges delegate to the pcihp module. */
	if (ppb_p->hotplug_capable == B_TRUE) {
		mutex_exit(&ppb_p->ppb_mutex);
		return ((pcihp_get_cb_ops())->cb_open(devp, flags,
		    otyp, credp));
	}

	/*
	 * Track the open/exclusive state of the device under ppb_mutex.
	 */
	if (flags & FEXCL) {
		/* Exclusive open requires the device to be fully closed. */
		if (ppb_p->ppb_soft_state != PCI_SOFT_STATE_CLOSED) {
			mutex_exit(&ppb_p->ppb_mutex);
			return (EBUSY);
		}
		ppb_p->ppb_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	} else {
		/* Shared open is refused while an exclusive open is live. */
		if (ppb_p->ppb_soft_state == PCI_SOFT_STATE_OPEN_EXCL) {
			mutex_exit(&ppb_p->ppb_mutex);
			return (EBUSY);
		}
		ppb_p->ppb_soft_state = PCI_SOFT_STATE_OPEN;
	}

	mutex_exit(&ppb_p->ppb_mutex);
	return (0);
}
int main(int argc, const char **argv){ mppadesc_t pcie_fd = 0; if (__k1_spawn_type() == __MPPA_PCI_SPAWN) { pcie_fd = pcie_open(0); pcie_queue_init(pcie_fd); pcie_register_console(pcie_fd, stdin, stdout); } mppa_rpc_server_init(1, 0, 1); mppa_remote_server_init(pcie_fd, 1); utask_t rm1; int blk_sz = atoi(argv[5]); int key_sz = atoi(argv[3]); int value_sz = atoi(argv[4]); if(blk_sz < (key_sz + value_sz + 16)) return 0; utask_create(&rm1, NULL, (void *) mppa_rpc_server_start, NULL); mppa_power_base_spawn(0, "search-k1", argv, NULL, MPPA_POWER_SHUFFLING_ENABLED); int status; mppa_power_base_waitpid(0, &status, 0); utask_join(rm1, NULL); pcie_queue_barrier(pcie_fd, 0, &status); if (__k1_spawn_type() == __MPPA_PCI_SPAWN) { pcie_queue_exit(pcie_fd, 0, &status); } return 0; }
/* ARGSUSED */
static int
ppb_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	int		instance = PCI_MINOR_NUM_TO_INSTANCE(getminor(*devp));
	ppb_devstate_t	*ppb_p = ddi_get_soft_state(ppb_state, instance);

	/* No soft state means the instance was never attached. */
	if (ppb_p == NULL)
		return (ENXIO);

	/*
	 * Ioctls will be handled by PCI Express framework for all
	 * PCIe platforms
	 */
	if (ppb_p->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		int err;

		mutex_enter(&ppb_p->ppb_mutex);
		err = pcie_open(ppb_p->dip, devp, flags, otyp, credp);
		mutex_exit(&ppb_p->ppb_mutex);

		return (err);
	}

	/* Non-PCIe parents delegate the open to the pcihp module. */
	return ((pcihp_get_cb_ops())->cb_open(devp, flags, otyp, credp));
}