/**
 * radeon_irq_kms_init - init driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
    int r = 0;

    task_set(&rdev->hotplug_task, radeon_hotplug_work_func, rdev, NULL);
    task_set(&rdev->audio_task, r600_audio_update_hdmi, rdev, NULL);

    mtx_init(&rdev->irq.lock, IPL_TTY);
    r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
    if (r) {
        return r;
    }

#ifdef notyet
    /* enable msi */
    rdev->msi_enabled = 0;

    if (radeon_msi_ok(rdev)) {
        int ret = pci_enable_msi(rdev->pdev);
        if (!ret) {
            rdev->msi_enabled = 1;
            dev_info(rdev->ddev, "radeon: using MSI.\n");
        }
    }
#endif
    rdev->irq.installed = true;
    r = drm_irq_install(rdev->ddev);
    if (r) {
        rdev->irq.installed = false;
        return r;
    }

    DRM_DEBUG("radeon: irq initialized.\n");
    return 0;
}
void task_powerdown_irqh(uint8_t type, uint8_t * buff)
{
    DEBUG("POWERDOWN IRQH %d %d\n\n", type, *buff);

    switch (type) {
    case (TASK_IRQ_BUTTON_M):
        if (*buff == BE_LONG) {
            powerdown_loop_break = true;
            task_set(TASK_ACTIVE);
        }
    break;

    case (TASK_IRQ_USB):
    {
        /* a declaration directly after a case label is invalid C;
         * the braces give it its own scope */
        uint8_t state = *buff;
        if (state == 1) {
            powerdown_loop_break = true;
            task_set(TASK_USB);
        }
    }
    break;
    }
}
int main(int argc, char **argv)
{
    int *array;
    int array_size;
    int nThreads;
    int i;

    if (argc != 3) {
        printf("Usage:\n\t%s <array_size> <nthreads>\n", argv[0]);
        exit(1);
    }

    array_size = atoi(argv[1]);
    nThreads = atoi(argv[2]);
    assert(array_size > 0);

#pragma css start(nThreads)

    /* alloc spaces for the option data */
#pragma css malloc
    array = (int *)malloc(array_size * sizeof(int));
#pragma css sync
    ;

#pragma css task inout(array[array_size]) in(array_size)
    task_init(array, array_size);

#pragma css task inout(array[array_size]) in(array_size)
    task_set(array, array_size);

#pragma css task in(array[array_size]) in(array_size)
    task_print(array, array_size);

#pragma css task inout(array[array_size]) in(array_size)
    task_unset(array, array_size);

#pragma css task in(array[array_size]) in(array_size)
    task_print(array, array_size);

#pragma css finish
    return 0;
}
void task_init()
{
    task_timer_setup();

    USB_CONNECTED_IRQ_ON;

    powerdown_lock.Unlock();

    // if the factory test has not passed yet, start in the active task
    if (!cfg_factory_passed())
        task_set(TASK_ACTIVE);

    // if USB is connected, go directly to the USB task
    // (the comparison was written as an assignment, `=`, in the original)
    if (usb_state == USB_CONNECTED)
        task_set(TASK_USB);

    wdt_init(wdt_2s);
}
void cfg_reset_factory_test()
{
    uint8_t ff_buffer[sizeof(cfg_ro_t)];

    for (uint16_t i = 0; i < sizeof(cfg_ro_t); i++)
        ff_buffer[i] = 0xFF;

    eeprom_busy_wait();
    eeprom_update_block(ff_buffer, &config_ro, sizeof(cfg_ro_t));
    eeprom_busy_wait();

    task_set(TASK_POWERDOWN);
}
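/*
 * The three firmware snippets above (task_powerdown_irqh, task_init,
 * cfg_reset_factory_test) drive a cooperative scheduler through
 * task_set(TASK_*) calls, but the scheduler itself is not shown.
 * Below is a minimal hypothetical sketch of that pattern; the names
 * current_task, task_changed and task_loop are illustrative
 * assumptions, not the original implementation.
 */
#include <stdint.h>

enum { TASK_POWERDOWN, TASK_ACTIVE, TASK_USB };

static volatile uint8_t current_task = TASK_POWERDOWN;
static volatile uint8_t task_changed = 0;

void task_set(uint8_t task)
{
    /* safe to call from IRQ handlers: only record the request,
     * the main loop performs the actual switch */
    current_task = task;
    task_changed = 1;
}

void task_loop(void)
{
    for (;;) {
        if (task_changed) {
            task_changed = 0;
            /* run the new task's init/stop hooks here,
             * then fall through to its step function */
        }
        /* step the current task */
    }
}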
int main(int argc, char **argv)
{
    int *array, *block;
    int array_size, block_size;
    int nThreads;
    int i;

    if (argc != 3) {
        printf("Usage:\n\t%s <array_size> <nthreads>\n", argv[0]);
        exit(1);
    }

    array_size = atoi(argv[1]);
    nThreads = atoi(argv[2]);
    block_size = array_size / nThreads;
    assert(array_size > 0);
    assert(block_size > 0);

#pragma css start(nThreads)

    /* alloc spaces for the option data */
#pragma css malloc
    array = (int *)malloc(array_size * sizeof(int));
#pragma css sync

    for (i = 0; i < nThreads; i++) {
        block = array + i * block_size;
#pragma css task inout(block[block_size]) in(block_size)
        task_init(block, block_size);
    }

    for (i = 0; i < nThreads; i++) {
        block = array + i * block_size;
#pragma css task inout(block[block_size]) in(block_size)
        task_set(block, block_size);
    }

#pragma css task in(array[array_size]) in(array_size)
    task_print(array, array_size);

    /* #pragma css sync */

    for (i = 0; i < nThreads; i++) {
        block = array + i * block_size;
#pragma css task inout(block[block_size]) in(block_size)
        task_unset(block, block_size);
    }

#pragma css task in(array[array_size]) in(array_size)
    task_print(array, array_size);

#pragma css finish
    return 0;
} /* main */
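/*
 * Both CellSs/SMPSs examples above only spawn the tasks; the task
 * bodies live elsewhere.  A minimal sketch of implementations
 * consistent with the pragmas (the 0/1 fill values are illustrative
 * assumptions, not the original code):
 */
#include <stdio.h>

void task_init(int *array, int size)
{
    for (int i = 0; i < size; i++)
        array[i] = 0;       /* start from a known state */
}

void task_set(int *array, int size)
{
    for (int i = 0; i < size; i++)
        array[i] = 1;       /* mark every element */
}

void task_unset(int *array, int size)
{
    for (int i = 0; i < size; i++)
        array[i] = 0;       /* clear every element */
}

void task_print(int *array, int size)
{
    for (int i = 0; i < size; i++)
        printf("%d ", array[i]);
    printf("\n");
}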
void
vdsp_alloc(void *arg1)
{
    struct vdsp_softc *sc = arg1;
    struct vio_dring_reg dr;

    KASSERT(sc->sc_num_descriptors <= VDSK_MAX_DESCRIPTORS);
    KASSERT(sc->sc_descriptor_size <= VDSK_MAX_DESCRIPTOR_SIZE);
    sc->sc_vd = mallocarray(sc->sc_num_descriptors,
        sc->sc_descriptor_size, M_DEVBUF, M_WAITOK);
    sc->sc_vd_ring = mallocarray(sc->sc_num_descriptors,
        sizeof(*sc->sc_vd_ring), M_DEVBUF, M_WAITOK);
    task_set(&sc->sc_vd_task, vdsp_vd_task, sc);

    bzero(&dr, sizeof(dr));
    dr.tag.type = VIO_TYPE_CTRL;
    dr.tag.stype = VIO_SUBTYPE_ACK;
    dr.tag.stype_env = VIO_DRING_REG;
    dr.tag.sid = sc->sc_local_sid;
    dr.dring_ident = ++sc->sc_dring_ident;
    vdsp_sendmsg(sc, &dr, sizeof(dr), 1);
}
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
    struct viomb_softc *sc = (struct viomb_softc *)self;
    struct virtio_softc *vsc = (struct virtio_softc *)parent;
    u_int32_t features;
    int i;

    if (vsc->sc_child != NULL) {
        printf("child already attached for %s; something wrong...\n",
            parent->dv_xname);
        return;
    }

    /* fail on non-4K page size archs */
    if (VIRTIO_PAGE_SIZE != PAGE_SIZE) {
        printf("non-4K page size arch found, needs %d, got %d\n",
            VIRTIO_PAGE_SIZE, PAGE_SIZE);
        return;
    }

    sc->sc_virtio = vsc;
    vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
    vsc->sc_nvqs = 0;
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_BIO;
    vsc->sc_config_change = viomb_config_change;
    vsc->sc_intrhand = virtio_vq_intr;

    /* negotiate features */
    features = VIRTIO_F_RING_INDIRECT_DESC;
    features = virtio_negotiate_features(vsc, features,
        viomb_feature_names);

    if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE,
         sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0))
        goto err;
    vsc->sc_nvqs++;
    if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE,
         sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0))
        goto err;
    vsc->sc_nvqs++;

    sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
    sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
    virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
    virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

    viomb_read_config(sc);
    TAILQ_INIT(&sc->sc_balloon_pages);

    if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
        PR_NOWAIT|PR_ZERO)) == NULL) {
        printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
        goto err;
    }
    if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
        1, sizeof(u_int32_t) * PGS_PER_REQ, 0,
        BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
        printf("%s: dmamap creation failed.\n", DEVNAME(sc));
        goto err;
    }
    if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
        &sc->sc_req.bl_pages[0],
        sizeof(uint32_t) * PGS_PER_REQ, NULL,
        BUS_DMA_NOWAIT)) {
        printf("%s: dmamap load failed.\n", DEVNAME(sc));
        goto err_dmamap;
    }

    sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO);
    if (sc->sc_taskq == NULL)
        goto err_dmamap;
    task_set(&sc->sc_task, viomb_worker, sc, NULL);

    printf("\n");
    return;
err_dmamap:
    bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
    if (sc->sc_req.bl_pages)
        dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
    for (i = 0; i < vsc->sc_nvqs; i++)
        virtio_free_vq(vsc, &sc->sc_vq[i]);
    vsc->sc_nvqs = 0;
    vsc->sc_child = VIRTIO_CHILD_ERROR;
    return;
}
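/*
 * viomb_attach above, and the vdsp functions elsewhere in this
 * section, all follow the same OpenBSD task(9) pattern: bind a work
 * function and its argument to a struct task with task_set(), then
 * queue it with task_add().  A minimal standalone sketch of that
 * pattern follows; the mydev_* names are hypothetical, and the
 * three-argument task_set() matches newer OpenBSD releases (the
 * four-argument calls in some snippets are from the older API, which
 * took a second void * argument).
 */
#include <sys/types.h>
#include <sys/task.h>

struct mydev_softc {
    struct task sc_work_task;
    int         sc_pending;
};

void
mydev_work(void *arg)
{
    struct mydev_softc *sc = arg;

    /* runs later in a taskq kernel thread, not in interrupt context */
    sc->sc_pending = 0;
}

int
mydev_intr(void *arg)
{
    struct mydev_softc *sc = arg;

    sc->sc_pending++;
    /* defer the heavy lifting; adding an already-queued task is a no-op */
    task_add(systq, &sc->sc_work_task);
    return (1);
}

void
mydev_attach(struct mydev_softc *sc)
{
    task_set(&sc->sc_work_task, mydev_work, sc);
}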
int
qlw_attach(struct qlw_softc *sc)
{
    struct scsibus_attach_args saa;
    void (*parse_nvram)(struct qlw_softc *, int);
    int reset_delay;
    int bus;

    task_set(&sc->sc_update_task, qlw_update_task, sc);

    switch (sc->sc_isp_gen) {
    case QLW_GEN_ISP1000:
        sc->sc_nvram_size = 0;
        break;
    case QLW_GEN_ISP1040:
        sc->sc_nvram_size = 128;
        sc->sc_nvram_minversion = 2;
        parse_nvram = qlw_parse_nvram_1040;
        break;
    case QLW_GEN_ISP1080:
    case QLW_GEN_ISP12160:
        sc->sc_nvram_size = 256;
        sc->sc_nvram_minversion = 1;
        parse_nvram = qlw_parse_nvram_1080;
        break;
    default:
        printf("unknown isp type\n");
        return (ENXIO);
    }

    /* after reset, mbox registers 1-3 should contain the string "ISP " */
    if (qlw_read_mbox(sc, 1) != 0x4953 ||
        qlw_read_mbox(sc, 2) != 0x5020 ||
        qlw_read_mbox(sc, 3) != 0x2020) {
        /* try releasing the risc processor */
        qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
    }

    qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
    if (qlw_softreset(sc) != 0) {
        printf("softreset failed\n");
        return (ENXIO);
    }

    for (bus = 0; bus < sc->sc_numbusses; bus++)
        qlw_init_defaults(sc, bus);

    if (qlw_read_nvram(sc) == 0) {
        for (bus = 0; bus < sc->sc_numbusses; bus++)
            parse_nvram(sc, bus);
    }

#ifndef ISP_NOFIRMWARE
    if (sc->sc_firmware && qlw_load_firmware(sc)) {
        printf("firmware load failed\n");
        return (ENXIO);
    }
#endif

    /* execute firmware */
    sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE;
    sc->sc_mbox[1] = QLW_CODE_ORG;
    if (qlw_mbox(sc, 0x0003, 0x0001)) {
        printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    delay(250000);      /* from isp(4) */

    sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE;
    if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN,
        QLW_MBOX_ABOUT_FIRMWARE_OUT)) {
        printf("ISP not talking after firmware exec: %x\n",
            sc->sc_mbox[0]);
        return (ENXIO);
    }
    /* The ISP1000 firmware we use doesn't return a version number. */
    if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) {
        sc->sc_mbox[1] = 1;
        sc->sc_mbox[2] = 37;
        sc->sc_mbox[3] = 0;
        sc->sc_mbox[6] = 0;
    }
    printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc),
        sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);

    /* work out how many ccbs to allocate */
    sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS;
    if (qlw_mbox(sc, 0x0001, 0x0007)) {
        printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }
    sc->sc_maxrequests = sc->sc_mbox[2];
    if (sc->sc_maxrequests > 512)
        sc->sc_maxrequests = 512;
    for (bus = 0; bus < sc->sc_numbusses; bus++) {
        if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests)
            sc->sc_max_queue_depth[bus] = sc->sc_maxrequests;
    }

    /*
     * On some 1020/1040 variants the response queue is limited to
     * 256 entries.  We don't really need all that many anyway.
     */
    sc->sc_maxresponses = sc->sc_maxrequests / 2;
    if (sc->sc_maxresponses < 64)
        sc->sc_maxresponses = 64;

    /* We may need up to 3 request entries per SCSI command. */
    sc->sc_maxccbs = sc->sc_maxrequests / 3;

    /* Allegedly the FIFO is busted on the 1040A. */
    if (sc->sc_isp_type == QLW_ISP1040A)
        sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK;
    qlw_write(sc, QLW_CFG1, sc->sc_isp_config);

    if (sc->sc_isp_config & QLW_BURST_ENABLE)
        qlw_dma_burst_enable(sc);

    sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES;
    sc->sc_mbox[1] = 0;
    if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY)
        sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY;
    if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) {
        printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE;
    sc->sc_mbox[1] = sc->sc_clock;
    if (qlw_mbox(sc, 0x0003, 0x0001)) {
        printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT;
    sc->sc_mbox[1] = sc->sc_retry_count[0];
    sc->sc_mbox[2] = sc->sc_retry_delay[0];
    sc->sc_mbox[6] = sc->sc_retry_count[1];
    sc->sc_mbox[7] = sc->sc_retry_delay[1];
    if (qlw_mbox(sc, 0x00c7, 0x0001)) {
        printf("couldn't set retry count: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP;
    sc->sc_mbox[1] = sc->sc_async_data_setup[0];
    sc->sc_mbox[2] = sc->sc_async_data_setup[1];
    if (qlw_mbox(sc, 0x0007, 0x0001)) {
        printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION;
    sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5;
    sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4;
    sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5;
    sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4;
    if (qlw_mbox(sc, 0x0007, 0x0001)) {
        printf("couldn't set active negation: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT;
    sc->sc_mbox[1] = sc->sc_tag_age_limit[0];
    sc->sc_mbox[2] = sc->sc_tag_age_limit[1];
    if (qlw_mbox(sc, 0x0007, 0x0001)) {
        printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT;
    sc->sc_mbox[1] = sc->sc_selection_timeout[0];
    sc->sc_mbox[2] = sc->sc_selection_timeout[1];
    if (qlw_mbox(sc, 0x0007, 0x0001)) {
        printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]);
        return (ENXIO);
    }

    for (bus = 0; bus < sc->sc_numbusses; bus++) {
        if (qlw_config_bus(sc, bus))
            return (ENXIO);
    }

    if (qlw_alloc_ccbs(sc)) {
        /* error already printed */
        return (ENOMEM);
    }

    sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE;
    sc->sc_mbox[1] = sc->sc_maxrequests;
    qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests);
    sc->sc_mbox[4] = 0;
    if (qlw_mbox(sc, 0x00df, 0x0001)) {
        printf("couldn't init request queue: %x\n", sc->sc_mbox[0]);
        goto free_ccbs;
    }

    sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE;
    sc->sc_mbox[1] = sc->sc_maxresponses;
    qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses);
    sc->sc_mbox[5] = 0;
    if (qlw_mbox(sc, 0x00ef, 0x0001)) {
        printf("couldn't init response queue: %x\n", sc->sc_mbox[0]);
        goto free_ccbs;
    }

    reset_delay = 0;
    for (bus = 0; bus < sc->sc_numbusses; bus++) {
        sc->sc_mbox[0] = QLW_MBOX_BUS_RESET;
        sc->sc_mbox[1] = sc->sc_reset_delay[bus];
        sc->sc_mbox[2] = bus;
        if (qlw_mbox(sc, 0x0007, 0x0001)) {
            printf("couldn't reset bus: %x\n", sc->sc_mbox[0]);
            goto free_ccbs;
        }
        sc->sc_marker_required[bus] = 1;
        sc->sc_update_required[bus] = 0xffff;

        if (sc->sc_reset_delay[bus] > reset_delay)
            reset_delay = sc->sc_reset_delay[bus];
    }

    /* wait for the busses to settle */
    delay(reset_delay * 1000000);

    /* we should be good to go now, attach scsibus */
    for (bus = 0; bus < sc->sc_numbusses; bus++) {
        sc->sc_link[bus].adapter = &qlw_switch;
        sc->sc_link[bus].adapter_softc = sc;
        sc->sc_link[bus].adapter_target = sc->sc_initiator[bus];
        sc->sc_link[bus].adapter_buswidth = QLW_MAX_TARGETS;
        sc->sc_link[bus].openings = sc->sc_max_queue_depth[bus];
        sc->sc_link[bus].pool = &sc->sc_iopool;

        memset(&saa, 0, sizeof(saa));
        saa.saa_sc_link = &sc->sc_link[bus];

        /* config_found() returns the scsibus attached to us */
        sc->sc_scsibus[bus] = (struct scsibus_softc *)
            config_found(&sc->sc_dev, &saa, scsiprint);

        qlw_update_bus(sc, bus);
    }

    sc->sc_running = 1;
    return (0);

free_ccbs:
    qlw_free_ccbs(sc);
    return (ENXIO);
}
void
ActionWarehouse::printActionDependencySets() const
{
  /**
   * Note: This routine uses the XTerm colors directly, which is not advised
   * for general purpose output coloring.  Most users should prefer using
   * Problem::colorText(), which respects the "color_output" option for
   * terminals that do not support coloring.  Since this routine is intended
   * for debugging only and runs before several objects exist in the system,
   * we are just using the constants directly.
   */
  std::ostringstream oss;

  const auto & ordered_names = _syntax.getSortedTaskSet();
  for (const auto & task_vector : ordered_names)
  {
    oss << "[DBG][ACT] (" << COLOR_YELLOW;
    std::copy(task_vector.begin(), task_vector.end(),
              infix_ostream_iterator<std::string>(oss, ", "));
    oss << COLOR_DEFAULT << ")\n";

    std::set<std::string> task_set(task_vector.begin(), task_vector.end());
    for (const auto & task : task_set)
    {
      if (_action_blocks.find(task) == _action_blocks.end())
        continue;

      for (const auto & act : _action_blocks.at(task))
      {
        // The Syntax of the Action if it exists
        if (act->name() != "")
          oss << "[DBG][ACT]\t" << COLOR_GREEN << act->name() << COLOR_DEFAULT << '\n';

        // The task sets
        oss << "[DBG][ACT]\t" << act->type();
        const std::set<std::string> tasks = act->getAllTasks();
        if (tasks.size() > 1)
        {
          oss << " (";
          // Break the current Action's tasks into 2 sets: those intersecting
          // with the current set, and then the difference.
          std::set<std::string> intersection, difference;
          std::set_intersection(tasks.begin(), tasks.end(),
                                task_set.begin(), task_set.end(),
                                std::inserter(intersection, intersection.end()));
          std::set_difference(tasks.begin(), tasks.end(),
                              intersection.begin(), intersection.end(),
                              std::inserter(difference, difference.end()));

          oss << COLOR_CYAN;
          std::copy(intersection.begin(), intersection.end(),
                    infix_ostream_iterator<std::string>(oss, ", "));
          oss << COLOR_MAGENTA << (difference.empty() ? "" : ", ");
          std::copy(difference.begin(), difference.end(),
                    infix_ostream_iterator<std::string>(oss, ", "));
          oss << COLOR_DEFAULT << ")";
        }
        oss << '\n';
      }
    }
  }
  if (_show_actions)
    _console << oss.str() << std::endl;
}
void
radeondrm_attachhook(struct device *self)
{
    struct radeon_device *rdev = (struct radeon_device *)self;
    struct drm_device *dev = rdev->ddev;    /* was used below but never declared */
    int r, acpi_status;

    /* radeon_device_init should report only fatal error
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure, it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = radeon_device_init(rdev, rdev->ddev);
    if (r) {
        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
        radeon_fatal_error = 1;
        radeondrm_forcedetach(rdev);
        return;
    }

    /* Again modeset_init should fail only on fatal error
     * otherwise it should provide enough functionalities
     * for shadowfb to run
     */
    r = radeon_modeset_init(rdev);
    if (r)
        dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

    /* Call ACPI methods: require modeset init
     * but failure is not fatal
     */
    if (!r) {
        acpi_status = radeon_acpi_init(rdev);
        if (acpi_status)
            DRM_DEBUG("Error during ACPI methods call\n");
    }

    {
        struct wsemuldisplaydev_attach_args aa;
        struct rasops_info *ri = &rdev->ro;

        task_set(&rdev->switchtask, radeondrm_doswitch, ri);

        if (ri->ri_bits == NULL)
            return;

#ifdef __sparc64__
        fbwscons_setcolormap(&rdev->sf, radeondrm_setcolor);
#endif
        drm_modeset_lock_all(rdev->ddev);
        drm_fb_helper_restore_fbdev_mode((void *)rdev->mode_info.rfbdev);
        drm_modeset_unlock_all(rdev->ddev);

#ifndef __sparc64__
        ri->ri_flg = RI_CENTER | RI_VCONS | RI_WRONLY;
        rasops_init(ri, 160, 160);

        ri->ri_hw = rdev;
#else
        ri = &rdev->sf.sf_ro;
#endif

        radeondrm_stdscreen.capabilities = ri->ri_caps;
        radeondrm_stdscreen.nrows = ri->ri_rows;
        radeondrm_stdscreen.ncols = ri->ri_cols;
        radeondrm_stdscreen.textops = &ri->ri_ops;
        radeondrm_stdscreen.fontwidth = ri->ri_font->fontwidth;
        radeondrm_stdscreen.fontheight = ri->ri_font->fontheight;

        aa.console = rdev->console;
        aa.scrdata = &radeondrm_screenlist;
        aa.accessops = &radeondrm_accessops;
        aa.accesscookie = ri;
        aa.defaultscreens = 0;

        if (rdev->console) {
            long defattr;

            ri->ri_ops.alloc_attr(ri->ri_active, 0, 0, 0, &defattr);
            wsdisplay_cnattach(&radeondrm_stdscreen, ri->ri_active,
                ri->ri_ccol, ri->ri_crow, defattr);
        }

        /*
         * Now that we've taken over the console, disable decoding of
         * VGA legacy addresses, and opt out of arbitration.
         */
        radeon_vga_set_state(rdev, false);
        pci_disable_legacy_vga(&rdev->dev);

        printf("%s: %dx%d\n", rdev->dev.dv_xname,
            ri->ri_width, ri->ri_height);

        config_found_sm(&rdev->dev, &aa, wsemuldisplaydevprint,
            wsemuldisplaydevsubmatch);
    }
}
void
vdsp_attach(struct device *parent, struct device *self, void *aux)
{
    struct vdsp_softc *sc = (struct vdsp_softc *)self;
    struct cbus_attach_args *ca = aux;
    struct ldc_conn *lc;

    sc->sc_idx = ca->ca_idx;
    sc->sc_bustag = ca->ca_bustag;
    sc->sc_dmatag = ca->ca_dmatag;
    sc->sc_tx_ino = ca->ca_tx_ino;
    sc->sc_rx_ino = ca->ca_rx_ino;

    printf(": ivec 0x%llx, 0x%llx", sc->sc_tx_ino, sc->sc_rx_ino);

    mtx_init(&sc->sc_desc_mtx, IPL_BIO);

    /*
     * Un-configure queues before registering interrupt handlers,
     * such that we don't get any stale LDC packets or events.
     */
    hv_ldc_tx_qconf(ca->ca_id, 0, 0);
    hv_ldc_rx_qconf(ca->ca_id, 0, 0);

    sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
        IPL_BIO, BUS_INTR_ESTABLISH_MPSAFE, vdsp_tx_intr,
        sc, sc->sc_dv.dv_xname);
    sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
        IPL_BIO, BUS_INTR_ESTABLISH_MPSAFE, vdsp_rx_intr,
        sc, sc->sc_dv.dv_xname);
    if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
        printf(", can't establish interrupt\n");
        return;
    }

    lc = &sc->sc_lc;
    lc->lc_id = ca->ca_id;
    lc->lc_sc = sc;
    lc->lc_reset = vdsp_ldc_reset;
    lc->lc_start = vdsp_ldc_start;
    lc->lc_rx_data = vdsp_rx_data;

    lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
    if (lc->lc_txq == NULL) {
        printf(", can't allocate tx queue\n");
        return;
    }

    lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
    if (lc->lc_rxq == NULL) {
        printf(", can't allocate rx queue\n");
        goto free_txqueue;
    }

    task_set(&sc->sc_open_task, vdsp_open, sc);
    task_set(&sc->sc_alloc_task, vdsp_alloc, sc);
    task_set(&sc->sc_close_task, vdsp_close, sc);
    task_set(&sc->sc_read_task, vdsp_read, sc);

    printf("\n");
    return;

#if 0
free_rxqueue:
    ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
#endif
free_txqueue:
    ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}
void
vdsp_rx_vio_dring_data(struct vdsp_softc *sc, struct vio_msg_tag *tag)
{
    struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
    struct vd_desc *vd;
    struct task *task;
    vaddr_t va;
    paddr_t pa;
    uint64_t size, off;
    psize_t nbytes;
    int err;

    switch (tag->stype) {
    case VIO_SUBTYPE_INFO:
        DPRINTF(("DATA/INFO/DRING_DATA\n"));

        if (dm->dring_ident != sc->sc_dring_ident ||
            dm->start_idx >= sc->sc_num_descriptors) {
            dm->tag.stype = VIO_SUBTYPE_NACK;
            vdsp_sendmsg(sc, dm, sizeof(*dm), 0);
            return;
        }

        off = dm->start_idx * sc->sc_descriptor_size;
        vd = (struct vd_desc *)(sc->sc_vd + off);
        va = (vaddr_t)vd;
        size = sc->sc_descriptor_size;
        while (size > 0) {
            pmap_extract(pmap_kernel(), va, &pa);
            nbytes = MIN(size, PAGE_SIZE - (off & PAGE_MASK));
            err = hv_ldc_copy(sc->sc_lc.lc_id, LDC_COPY_IN,
                sc->sc_dring_cookie.addr + off, pa,
                nbytes, &nbytes);
            if (err != H_EOK) {
                printf("%s: hv_ldc_copy %d\n", __func__, err);
                return;
            }
            va += nbytes;
            size -= nbytes;
            off += nbytes;
        }

        task = &sc->sc_vd_task[dm->start_idx];

        DPRINTF(("%s: start_idx %d, end_idx %d, operation %x\n",
            sc->sc_dv.dv_xname, dm->start_idx, dm->end_idx,
            vd->operation));
        switch (vd->operation) {
        case VD_OP_BREAD:
            task_set(task, vdsp_read_dring, sc, vd);
            break;
        case VD_OP_BWRITE:
            task_set(task, vdsp_write_dring, sc, vd);
            break;
        case VD_OP_FLUSH:
            task_set(task, vdsp_flush_dring, sc, vd);
            break;
        case VD_OP_GET_VTOC:
            task_set(task, vdsp_get_vtoc, sc, vd);
            break;
        case VD_OP_SET_VTOC:
            task_set(task, vdsp_set_vtoc, sc, vd);
            break;
        case VD_OP_GET_DISKGEOM:
            task_set(task, vdsp_get_diskgeom, sc, vd);
            break;
        case VD_OP_GET_WCE:
        case VD_OP_SET_WCE:
        case VD_OP_GET_DEVID:
            /*
             * Solaris issues VD_OP_GET_DEVID despite the
             * fact that we don't advertise it.  It seems
             * to be able to handle failure just fine, so
             * we silently ignore it.
             */
            task_set(task, vdsp_unimp, sc, vd);
            break;
        default:
            printf("%s: unsupported operation 0x%02x\n",
                sc->sc_dv.dv_xname, vd->operation);
            task_set(task, vdsp_unimp, sc, vd);
            break;
        }
        task_add(systq, task);
        break;

    case VIO_SUBTYPE_ACK:
        DPRINTF(("DATA/ACK/DRING_DATA\n"));
        break;

    case VIO_SUBTYPE_NACK:
        DPRINTF(("DATA/NACK/DRING_DATA\n"));
        break;

    default:
        DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
        break;
    }
}