/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	/* Upgrade the READ lock taken by the caller to a WRITE lock */
	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts)
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);

		/*
		 * Keep trying to place the context on this blade, stealing
		 * a user context if none is free.
		 */
		while (!gru_assign_gru_context(kgts, blade_id)) {
			msleep(1);
			gru_steal_context(kgts, blade_id);
		}
		gru_load_context(kgts);

		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	/* Return to the caller holding the sema for READ, as on entry */
	downgrade_write(&bs->bs_kgts_sema);
}
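/*
 * Illustration only (not part of the driver's interface): a minimal sketch of
 * how a caller might honor the locking contract described above -- take
 * bs_kgts_sema for READ, and only then ask for the kernel context to be
 * (re)loaded if it is not currently resident. The gru_base[] blade table and
 * the helper name are assumptions for this sketch, not confirmed by the code
 * above; only the bs_kgts/ts_gru fields and the semaphore appear there.
 */
static struct gru_blade_state *example_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs = gru_base[blade_id];	/* assumed table */

	down_read(&bs->bs_kgts_sema);

	/*
	 * gru_load_kernel_context() upgrades to WRITE internally and
	 * downgrades back to READ before returning.
	 */
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}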
int gru_kservices_init(struct gru_state *gru)
{
	struct gru_blade_state *bs;
	struct gru_context_configuration_handle *cch;
	unsigned long cbr_map, dsr_map;
	int err, num, cpus_possible;

	/*
	 * Currently, resources are reserved ONLY on the second chiplet
	 * on each blade. This leaves ALL resources on chiplet 0 available
	 * for user code.
	 */
	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[1])
		return 0;

	cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);

	/* Reserve per-cpu CBRs and DSR bytes for kernel use */
	num = GRU_NUM_KERNEL_CBR * cpus_possible;
	cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
	gru->gs_reserved_cbrs += num;

	num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
	dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
	gru->gs_reserved_dsr_bytes += num;

	gru->gs_active_contexts++;
	__set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
	cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);

	bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
					KERNEL_CTXNUM, 0);
	bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
					KERNEL_CTXNUM, 0);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable = 0;
	cch->tlb_int_enable = 0;
	cch->tfm_done_bit_enable = 0;
	cch->unmap_enable = 1;
	err = cch_allocate(cch, 0, cbr_map, dsr_map);
	if (err) {
		gru_dbg(grudev,
			"Unable to allocate kernel CCH: gru %d, err %d\n",
			gru->gs_gid, err);
		BUG();
	}
	err = cch_start(cch);
	if (err) {
		gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n",
			gru->gs_gid, err);
		BUG();
	}
	unlock_cch_handle(cch);

	if (gru_options & GRU_QUICKLOOK)
		quicktest(gru);
	return 0;
}
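/*
 * Illustration only: how the per-blade kernel_cb/kernel_dsr areas set up above
 * might be carved into per-cpu slices, given that the reservation is sized by
 * cpus_possible. The layout shown (one GRU_HANDLE_STRIDE and one
 * GRU_NUM_KERNEL_DSR_BYTES block per cpu) is inferred from the async offsets
 * used in gru_lock_async_resource() below and assumes GRU_NUM_KERNEL_CBR per
 * cpu fits in a single stride slot; the helper name and the use of
 * uv_blade_processor_id() are assumptions for this sketch.
 */
static void example_get_cpu_resources(struct gru_blade_state *bs,
				      void **cb, void **dsr)
{
	int lcpu = uv_blade_processor_id();	/* blade-local cpu number */

	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
}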
/*
 * Lock previously reserved async GRU resources
 *
 * input:
 *	han - handle to identify resources
 * output:
 *	cb  - pointer to first CBR
 *	dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);

	/* Async CBRs/DSRs are carved out just past the per-cpu kernel areas */
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}
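/*
 * Illustration only: a hypothetical caller of the async-resource API. The
 * reservation and unlock entry points named here (gru_reserve_async_resources()
 * and gru_unlock_async_resource()) are assumed counterparts to the function
 * above and are not defined in this excerpt; their names and signatures may
 * differ in the actual driver.
 */
static void example_use_async_resources(int blade_id)
{
	unsigned long han;
	void *cb, *dsr;

	/* assumed: reserve 1 CBR and 32 DSR bytes, get back an opaque handle */
	han = gru_reserve_async_resources(blade_id, 1, 32, NULL);
	if (!han)
		return;

	gru_lock_async_resource(han, &cb, &dsr);
	/* ... issue GRU instructions through cb, stage data in dsr ... */
	gru_unlock_async_resource(han);
}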