psm2_error_t ips_tid_init(const psmi_context_t *context, struct ips_protoexp *protoexp, ips_tid_avail_cb_fn_t cb, void *cb_context) { const struct hfi1_user_info_dep *user_info = &context->user_info; const struct hfi1_base_info *base_info = &context->ctrl->base_info; const struct hfi1_ctxt_info *ctxt_info = &context->ctrl->ctxt_info; struct ips_tid *tidc = &protoexp->tidc; struct psmi_stats_entry entries[] = { PSMI_STATS_DECL("tid update count", MPSPAWN_STATS_REDUCTION_ALL, NULL, &tidc->tid_num_total), }; tidc->context = context; tidc->protoexp = protoexp; tidc->tid_num_total = 0; tidc->tid_num_inuse = 0; tidc->tid_avail_cb = cb; tidc->tid_avail_context = cb_context; tidc->tid_array = NULL; tidc->invalidation_event = (uint64_t *) (ptrdiff_t) base_info->events_bufbase; /* * PSM uses tid registration caching only if driver has enabled it. */ if (!(tidc->context->runtime_flags & HFI1_CAP_TID_UNMAP)) { int i; cl_qmap_t *p_map; cl_map_item_t *root,*nil_item; tidc->tid_array = (uint32_t *) psmi_calloc(context->ep, UNDEFINED, context->ctrl->__hfi_tidexpcnt, sizeof(uint32_t)); if (tidc->tid_array == NULL) return PSM2_NO_MEMORY; /* * first is root node, last is terminator node. */ p_map = &tidc->tid_cachemap; root = (cl_map_item_t *) psmi_calloc(context->ep, UNDEFINED, context->ctrl->__hfi_tidexpcnt + 2, sizeof(cl_map_item_t)); if (root == NULL) return PSM2_NO_MEMORY; nil_item = &root [context->ctrl->__hfi_tidexpcnt + 1]; ips_tidcache_map_init(p_map,root,nil_item); NTID = 0; NIDLE = 0; IPREV(IHEAD) = INEXT(IHEAD) = IHEAD; for (i = 1; i <= context->ctrl->__hfi_tidexpcnt; i++) { INVALIDATE(i) = 1; } /* * if not shared context, all tids are used by the same * process. Otherwise, subcontext process can only cache * its own portion. Driver makes the same tid number * assignment to subcontext processes. */ tidc->tid_cachesize = context->ctrl->__hfi_tidexpcnt; if (user_info->subctxt_cnt > 0) { uint16_t remainder = tidc->tid_cachesize % user_info->subctxt_cnt; tidc->tid_cachesize /= user_info->subctxt_cnt; if (ctxt_info->subctxt < remainder) tidc->tid_cachesize++; } } /* * Setup shared control structure. */ tidc->tid_ctrl = (struct ips_tid_ctrl *)context->tid_ctrl; if (!tidc->tid_ctrl) { tidc->tid_ctrl = (struct ips_tid_ctrl *) psmi_calloc(context->ep, UNDEFINED, 1, sizeof(struct ips_tid_ctrl)); if (tidc->tid_ctrl == NULL) { return PSM2_NO_MEMORY; } } /* * Only the master process can initialize. */ if (ctxt_info->subctxt == 0) { pthread_spin_init(&tidc->tid_ctrl->tid_ctrl_lock, PTHREAD_PROCESS_SHARED); tidc->tid_ctrl->tid_num_max = context->ctrl->__hfi_tidexpcnt; tidc->tid_ctrl->tid_num_avail = tidc->tid_ctrl->tid_num_max; } return psmi_stats_register_type(PSMI_STATS_NO_HEADING, PSMI_STATSTYPE_TIDS, entries, PSMI_STATS_HOWMANY(entries), tidc); }
static int genregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; unsigned long bucket; struct pt_regs *regs = task_pt_regs(target); if (!regs) return -EIO; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®s->r00, 0, 32*sizeof(unsigned long)); #define INEXT(KPT_REG, USR_REG) \ if (!ret) \ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \ KPT_REG, offsetof(struct user_regs_struct, USR_REG), \ offsetof(struct user_regs_struct, USR_REG) + \ sizeof(unsigned long)); /* Must be exactly same sequence as struct user_regs_struct */ INEXT(®s->sa0, sa0); INEXT(®s->lc0, lc0); INEXT(®s->sa1, sa1); INEXT(®s->lc1, lc1); INEXT(®s->m0, m0); INEXT(®s->m1, m1); INEXT(®s->usr, usr); INEXT(®s->preds, p3_0); INEXT(®s->gp, gp); INEXT(®s->ugp, ugp); INEXT(&pt_elr(regs), pc); /* CAUSE and BADVA aren't writeable. */ INEXT(&bucket, cause); INEXT(&bucket, badva); /* Ignore the rest, if needed */ if (!ret) ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, offsetof(struct user_regs_struct, pad1), -1); if (ret) return ret; /* * This is special; SP is actually restored by the VM via the * special event record which is set by the special trap. */ regs->hvmer.vmpsp = regs->r29; return 0; }