/*
 * Scopeable.addScopeMatchers(self, *matchers)
 *
 * Append every extra positional argument to the instance's MATCHERS
 * list and keep the list sorted.  Returns None, or NULL with an
 * exception set (TypeError when MATCHERS is not a list).
 */
static PyObject *Scopeable_addScopeMatchers(PyObject *self, PyObject *args) {
  PyObject *matchers;
  int len=PyTuple_Size(args)-1;
  int i;
  if (!(self=getSelf(args))) {
    return NULL;
  }
  matchers=PyObject_GetAttrString(self, MATCHERS);
  if (matchers==NULL) {
    /* attribute missing: GetAttrString already set the exception */
    return NULL;
  }
  REFCNT("matchers", matchers);
  if (!PyList_Check(matchers)) {
    PyErr_SetString(PyExc_TypeError, "internal type error");
    Py_DECREF(matchers);
    return NULL;
  }
  if (len) {
    for (i=0;i<len;i++) {
      /* PySequence_GetItem returns a NEW reference and PyList_Append
         does NOT steal it, so it must be released here (it was leaked
         before). */
      PyObject *newMatcher=PySequence_GetItem(args, i+1);
      if (newMatcher==NULL) {
        Py_DECREF(matchers);
        return NULL;
      }
      if (PyList_Append(matchers, newMatcher) < 0) {
        Py_DECREF(newMatcher);
        Py_DECREF(matchers);
        return NULL;
      }
      Py_DECREF(newMatcher);
    }
    PyList_Sort(matchers);
  }
  REFCNT("matchers", matchers);  /* report before dropping our reference */
  Py_DECREF(matchers);
  Py_INCREF(Py_None);
  return Py_None;
}
/*
 * Scopeable.mergeDefaults(self, *dicts, **kwargs)
 *
 * Merge each positional dictionary (and kwargs, if non-empty) into the
 * instance's defaults via _mergeDefaults().  Returns None, or NULL
 * with a TypeError when a positional argument is not a dictionary or
 * when _mergeDefaults() fails internally.
 */
static PyObject *Scopeable_mergeDefaults(PyObject *self, PyObject *args, PyObject *kwargs) {
  int len, i;
  len=PyTuple_Size(args);
  if (!(self=getSelf(args))) {
    return NULL;
  }
  REFCNT("self", self);
  for (i=1;i<len;i++) {
    PyObject *dict=PySequence_GetItem(args, i);  /* new reference */
    if (dict==NULL) {
      return NULL;
    }
    if (!PyDict_Check(dict)) {
      PyErr_SetString(PyExc_TypeError,
                      "Scopeable.mergeDefaults: expected a dictionary");
      Py_DECREF(dict);  /* was leaked on this error path */
      return NULL;
    }
    REFCNT("dict", dict);
    _mergeDefaults(self, dict);
    Py_DECREF(dict);
    /* _mergeDefaults() is void but may set an exception (e.g. when
       DICTLIST is not a list); propagate instead of swallowing it. */
    if (PyErr_Occurred()) {
      return NULL;
    }
  }
  if (kwargs && PyObject_IsTrue(kwargs)) {
    _mergeDefaults(self, kwargs);
    if (PyErr_Occurred()) {
      return NULL;
    }
  }
  REFCNT("self", self);
  Py_INCREF(Py_None);
  return Py_None;
}
/*
 * Merge the last dictionary of self.DICTLIST into 'dict' (entries
 * already present in the defaults dict win for shared keys), then
 * install 'dict' as the new last element of DICTLIST.  An empty
 * DICTLIST first gets a fresh empty dictionary appended.
 *
 * Void return; on failure a Python exception is left set, which the
 * caller detects with PyErr_Occurred().
 */
static void _mergeDefaults(PyObject *self, PyObject *dict) {
  PyObject *dictList=PyObject_GetAttrString(self, DICTLIST);
  PyObject *lastDict;
  PyObject *key, *value;
  int len, lastIndex;
  int pos=0;
  if (dictList==NULL) {
    return;  /* exception already set by GetAttrString */
  }
  if (!PyList_Check(dictList)) {
    PyErr_SetString(PyExc_TypeError, "dictList is supposed to be a list!");
    Py_DECREF(dictList);  /* reference was leaked on this path before */
    return;
  }
#ifdef MYDEBUG
  printf("in _mergeDefaults\n");
#endif
  if (!(len=PyList_GET_SIZE(dictList))){
    PyObject *newDict=PyDict_New();
    REFCNT("new dictionary", newDict);
    PyList_Append(dictList, newDict);
    Py_DECREF(newDict); /* ATC */
    len++;
  }
  lastIndex=len-1;
  lastDict=PyList_GET_ITEM(dictList, lastIndex);  /* borrowed reference */
  assert(PyDict_Check(lastDict));
  assert(PyDict_Check(dict));
  /* Copy every existing default into the incoming dict.  PyDict_SetItem
     increfs key/value itself, so no manual increfs are needed. */
  while (PyDict_Next(lastDict, &pos, &key, &value)) {
    PyDict_SetItem(dict, key, value);
  }
  /* PyList_SetItem steals a reference, so hand it one of our own. */
  Py_INCREF(dict);
  PyList_SetItem(dictList, lastIndex, dict);
  REFCNT("dictList", dictList);  /* report before dropping our reference */
  Py_DECREF(dictList);
}
/*
 * Scopeable.__getattr__(self, attrname)
 *
 * Attribute-lookup fallback: consult the MASH cache first, then scan
 * the dictionaries of self.DICTLIST in order.  The special name
 * "__all__" yields the key list of the fully mashed dictionary.
 * Raises AttributeError when nothing matches.
 */
static PyObject *Scopeable__getattr__(PyObject *self, PyObject *args) {
  char *attrname;
  PyObject *val=NULL, *inDict, *masher;
  if (!PyArg_ParseTuple(args, "Os", &self, &attrname)) {
    return NULL;
  }
  inDict=((PyInstanceObject *)self)->in_dict;
  masher=PyMapping_GetItemString(inDict, MASH);  /* new reference */
  if (masher==NULL) {
    /* MASH slot missing; exception already set */
    return NULL;
  }
  if (PyObject_IsTrue(masher) && PyMapping_HasKeyString(masher, attrname)) {
    val=PyMapping_GetItemString(masher, attrname);
  }
  else {
    PyObject *dictList;
    int len, i;
    dictList=PyMapping_GetItemString(inDict, DICTLIST);  /* new reference */
    if (dictList==NULL) {
      Py_DECREF(masher);
      return NULL;
    }
    len=PyList_Size(dictList);
    for (i=0;i<len;i++) {
      PyObject *d=PyList_GetItem(dictList, i);  /* borrowed */
      if (PyMapping_HasKeyString(d, attrname)) {
        val=PyMapping_GetItemString(d, attrname);
        break;
      }
    }
    Py_DECREF(dictList);
  }
  if (val==NULL) {
    /* check for special attribute "__all__" -- exact match; the old
       strncmp() prefix test also matched longer names such as
       "__all___x" */
    if (!strcmp(attrname, "__all__")) {
      PyObject *mashed, *keys, *newargs;
      newargs=PyTuple_New(1);
      if (newargs==NULL) {
        Py_DECREF(masher);
        return NULL;
      }
      /* PyTuple_SetItem STEALS a reference; self is borrowed here, so
         incref it first.  Previously the borrowed ref was stolen and
         the tuple itself was never released. */
      Py_INCREF(self);
      PyTuple_SetItem(newargs, 0, self);
      REFCNT("self", self);
      mashed=Scopeable_mash(NULL, newargs);
      Py_DECREF(newargs);
      if (mashed==NULL) {
        Py_DECREF(masher);
        return NULL;
      }
      keys=PyMapping_Keys(mashed);
      REFCNT("keys", keys);
      Py_DECREF(mashed);
      val=keys;
    }
    else {
      PyErr_SetString(PyExc_AttributeError, attrname);
    }
  }
  Py_DECREF(masher);
  return val;
}
/*
 * Drop one application reference on each tid in tid_array.
 *
 * tidc      - tid cache state
 * tid_array - tidinfo entries (as returned by the driver) to release
 * tidcnt    - number of entries in tid_array; must be > 0
 *
 * A tid whose refcount reaches zero either returns to the idle queue
 * (still cached) or, if it was invalidated while in use, is removed
 * from the RB tree and handed back to the driver in one batched
 * hfi_free_tid() call.
 */
psm2_error_t
ips_tidcache_release(struct ips_tid *tidc,
		uint32_t *tid_array, uint32_t tidcnt)
{
	cl_qmap_t *p_map = &tidc->tid_cachemap;
	uint32_t i, j, idx;
	psm2_error_t err;

	psmi_assert(tidcnt > 0);

	j = 0;
	for (i = 0; i < tidcnt; i++) {
		/*
		 * Driver only returns tidctrl=1 or tidctrl=2.
		 */
		idx = 2*IPS_TIDINFO_GET_TID(tid_array[i]) +
			IPS_TIDINFO_GET_TIDCTRL(tid_array[i]);
		psmi_assert(idx != 0);
		psmi_assert(idx <= tidc->tid_ctrl->tid_num_max);
		psmi_assert(REFCNT(idx) != 0);

		REFCNT(idx)--;
		if (REFCNT(idx) == 0) {
			if (INVALIDATE(idx) != 0) {
				/* invalidated while busy: drop it from the
				 * cache now and queue it for the batched
				 * driver free below. */
				ips_cl_qmap_remove_item(p_map,
						&p_map->root[idx]);

				tidc->tid_array[j] = tid_array[i];
				j++;
			} else {
				/* still valid: keep cached, park on the
				 * idle queue for future reuse/eviction. */
				IDLE_INSERT(idx);
			}
		}
	}

	if (j > 0) {
		/*
		 * call driver to free the tids.
		 */
		if (hfi_free_tid(tidc->context->ctrl,
			    (uint64_t) (uintptr_t) tidc->tid_array, j) < 0) {
			/* If failed to unpin pages, it's fatal error */
			err = psmi_handle_error(tidc->context->ep,
				PSM2_EP_DEVICE_FAILURE,
				"Failed to tid free %d tids", j);
			return err;
		}
	}

	return PSM2_OK;
}
/* * * Call driver to free all cached tids. */ psm2_error_t ips_tidcache_cleanup(struct ips_tid *tidc) { cl_qmap_t *p_map = &tidc->tid_cachemap; psm2_error_t err; int i, j; j = 0; for (i = 1; i <= tidc->tid_ctrl->tid_num_max; i++) { psmi_assert(REFCNT(i) == 0); if (INVALIDATE(i) == 0) { tidc->tid_array[j++] = p_map->root[i].payload.tidinfo; } } if (j > 0) { /* * call driver to free the tids. */ if (hfi_free_tid(tidc->context->ctrl, (uint64_t) (uintptr_t) tidc->tid_array, j) < 0) { /* If failed to unpin pages, it's fatal error */ err = psmi_handle_error(tidc->context->ep, PSM2_EP_DEVICE_FAILURE, "Failed to tid free %d tids", j); return err; } } psmi_free(tidc->tid_array); psmi_free(tidc->tid_cachemap.root); return PSM2_OK; }
/*
 * Force to remove a tid, check invalidation event afterwards.
 *
 * tidc   - tid cache state; the caller has already placed the tidinfo
 *          entries to free into tidc->tid_array
 * tidcnt - number of entries in tidc->tid_array
 *
 * The driver free is issued first; only then is each tid marked
 * invalidated and unlinked from the idle queue and the RB tree.
 */
static psm2_error_t
ips_tidcache_remove(struct ips_tid *tidc, uint32_t tidcnt)
{
	cl_qmap_t *p_map = &tidc->tid_cachemap;
	uint32_t idx;
	psm2_error_t err;

	/*
	 * call driver to free the tids.
	 */
	if (hfi_free_tid(tidc->context->ctrl,
		    (uint64_t) (uintptr_t) tidc->tid_array, tidcnt) < 0) {
		/* If failed to unpin pages, it's fatal error */
		/* NOTE(review): error message hardcodes "1" even though
		 * tidcnt tids were requested -- confirm intent. */
		err = psmi_handle_error(tidc->context->ep,
			PSM2_EP_DEVICE_FAILURE,
			"Failed to tid free %d tids", 1);
		return err;
	}

	while (tidcnt) {
		tidcnt--;
		idx = 2*IPS_TIDINFO_GET_TID(tidc->tid_array[tidcnt]) +
			IPS_TIDINFO_GET_TIDCTRL(tidc->tid_array[tidcnt]);

		/*
		 * sanity check.
		 */
		psmi_assert(idx != 0);
		psmi_assert(idx <= tidc->tid_ctrl->tid_num_max);
		psmi_assert(INVALIDATE(idx) == 0);
		psmi_assert(REFCNT(idx) == 0);

		/*
		 * mark the tid invalidated.
		 */
		INVALIDATE(idx) = 1;

		/*
		 * remove the tid from RB tree.
		 */
		IDLE_REMOVE(idx);
		ips_cl_qmap_remove_item(p_map, &p_map->root[idx]);
	}

	/*
	 * Because the freed tid is not from invalidation list,
	 * it is possible that kernel just invalidated the tid,
	 * then we need to check and process the invalidation
	 * before we can re-use this tid. The reverse order
	 * will wrongly invalidate this tid again.
	 */
	if ((*tidc->invalidation_event) & HFI1_EVENT_TID_MMU_NOTIFY) {
		err = ips_tidcache_invalidation(tidc);
		if (err)
			return err;
	}

	return PSM2_OK;
}
/*
 * Scopeable.__init__(self[, dict])
 *
 * Set up per-instance state: DICTLIST (a list whose single element is
 * the supplied 'dict', or a fresh empty dict), MATCHERS (empty list),
 * CURRENT_SCOPES (empty dict), and MASH (None).  Returns None, or
 * NULL with an exception set on bad arguments or allocation failure.
 */
static PyObject *Scopeable_init(PyObject *self, PyObject *args) {
  PyObject *dict=NULL;
  PyObject *dictList=NULL, *matchers=NULL, *currentScopes=NULL;
  int alloced = 0;
  /* Parse before allocating: the old code created the three fresh
     containers first and leaked them all on any argument error. */
  if (!PyArg_ParseTuple(args, "O|O", &self, &dict)) {
    return NULL;
  }
  if (dict == NULL) {
    alloced = 1;
    dict=PyDict_New();
    if (dict==NULL) {
      return NULL;
    }
  }
  else if (!PyDict_Check(dict)) {
    PyErr_SetString(PyExc_TypeError, "a dictionary was expected");
    return NULL;
  }
  dictList=PyList_New(0);
  matchers=PyList_New(0);
  currentScopes=PyDict_New();
  if (dictList==NULL || matchers==NULL || currentScopes==NULL) {
    goto fail;
  }
  if (PyList_Append(dictList, dict) < 0) {
    goto fail;
  }
  PyObject_SetAttrString(self, DICTLIST, dictList);
  PyObject_SetAttrString(self, MATCHERS, matchers);
  PyObject_SetAttrString(self, CURRENT_SCOPES, currentScopes);
  /* SetAttrString takes its own reference; the old explicit
     Py_INCREF(Py_None) here leaked a None reference per call. */
  PyObject_SetAttrString(self, MASH, Py_None);
  REFCNT("self", self);
  REFCNT("dictList", dictList);
  REFCNT("matchers", matchers);
  REFCNT("currentScopes", currentScopes);
  /* ATC */
  Py_DECREF(dictList);
  Py_DECREF(matchers);
  Py_DECREF(currentScopes);
  if (alloced) {Py_DECREF(dict);}
  /* /ATC */
  Py_INCREF(Py_None);
  return Py_None;

fail:
  Py_XDECREF(dictList);
  Py_XDECREF(matchers);
  Py_XDECREF(currentScopes);
  if (alloced) {Py_XDECREF(dict);}
  return NULL;
}
/*
 * Scopeable.mash(self)
 *
 * Flatten self.DICTLIST into one new dictionary.  Iterates from the
 * last dict to the first so that earlier dictionaries override later
 * ones for shared keys.  Returns a new reference, or NULL with an
 * exception set.
 */
static PyObject *Scopeable_mash(PyObject *self, PyObject *args) {
  PyObject *nd, *dictList, *dlCopy;
  int i,len;
  if (!PyArg_ParseTuple(args, "O", &self)) {
    return NULL;
  }
  REFCNT("self", self);
  nd=PyDict_New();
  if (nd==NULL) {
    return NULL;
  }
  dictList=PyObject_GetAttrString(self, DICTLIST);
  if (dictList==NULL) {
    Py_DECREF(nd);
    return NULL;
  }
  len=PyList_Size(dictList);
  /* Work from a snapshot so mutation of DICTLIST during the merge
     cannot shift indices under us. */
  dlCopy=PyList_GetSlice(dictList, 0, len);
  if (dlCopy==NULL) {
    Py_DECREF(dictList);
    Py_DECREF(nd);
    return NULL;
  }
  for (i=len-1;i>=0;i--) {
    PyObject *d, *key, *value;
    int pos=0;
    d=PyList_GetItem(dlCopy, i);  /* borrowed */
    if (!PyDict_Check(d)) {
      PyErr_SetString(PyExc_TypeError, "dictionary expected inside dictList");
      Py_DECREF(dlCopy);  /* was leaked on this error path before */
      Py_DECREF(dictList);
      Py_DECREF(nd);
      return NULL;
    }
    /* PyDict_SetItem increfs key/value itself */
    while (PyDict_Next(d, &pos, &key, &value)) {
      PyDict_SetItem(nd, key, value);
    }
  }
  REFCNT("dictList", dictList);  /* report before dropping our reference */
  Py_DECREF(dictList);
  Py_DECREF(dlCopy);
  REFCNT("nd", nd);
  return nd;
}
/*
 * Return a new reference to the last dictionary of self.DICTLIST (the
 * defaults dict), or None when the list is empty.  Returns NULL with
 * an exception set on failure.
 */
static PyObject *_defaults(PyObject *self) {
  PyObject *inDict, *dictList, *defaults;
  int len;
  inDict=((PyInstanceObject *)self)->in_dict;
  dictList=PyMapping_GetItemString(inDict, DICTLIST);  /* new reference */
  if (dictList==NULL) {
    return NULL;  /* exception already set */
  }
  if (!PyList_Check(dictList)) {
    /* previously returned NULL without setting an exception and leaked
       the reference obtained above */
    PyErr_SetString(PyExc_TypeError, "dictList is supposed to be a list!");
    Py_DECREF(dictList);
    return NULL;
  }
  len=PyList_Size(dictList);
  if (len) {
    defaults=PySequence_GetItem(dictList, len-1);
    Py_DECREF(dictList);  /* was never released before, on any path */
    REFCNT("defaults", defaults);
    return defaults;
  }
  Py_DECREF(dictList);
  Py_INCREF(Py_None);
  return Py_None;
}
/* i_ig_color: color the interference graph g for storage class (INT/FP) sc
 *
 * Chaitin-style allocation in three phases:
 *   1. repeatedly remove (and stack) every node with valence <= ncolors;
 *   2. when stuck, spill low-refcount/high-valence nodes until the graph
 *      dismantles completely;
 *   3. pop the stack, assigning each node a register not used by its
 *      neighbors in the saved copy g2 of the graph.
 */
void i_ig_color (int sc)
{
     ig_t g, g2;               /* Backup of interference graph */
     i_local_t *nstk;          /* Node stack: when removing a node from IG,
                                  push it onto this stack */
     unsigned int ncnt = 0;    /* Counter for node stack */
     unsigned int nnodes;      /* No. of nodes left in IG when coloring */
     unsigned int nremoved;    /* No. of nodes removed in 1 coloring pass */
     i_local_t i, j;           /* Dummy variable indices / counters */
     unsigned int k;
     unsigned int reg;         /* Temporary color variable */
     unsigned int ncolors;     /* Number of available registers */
     /* Data for prioritized spilling */
     i_ref_t threshref = 0.;   /* Threshold refcnt (see below) */
     i_local_t *deferspill = 0; /* Maintains info of which spills have been
                                  deferred */
     i_local_t Vnum, Vbase;    /* Number of vars of sclass sc, and their
                                  base offset in i_locals */
#ifndef NDEBUG
     int dcnt = 0;
#endif
     /* Select the graph and variable range for this storage class. */
     if (sc == FP) {
          g = fig; Vnum = num_f; Vbase = i_loc_ilim;
     } else {
          g = iig; Vnum = num_i; Vbase = 0;
     }
     ncolors = i_reg_navail(sc);/* Find number of available registers */
     NEW(nstk, Vnum);          /* Allocate register stack */
     g2 = i_igcopy(g);         /* Make backup copy of IG */
     NEW0(removed, Vnum);      /* Mark whether a node has been removed */
     DEBUG(i_igunparse(g));
     if (i_dorefcnt) {
          /* Find maxref */
          i_ref_t t = 0.;
          unsigned n = 0;
          NEW0(deferspill, Vnum);
          for (i = 0; i < Vnum; i++) {
               j = i + Vbase;  /* Get real index of this variable */
               if (! (PARAMP(j) || SCLASS(j) == STACK)) {
                    t += REFCNT(j);
                    n++;
               }
          }
          /* NOTE(review): if every variable is a param or STACK, n is 0
             and this divides by zero -- confirm callers guarantee at
             least one register candidate when i_dorefcnt is set. */
          threshref = t/(n*2); /* For any clique (V,E) where |V| > # regs,
                                  spill first all v with refcnt < threshref;
                                  this is a little bogus (the right way would
                                  be to sort the v \in V and spill in order
                                  of increasing refcnt), but fast, and it
                                  works pretty well. */
     }
     /*
      * Dismantle interference graph
      */
     do {
          nremoved = 0;        /* Reset number of nodes removed */
          nnodes = 0;          /* And number of nodes remaining */
          /* Loop over all variables (ie. nodes in IG) */
          for (i = 0; i < Vnum; i++) {
               if (removed[i])
                    continue;
               j = i + Vbase;
               /* Skip params: already allocated */
               if (PARAMP(j)) {
                    i_igremove(i, g);
                    continue;
               }
               /* Remove non-register candidates */
               if (SCLASS(j) == STACK) {
                    i_igspill(j, i, g);
                    continue;
               }
               /* Find valence of node i */
               k = bv_num(row(i, g));
               if (k == 0) {
                    removed[i] = 1;
                    continue;
               }
               if (k <= ncolors) {
                    /* Valence <= number of colors: can color.
                       Store index of node i for later use */
                    push(i, nstk, ncnt);
                    /* Remove node i from graph */
                    i_igremove(i, g);
                    ++nremoved; /* Increase the counter of removed nodes */
               } else
                    ++nnodes;   /* Can't color: increase number of nodes
                                   left in graph */
          }
     } while (nnodes > 0 && nremoved > 0);
     while (nnodes > 0) {
          /* Try nodes with high valence/low refcount */
          i_local_t target = i_zero;
          unsigned int mc = 0;
#ifndef NDEBUG
          if (i_debug) {
               printf("Initial spill loop, #%d (nnodes=%d,dorefcnt=%d)\n",
                      dcnt++, nnodes, i_dorefcnt);
          }
#endif
          for (i = 0; i < Vnum; i++) {
               if (removed[i])
                    continue;
               j = i + Vbase;
               /* Find valence of node i */
               k = bv_num(row(i, g));
               assert(k);
               if (k <= ncolors) {
                    /* Can color: evidently we removed some of this node's
                       neighbors on a previous iteration of this inner
                       loop */
                    /* Store index of node i for later use */
                    push(i, nstk, ncnt);
                    /* Remove node i from graph */
                    i_igremove(i, g);
                    --nnodes;   /* We have removed a node */
                    if (!nnodes)
                         break;
               } else if (REFCNT(j) < 2*threshref && k > mc) {
                    /* remember the cheapest-looking spill candidate */
                    mc = k;
                    target = i;
               }
          }
          if (nnodes)
               if (target != i_zero) {
                    DEBUG(printf("Spilling(1) %d[n=%d,d=%d,k=%d,c=%f,t=%f]\n",\
                                 target, nnodes, deferspill[target], mc, \
                                 REFCNT(target+Vbase), threshref));
                    i_igspill(target+Vbase, target, g);
                    DEBUG(i_igunparse(g));
                    nnodes--;
               } else {
                    /* Found nothing to spill here */
                    assert(target == i_zero);
                    break;
               }
     }
     DEBUG(dcnt = 0);
     while (nnodes > 0) {
          /* Final, aggressive spill loop */
#ifndef NDEBUG
          if (i_debug) {
               printf("Final spill loop, #%d (nnodes=%d)\n", dcnt++, nnodes);
          }
#endif
          for (i = 0; i < Vnum; i++) {
               if (removed[i])
                    continue;
               j = i + Vbase;
               /* Find valence of node i */
               k = bv_num(row(i, g));
               assert(k);
               if (k <= ncolors) {
                    /* Can color: evidently we removed some of this node's
                       neighbors on a previous iteration of this inner
                       loop */
                    /* Store index of node i for later use */
                    push(i, nstk, ncnt);
                    /* Remove node i from graph */
                    i_igremove(i, g);
               } else if (!i_dorefcnt || deferspill[i] > 1
                          || REFCNT(j) < 3*threshref) {
                    /* Small refcnt or already deferred: spill. */
                    DEBUG(printf("Spilling(2) %d[n=%d,d=%d,k=%d,c=%f,t=%f]\n",\
                                 i, nnodes, deferspill[i], k, \
                                 REFCNT(j), threshref));
                    i_igspill(j, i, g);
                    DEBUG(i_igunparse(g));
               } else {
                    /* This node is referenced a lot; try deferring the
                       spill. */
                    ++deferspill[i];
                    continue;   /* Do not remove this node, yet */
               }
               --nnodes;        /* We have removed a node */
          }
     }
     /*
      * Rebuild interference graph, coloring nodes
      */
     while (ncnt) {
          /* Find a new node to add back to the IG */
          pop(i, nstk, ncnt);
          /* Make unavailable the registers of all nodes adjacent to node i
             in the IG */
          bv_eachbit(row(i, g2), i_rmreg, (void *)Vbase);
          reg = i_reg_get(sc); /* Get an int register */
          j = i + Vbase;
          ADDR(j) = reg;
          SCLASS(j) = REGISTER; /* Set storage location info */
          i_reg_mask(sc);      /* Reset the set of available registers */
     }
     i_reg_resetmask(sc);
}
/*
 * Find or create tid registrations covering [buf, buf + *length).
 *
 * tidc      - tid cache state
 * buf       - start of the user buffer
 * length    - in: requested bytes; out: bytes actually covered when
 *             only a prefix could be registered
 * tid_array - out: tidinfo entries covering the range, in order
 * tidcnt    - out: number of entries written to tid_array
 * tidoff    - in/out: byte offset of 'buf' within the first tid
 *
 * Returns PSM2_OK on (possibly partial) success, or
 * PSM2_OK_NO_PROGRESS when the caller should retry later.
 */
psm2_error_t
ips_tidcache_acquire(struct ips_tid *tidc,
		const void *buf, uint32_t *length,
		uint32_t *tid_array, uint32_t *tidcnt,
		uint32_t *tidoff)
{
	cl_qmap_t *p_map = &tidc->tid_cachemap;
	cl_map_item_t *p_item;
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + (*length);
	uint32_t idx, nbytes;
	psm2_error_t err;

	/*
	 * Before every tid caching search, we need to update the
	 * tid caching if there is invalidation event, otherwise,
	 * the cached address may be invalidated and we might have
	 * wrong matching.
	 */
	if ((*tidc->invalidation_event) & HFI1_EVENT_TID_MMU_NOTIFY) {
		err = ips_tidcache_invalidation(tidc);
		if (err)
			return err;
	}

	/*
	 * Now we can do matching from the caching, because obsolete
	 * address in caching has been removed or identified.
	 */
retry:
	p_item = ips_cl_qmap_search(p_map, start, end);
	/* idx == 0 means no matching node was found. */
	idx = 2*IPS_TIDINFO_GET_TID(p_item->payload.tidinfo) +
		IPS_TIDINFO_GET_TIDCTRL(p_item->payload.tidinfo);

	/*
	 * There is tid matching.
	 */
	if (idx) {
		/*
		 * if there is a caching match, but the tid has been
		 * invalidated, we can't match this tid, and we also
		 * can't register this address, we need to wait this
		 * tid to be freed.
		 */
		if (INVALIDATE(idx) != 0)
			return PSM2_OK_NO_PROGRESS;

		/*
		 * if the page offset within the tid is not less than
		 * 128K, the address offset within the page is not 64B
		 * multiple, PSM can't handle this tid with any offset
		 * mode. We need to free this tid and re-register with
		 * the asked page address.
		 */
		if (((start - START(idx)) >= 131072) && ((*tidoff) & 63)) {
			/*
			 * If the tid is currently used, retry later.
			 */
			if (REFCNT(idx) != 0)
				return PSM2_OK_NO_PROGRESS;

			/*
			 * free this tid.
			 */
			tidc->tid_array[0] = p_map->root[idx].payload.tidinfo;
			err = ips_tidcache_remove(tidc, 1);
			if (err)
				return err;

			/* try to match a node again */
			goto retry;
		}
	}

	/*
	 * If there is no match node, or 'start' falls out of node range,
	 * whole or partial buffer from 'start' is not registered yet.
	 */
	if (!idx || START(idx) > start) {
		if (!idx)
			nbytes = end - start;
		else
			nbytes = START(idx) - start;

		/*
		 * Because we don't have any match tid yet, if
		 * there is an error, we return from here, PSM
		 * will try later.
		 */
		err = ips_tidcache_register(tidc, start, nbytes, &idx);
		if (err)
			return err;
	}

	/*
	 * sanity check.
	 */
	psmi_assert(START(idx) <= start);
	psmi_assert(INVALIDATE(idx) == 0);

	*tidoff += start - START(idx);
	*tidcnt = 1;

	/* take the first tid and walk forward covering the rest of the
	 * range with cached successors or fresh registrations. */
	tid_array[0] = p_map->root[idx].payload.tidinfo;
	REFCNT(idx)++;
	if (REFCNT(idx) == 1)
		IDLE_REMOVE(idx);
	start = END(idx);

	while (start < end) {
		p_item = ips_cl_qmap_successor(p_map, &p_map->root[idx]);
		idx = 2*IPS_TIDINFO_GET_TID(p_item->payload.tidinfo) +
			IPS_TIDINFO_GET_TIDCTRL(p_item->payload.tidinfo);
		if (!idx || START(idx) != start) {
			if (!idx)
				nbytes = end - start;
			else
				nbytes = (START(idx) > end) ?
					(end - start) : (START(idx) - start);

			/*
			 * Because we already have at least one match tid,
			 * if it is error to register new pages, we break
			 * here and return the tids we already have.
			 */
			err = ips_tidcache_register(tidc, start, nbytes, &idx);
			if (err)
				break;
		} else if (INVALIDATE(idx) != 0) {
			/*
			 * the tid has been invalidated, it is still in
			 * caching because it is still being used, but
			 * any new usage is not allowed, we ignore it and
			 * return the tids we already have.
			 */
			psmi_assert(REFCNT(idx) != 0);
			break;
		}

		/*
		 * sanity check.
		 */
		psmi_assert(START(idx) == start);
		psmi_assert(INVALIDATE(idx) == 0);

		tid_array[(*tidcnt)++] = p_map->root[idx].payload.tidinfo;
		REFCNT(idx)++;
		if (REFCNT(idx) == 1)
			IDLE_REMOVE(idx);
		start = END(idx);
	}

	/* report how much of the buffer is actually covered */
	if (start < end)
		*length = start - (unsigned long)buf;
	/* otherwise, all pages are registered */
	psmi_assert((*tidcnt) > 0);

	return PSM2_OK;
}
/*
 * Get mmu notifier invalidation info and update PSM's caching.
 *
 * tidc - tid cache state; tidc->tid_array is used as scratch space
 *        for the driver's list of invalidated tids.
 *
 * Each reported tid is marked invalidated.  Idle tids (refcount zero)
 * are removed from the RB tree and idle queue and returned to the
 * driver in one batched hfi_free_tid() call; busy tids stay cached
 * until their last reference is released.
 */
psm2_error_t
ips_tidcache_invalidation(struct ips_tid *tidc)
{
	cl_qmap_t *p_map = &tidc->tid_cachemap;
	uint32_t i, j, idx, tidcnt;
	psm2_error_t err;

	/*
	 * get a list of invalidated tids from driver,
	 * driver will clear the event bit before return.
	 */
	tidcnt = 0;
	if (hfi_get_invalidation(tidc->context->ctrl,
		    (uint64_t) (uintptr_t) tidc->tid_array, &tidcnt) < 0) {
		/* If failed to get invalidation info, it's fatal error */
		err = psmi_handle_error(tidc->context->ep,
			PSM2_EP_DEVICE_FAILURE,
			"Failed to get invalidation info");
		return err;
	}
	psmi_assert(tidcnt > 0 && tidcnt <= tidc->tid_ctrl->tid_num_max);

	j = 0;
	for (i = 0; i < tidcnt; i++) {
		/*
		 * Driver only returns tidctrl=1 or tidctrl=2.
		 */
		idx = 2*IPS_TIDINFO_GET_TID(tidc->tid_array[i]) +
			IPS_TIDINFO_GET_TIDCTRL(tidc->tid_array[i]);
		psmi_assert(idx != 0);
		psmi_assert(idx <= tidc->tid_ctrl->tid_num_max);

		/*
		 * sanity check.
		 */
		psmi_assert(p_map->root[idx].payload.tidinfo ==
				tidc->tid_array[i]);
		psmi_assert(LENGTH(idx) ==
				IPS_TIDINFO_GET_LENGTH(tidc->tid_array[i]));

		/*
		 * if the tid is already invalidated, ignore it,
		 * but do sanity check.
		 */
		if (INVALIDATE(idx) != 0) {
			psmi_assert(REFCNT(idx) == 0);
			continue;
		}

		/*
		 * mark the tid invalidated.
		 */
		INVALIDATE(idx) = 1;

		/*
		 * if the tid is idle, remove the tid from RB tree
		 * and idle queue, put on free list.
		 */
		if (REFCNT(idx) == 0) {
			IDLE_REMOVE(idx);
			ips_cl_qmap_remove_item(p_map, &p_map->root[idx]);

			/* compact the free list in place within
			 * tid_array (j <= i always holds). */
			if (i != j)
				tidc->tid_array[j] = tidc->tid_array[i];
			j++;
		}
	}

	if (j > 0) {
		/*
		 * call driver to free the tids.
		 */
		if (hfi_free_tid(tidc->context->ctrl,
			    (uint64_t) (uintptr_t) tidc->tid_array, j) < 0) {
			/* If failed to unpin pages, it's fatal error */
			err = psmi_handle_error(tidc->context->ep,
				PSM2_EP_DEVICE_FAILURE,
				"Failed to tid free %d tids", j);
			return err;
		}
	}

	return PSM2_OK;
}
/*
 * Register a new buffer with driver, and cache the tidinfo.
 *
 * tidc     - tid cache state
 * start    - page-aligned start address of the buffer to pin
 * length   - number of bytes to register (may be clipped)
 * firstidx - out: cache index of the first (lowest-address) tid
 *
 * Evicts idle cached tids when the cache or the driver's pinned-memory
 * limit is full.  Returns PSM2_OK, PSM2_OK_NO_PROGRESS when every tid
 * is busy, or PSM2_EP_DEVICE_FAILURE when the driver cannot pin pages.
 */
static psm2_error_t
ips_tidcache_register(struct ips_tid *tidc,
		unsigned long start, uint32_t length, uint32_t *firstidx)
{
	cl_qmap_t *p_map = &tidc->tid_cachemap;
	uint32_t tidoff, tidlen;
	uint32_t idx, tidcnt;
	psm2_error_t err;

	/*
	 * make sure we have at least one free tid to
	 * register the new buffer.
	 */
	if (NTID == tidc->tid_cachesize) {
		/* all tids are in active use, error? */
		if (NIDLE == 0)
			return PSM2_OK_NO_PROGRESS;

		/*
		 * free the first tid in idle queue.
		 */
		idx = IPREV(IHEAD);
		tidc->tid_array[0] = p_map->root[idx].payload.tidinfo;
		err = ips_tidcache_remove(tidc, 1);
		if (err)
			return err;
	}
	psmi_assert(NTID < tidc->tid_cachesize);

	/* Clip length if it exceeds worst case tid allocation,
	   where each entry in the tid array can accomodate only
	   1 page. */
	/* NOTE(review): 4096 assumes a 4 KiB page size -- confirm this
	 * matches the driver's page size on all supported platforms. */
	if (length > 4096*tidc->tid_ctrl->tid_num_max) {
		length = 4096*tidc->tid_ctrl->tid_num_max;
	}

	/*
	 * register the new buffer.
	 */
retry:
	tidcnt = 0;
	if (hfi_update_tid(tidc->context->ctrl,
		    (uint64_t) start, &length,
		    (uint64_t) tidc->tid_array, &tidcnt) < 0) {
		/* if driver reaches lockable memory limit */
		if (errno == ENOMEM && NIDLE) {
			uint64_t lengthEvicted =
				ips_tidcache_evict(tidc,length);

			if (lengthEvicted >= length)
				goto retry;
		}

		/* Unable to pin pages? retry later */
		return PSM2_EP_DEVICE_FAILURE;
	}
	psmi_assert_always(tidcnt > 0);
	psmi_assert((tidcnt+NTID) <= tidc->tid_cachesize);

	/*
	 * backward processing because we want to return
	 * the first RB index in the array.
	 */
	idx = 0;
	tidoff = length;
	while (tidcnt) {
		/*
		 * Driver only returns tidctrl=1 or tidctrl=2.
		 */
		tidcnt--;
		idx = 2*IPS_TIDINFO_GET_TID(tidc->tid_array[tidcnt]) +
			IPS_TIDINFO_GET_TIDCTRL(tidc->tid_array[tidcnt]);
		tidlen = IPS_TIDINFO_GET_LENGTH(tidc->tid_array[tidcnt]);

		/*
		 * sanity check.
		 */
		psmi_assert(idx != 0);
		psmi_assert(idx <= tidc->tid_ctrl->tid_num_max);
		psmi_assert(INVALIDATE(idx) != 0);
		psmi_assert(REFCNT(idx) == 0);

		/*
		 * clear the tid invalidated.
		 */
		INVALIDATE(idx) = 0;

		/*
		 * put the tid into a RB node.
		 */
		tidoff -= tidlen << 12;
		START(idx) = start + tidoff;
		LENGTH(idx) = tidlen;
		p_map->root[idx].payload.tidinfo = tidc->tid_array[tidcnt];

		/*
		 * put the node into RB tree and idle queue head.
		 */
		IDLE_INSERT(idx);
		ips_cl_qmap_insert_item(p_map, &p_map->root[idx]);
	}
	psmi_assert(idx != 0);
	psmi_assert(tidoff == 0);
	*firstidx = idx;

	return PSM2_OK;
}