int main()
{
    /* g, dx, dy, vfind(), hfind(), fx and fy are declared elsewhere in this program. */
    int N, M, L, W, O;
    while (scanf("%d %d %d %d", &N, &M, &L, &W) == 4) {
        scanf("%d", &O);
        memset(g, '0', sizeof(g));
        for (int i = 0; i < O; i++) {
            int x, y;
            scanf("%d %d", &x, &y);
            g[y][x] = '2';
        }
        int lx = 0, ly = 0, cmd;
        while (scanf("%d", &cmd) == 1) {
            int tx, ty, rx, ry;
            rx = lx + W - 1, ry = ly + L - 1;
            if (cmd == 0) {
                /* Command 0: print the grid, overlaying the W-by-L piece with '1'. */
                for (int i = 0; i < N; i++) {
                    static char out[512];
                    out[M] = '\0';
                    for (int j = 0; j < M; j++) {
                        if (ly <= i && i <= ry && lx <= j && j <= rx)
                            out[j] = '1';
                        else
                            out[j] = g[i][j];
                    }
                    puts(out);
                }
                continue;
            }
            tx = lx + dx[cmd], ty = ly + dy[cmd];
            if (tx < 0 || ty < 0 || tx + W > M || ty + L > N)
                continue;
            int ob = 0;
            if (cmd == 1)
                ob = vfind(rx + 1, ly, ry);
            else if (cmd == 2)
                ob = hfind(ry + 1, lx, rx);
            else if (cmd == 3)
                ob = vfind(lx - 1, ly, ry);
            else if (cmd == 4)
                ob = hfind(ly - 1, lx, rx);
            else if (cmd == 5) {
                ob = vfind(rx + 1, ly + 1, ry + 1);
                if (ob <= 1)
                    ob += hfind(ry + 1, lx + 1, rx);
            }
            if (ob > 1)
                continue;
            if (ob == 1)
                g[fy][fx] = '0';
            lx = tx, ly = ty;
        }
    }
    return 0;
}
static _ctx *
_profile_thread(PyThreadState *ts)
{
    uintptr_t ctx_id;
    _ctx *ctx;
    _hitem *it;

    ctx_id = _current_context_id(ts);
    it = hfind(contexts, ctx_id);
    if (!it) {
        ctx = _create_ctx();
        if (!ctx) {
            return NULL;
        }
        if (!hadd(contexts, ctx_id, (uintptr_t)ctx)) {
            _del_ctx(ctx);
            if (!flput(flctx, ctx)) {
                _log_err(10);
            }
            _log_err(11);
            return NULL;
        }
    } else {
        ctx = (_ctx *)it->val;
    }

    ts->use_tracing = 1;
    ts->c_profilefunc = _yapp_callback;
    ctx->id = ctx_id;
    ctx->tid = ts->thread_id;
    return ctx;
}
static PyObject *
shift_context_time(PyObject *self, PyObject *args)
{
    int i;
    long context_id;
    double amount;
    long long shifted_amount;
    _hitem *it;
    _ctx *ctx;

    if (!PyArg_ParseTuple(args, "ld", &context_id, &amount)) {
        return NULL;
    }

    shifted_amount = (long long)(amount / tickfactor());
    it = hfind(contexts, context_id);
    if (!it || !it->val) {
        // This context hasn't executed yet during this Yappi run; just abort.
        Py_RETURN_NONE;
    }

    // Advance the start time for each frame in this context's call stack
    // by the duration for which this context was paused.
    ctx = (_ctx *)it->val;
    for (i = 0; i <= ctx->cs->head; i++) {
        ctx->cs->_items[i].t0 += shifted_amount;
    }

    // Advance the start time for the whole context by the pause duration.
    ctx->t0 += shifted_amount;

    Py_RETURN_NONE;
}
// Maps the PyCodeObject to our internal pit item via hash table.
static _pit *
_code2pit(PyFrameObject *fobj)
{
    _hitem *it;
    PyCodeObject *cobj;
    _pit *pit;

    cobj = fobj->f_code;
    it = hfind(current_ctx->pits, (uintptr_t)cobj);
    if (it) {
        return ((_pit *)it->val);
    }

    pit = _create_pit();
    if (!pit)
        return NULL;
    if (!hadd(current_ctx->pits, (uintptr_t)cobj, (uintptr_t)pit))
        return NULL;

    pit->name = NULL;
    Py_INCREF(cobj->co_filename);
    pit->modname = cobj->co_filename;
    pit->lineno = cobj->co_firstlineno;

    PyFrame_FastToLocals(fobj);
    if (cobj->co_argcount) {
        const char *firstarg = PyStr_AS_CSTRING(PyTuple_GET_ITEM(cobj->co_varnames, 0));
        if (!strcmp(firstarg, "self")) {
            PyObject *locals = fobj->f_locals;
            if (locals) {
                PyObject *self = PyDict_GetItemString(locals, "self");
                if (self) {
                    PyObject *class_obj = PyObject_GetAttrString(self, "__class__");
                    if (class_obj) {
                        PyObject *class_name = PyObject_GetAttrString(class_obj, "__name__");
                        if (class_name) {
                            pit->name = PyStr_FromFormat("%s.%s",
                                                         PyStr_AS_CSTRING(class_name),
                                                         PyStr_AS_CSTRING(cobj->co_name));
                            Py_DECREF(class_name);
                        }
                        Py_DECREF(class_obj);
                    }
                }
            }
        }
    }
    if (!pit->name) {
        Py_INCREF(cobj->co_name);
        pit->name = cobj->co_name;
    }
    PyFrame_LocalsToFast(fobj, 0);

    return pit;
}
static long
get_rec_level(uintptr_t key)
{
    _hitem *it;

    it = hfind(current_ctx->rec_levels, key);
    if (!it) {
        _log_err(1);
        return -1; // should not happen
    }
    return it->val;
}
dbuf_t *dtry_reasm_timed(void *pile, uint32_t xid, char *data, uint16_t len,
                         uint16_t offs, int more, time_t now)
{
    reasm_pile_struct_t *rp = (void *)(((dbuf_t *)pile)->buf);
    reasm_chunk_t *chk;

    if (now > 0) {
        check_timeouts(rp, now);
    }

    if (offs + len > rp->mtu) {
        debug(DBG_REASM, 10, "Offset + length (%d + %d) of fragment > MTU (%d), discard",
              offs, len, rp->mtu);
        return NULL;
    }
    if ((offs > 0) && (offs < HOLE_MIN_LENGTH)) {
        debug(DBG_REASM, 10, "Offset %d less than min hole length %d\n", offs, HOLE_MIN_LENGTH);
        return NULL;
    }

    chk = hfind(rp->chs, &xid, sizeof(xid));
    if (!chk) {
        debug(DBG_REASM, 10, "Reasm chunk %u not found, creating", xid);
        chk = malloc(sizeof(reasm_chunk_t));
        chk->xid = xid;
        chk->maxfrags = rp->maxfrags;
        chk->hole = 0;
        chk->esize = rp->ftu;
        chk->d = dalloc(chk->esize);
        chk->deadline = now + rp->reasm_timeout;
        memset(chk->d->buf, 0xaa, chk->d->size);
        hole_set(chk, 0, chk->esize, 0);
        hinsert(rp->chs, &xid, sizeof(xid), chk, chk_destructor, NULL, NULL, NULL);
        TAILQ_INSERT_TAIL(&rp->lru, chk, entries);
    } else {
        debug(DBG_REASM, 10, "Reasm chunk %u found", xid);
    }

    debug(DBG_REASM, 100, "Chunk data (hole: %d, esize: %d):", chk->hole, chk->esize);
    debug_dump(DBG_REASM, 100, chk->d->buf, chk->d->size);

    if (offs + len > chk->d->size) {
        debug(DBG_REASM, 10, "Reasm chunk %u overflow - %d + %d > %d",
              xid, offs, len, chk->d->size);
        /* We already checked the MTU overflow above, so we can safely reallocate here. */
        int oldsize = chk->d->size;
        dgrow(chk->d, rp->mtu - chk->d->size);
        hole_grow(chk, oldsize - 1, chk->d->size);
        chk->esize = chk->d->size;
        debug(DBG_REASM, 100, "Fresh chunk data after growth to MTU:");
        debug_dump(DBG_REASM, 100, chk->d->buf, chk->d->size);
    }

    chk->atime = now;
    return dperform_reasm(rp, chk, xid, data, len, offs, more);
}
static _ctx *
_thread2ctx(PyThreadState *ts)
{
    _hitem *it;

    it = hfind(contexts, _current_context_id(ts));
    if (!it) {
        // In some circumstances the callback functions can be called before the
        // context entry is created (see issue 21). To prevent this problem we
        // ensure the context entry for the thread is always available here.
        return _profile_thread(ts);
    }
    return (_ctx *)it->val;
}
STATIC bool getValue(htab *t, const unsigned char *key, unsigned char **val)
{
    int16_t len = 0;

    if (t == NULL || key == NULL) {
        assert(LIB_NAME "Hash structure and key string must not be NULL" && (false || Storage_TestMode));
        return false;
    }
    if (Utils_GetCharArrayLen(key, &len, KEY_VAL_MIN_STR_LEN, KEY_VAL_MAX_STR_LEN) == false)
        return false;
    len += UTILS_STR_LEN_SIZE;
    if (hfind(t, key, len) == false) {
        return false;
    }
    if (Utils_GetCharArrayLen((unsigned char *)hstuff(t), &len, KEY_VAL_MIN_STR_LEN, KEY_VAL_MAX_STR_LEN) == false)
        return false;
    Utils_CreateAndCopyUcString(val, (unsigned char *)hstuff(t), len + UTILS_STR_LEN_SIZE);
    return true;
}
void hash_table2::add(size_t x, size_t y)
{
    size_t h = hash(x, y);
    if (hfind(h, x, y) == NOT_FOUND) {
        if (check_table()) {
            // The table was resized, so the bucket index must be recomputed.
            h = hash(x, y);
        }
        bucket2 new_bucket;
        new_bucket.x = x;
        new_bucket.y = y;
        new_bucket.next = table[h];
        table[h] = buckets.size();
        buckets.push_back(new_bucket);
    }
}
static int
incr_rec_level(uintptr_t key)
{
    _hitem *it;

    it = hfind(current_ctx->rec_levels, key);
    if (it) {
        it->val++;
    } else {
        if (!hadd(current_ctx->rec_levels, key, 1)) {
            _log_err(2);
            return 0; // should not happen
        }
    }
    return 1;
}
static _pit *
_ccode2pit(void *cco)
{
    PyCFunctionObject *cfn;
    _hitem *it;
    PyObject *name;

    cfn = cco;
    // Issue #15:
    // Hashing cfn to the pits table causes different object methods
    // to be hashed into the same slot. Use cfn->m_ml for hashing the
    // Python C functions.
    it = hfind(current_ctx->pits, (uintptr_t)cfn->m_ml);
    if (!it) {
        _pit *pit = _create_pit();
        if (!pit)
            return NULL;
        if (!hadd(current_ctx->pits, (uintptr_t)cfn->m_ml, (uintptr_t)pit))
            return NULL;

        pit->builtin = 1;
        pit->modname = _pycfunction_module_name(cfn);
        pit->lineno = 0;

        // built-in method?
        if (cfn->m_self != NULL) {
            name = PyStr_FromString(cfn->m_ml->ml_name);
            if (name != NULL) {
                PyObject *obj_type = PyObject_Type(cfn->m_self);
                PyObject *mo = _PyType_Lookup((PyTypeObject *)obj_type, name);
                Py_XINCREF(mo);
                Py_XDECREF(obj_type);
                Py_DECREF(name);
                if (mo != NULL) {
                    pit->name = PyObject_Repr(mo);
                    Py_DECREF(mo);
                    return pit;
                }
            }
            PyErr_Clear();
        }
        pit->name = PyStr_FromString(cfn->m_ml->ml_name);
        return pit;
    }
    return ((_pit *)it->val);
}
STATIC bool clearKey(const SecureStorageS *storage, const unsigned char *key)
{
    int16_t len = 0;
    htab *t = NULL;

    if (storage == NULL || key == NULL) {
        assert(LIB_NAME "Storage structure and key string must not be NULL" && (false || Storage_TestMode));
        return false;
    }
    t = storage->Data;
    if (Utils_GetCharArrayLen(key, &len, KEY_VAL_MIN_STR_LEN, KEY_VAL_MAX_STR_LEN) == false)
        return false;
    if (key != NULL && hfind(t, key, len + UTILS_STR_LEN_SIZE) == true) {
        // remove the existing item
        Utils_Free(hkey(t));
        Utils_Free(hstuff(t));
        hdel(t);
        return true;
    }
    return false;
}
static int
decr_rec_level(uintptr_t key)
{
    _hitem *it;
    uintptr_t v;

    it = hfind(current_ctx->rec_levels, key);
    if (it) {
        v = it->val--; /* suppress warning -- it is safe to cast long vs pointers */
        if (v == 0) {
            hfree(current_ctx->rec_levels, it);
        }
    } else {
        _log_err(3);
        return 0; // should not happen
    }
    return 1;
}
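/*
 * The three helpers above (get_rec_level, incr_rec_level, decr_rec_level)
 * implement per-function recursion counting keyed by a code-object address.
 * Below is a minimal, self-contained sketch of the same pattern; it replaces
 * yappi's _hitem hash table with a tiny fixed-size lookup so it compiles on
 * its own, and every name in it (rec_entry, rec_find, recurse, ...) is
 * illustrative rather than part of yappi.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uintptr_t key; long val; int used; } rec_entry;
static rec_entry rec_table[64];

static rec_entry *rec_find(uintptr_t key)
{
    for (int i = 0; i < 64; i++)
        if (rec_table[i].used && rec_table[i].key == key)
            return &rec_table[i];
    return NULL;
}

static void incr_rec(uintptr_t key)
{
    rec_entry *e = rec_find(key);
    if (e) { e->val++; return; }
    for (int i = 0; i < 64; i++)
        if (!rec_table[i].used) {
            rec_table[i].key = key;
            rec_table[i].val = 1;
            rec_table[i].used = 1;
            return;
        }
}

static void decr_rec(uintptr_t key)
{
    rec_entry *e = rec_find(key);
    if (e && --e->val == 0)
        e->used = 0; /* drop the entry once the recursion fully unwinds */
}

/* Demo: track the recursion depth of a function keyed by its own address. */
static void recurse(int depth)
{
    uintptr_t key = (uintptr_t)&recurse;
    incr_rec(key);
    printf("recursion level: %ld\n", rec_find(key)->val);
    if (depth > 1)
        recurse(depth - 1);
    decr_rec(key);
}

int main(void)
{
    recurse(3);
    return 0;
}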
void *hfinds(htable_t *ht, char *key)
{
    /* String-keyed lookup: the terminating NUL is counted as part of the key. */
    return hfind(ht, key, strlen(key) + 1);
}
/* SRHFIND: If a SHORTREF map was declared, return the ptr to its header.
   Return NULL if it is not defined.
*/
PSRH srhfind(UNCH *sname)        /* SHORTREF map name (with length and EOS). */
{
    return((PSRH)hfind((THASH)srhtab, sname, 0));
}
/* DCNFIND: If a notation was declared, return its DCNCB.
   Return NULL if it is not defined.
*/
struct dcncb *dcnfind(UNCH *nname)   /* Notation name (with length and EOS). */
{
    return((PDCB)hfind((THASH)dcntab, nname, 0));
}
bool hash_table3::find(size_t x, size_t y, size_t z)
{
    return (hfind(hash(x, y, z), x, y, z) != NOT_FOUND);
}
bool hash_table2::find(size_t x, size_t y)
{
    return (hfind(hash(x, y), x, y) != NOT_FOUND);
}
char *hfindss(htable_t *ht, char *key)
{
    /* Same NUL-inclusive key length as hfinds(), but casts the stored value to char *. */
    return (char *)hfind(ht, key, strlen(key) + 1);
}
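/*
 * Both hfinds() and hfindss() pass strlen(key) + 1 as the key length, so the
 * terminating NUL is stored and compared as part of the key. Below is a
 * minimal, self-contained sketch of that convention; the fixed entry table
 * and every name in it (entry, finds, the sample keys) are illustrative and
 * not part of the htable_t library above.
 */
#include <stdio.h>
#include <string.h>

struct entry { const char *key; size_t klen; const char *val; };

static const struct entry entries[] = {
    { "host",     sizeof("host"),     "example.org" }, /* sizeof() counts the NUL */
    { "hostname", sizeof("hostname"), "node-17"     },
};

/* Byte-wise lookup mirroring hfind(ht, key, strlen(key) + 1). */
static const char *finds(const char *key)
{
    size_t klen = strlen(key) + 1; /* include the terminating NUL */
    for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
        if (entries[i].klen == klen && memcmp(entries[i].key, key, klen) == 0)
            return entries[i].val;
    return NULL;
}

int main(void)
{
    /* "host" matches only the 5-byte key "host\0", never a prefix of "hostname". */
    printf("%s\n", finds("host"));     /* prints "example.org" */
    printf("%s\n", finds("hostname")); /* prints "node-17" */
    return 0;
}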