// Enqueue a row on the funnel's shared queue, applying back-pressure:
// the caller blocks while the buffered footprint exceeds FUNNEL_MIN_BUFF_SIZE.
// Thread-safe w.r.t. other pushers and the consumer via fullCrit.
void push(const void *row)
{
    CriticalBlock b2(fullCrit); // exclusivity for totSize / full
    if (stopped)
        return; // NOTE(review): row is not released here — presumably the caller
                // or stop path owns cleanup when stopped; verify against callers
    rows.enqueue(row);
    totSize += thorRowMemoryFootprint(row); // account buffered memory, not row count
    // Back-pressure loop: while over the high-water mark, mark the buffer full
    // and sleep. CriticalUnblock releases fullCrit for the duration of the wait
    // (so the consumer can drain and signal) and reacquires it at the end of
    // each iteration, so the condition is always re-tested under the lock.
    while (totSize > FUNNEL_MIN_BUFF_SIZE)
    {
        full = true;
        CriticalUnblock b(fullCrit);
        fullSem.wait(); // block pushers on crit
    }
}
// Consumer side of the overflowable row buffer: returns the next row, draining
// in order of staging: the ready queue (out), then rows spilled to disk
// (diskin, reloaded via load()), then the in-memory input queue (in).
// Returns NULL at end of input (eoi with nothing buffered). Blocks on waitsem
// when no data is available yet. Single-consumer: the 'waiting' flag doubles
// as a reentrancy guard.
const void *nextRow()
{
    REENTRANCY_CHECK(getrecheck)
    const void * ret;
    assertex(out);
    assertex(!waiting); // reentrancy checks
    loop
    {
        {
            SpinBlock block(lock); // guards out/diskin/in/insz/waiting/waitflush/eoi
            // 1) Rows already staged for output.
            if (out->ordinality())
            {
                ret = out->dequeue();
                break;
            }
            // 2) Rows previously spilled to disk: reload a block into 'out'.
            //    NOTE(review): assumes load() leaves at least one row in 'out' —
            //    the unconditional dequeue would yield garbage/NULL otherwise; verify.
            if (diskin.ordinality())
            {
                load();
                ret = out->dequeue();
                break;
            }
            if (in)
            {
                if (in->ordinality())
                {
                    // 3) Consume directly from the producer's in-memory queue,
                    //    keeping the byte accounting (insz) in step.
                    ret = in->dequeue();
                    if (ret)
                    {
                        size32_t sz = thorRowMemoryFootprint(serializer, ret);
                        assertex(insz>=sz);
                        insz -= sz;
                    }
                    break;
                }
                else
                {
                    // Input queue empty: release anyone blocked in a flush...
                    if (waitflush)
                    {
                        waitflushsem.signal();
                        waitflush = false;
                    }
                    // ...and if the producer has signalled end-of-input, we're done.
                    if (eoi)
                        return NULL;
                }
            }
            // Nothing available: publish that we are sleeping (under the lock,
            // so putRow's check of 'waiting' is race-free), then wait outside it.
            assertex(!waiting); // reentrancy check
            waiting = true;
        }
        waitsem.wait(); // woken by putRow/flush when data (or eoi) arrives
    }
    return ret;
}
// Producer side of the overflowable row buffer: appends a row to the in-memory
// queue, spilling the queue to disk first if adding this row would exceed
// blocksize, and wakes a consumer blocked in nextRow. Rows arriving after
// end-of-input are released and dropped.
void putRow(const void *row)
{
    REENTRANCY_CHECK(putrecheck)
    size32_t sz = thorRowMemoryFootprint(serializer, row); // computed before taking the lock
    SpinBlock block(lock); // guards eoi/in/insz/waiting
    if (eoi)
    {
        // Input already closed: discard the row rather than queueing it.
        ReleaseThorRow(row);
        return;
    }
    assertex(in); // reentry check
    // Spill test models the serialized block layout: payload bytes plus one
    // marker byte per row and a terminating end byte must fit in blocksize.
    // NOTE(review): diskflush() is invoked while this SpinBlock is held —
    // presumably it manages/releases the lock internally around the disk I/O; verify.
    if (sz+insz+(in->ordinality()+2)*sizeof(byte)>blocksize) // byte extra per row + end byte
        diskflush();
    in->enqueue(row);
    insz += sz; // keep byte accounting in step with the queue (see nextRow)
    // Wake the consumer if it parked itself in nextRow; 'waiting' is cleared
    // under the lock so signal/wait never double-fire.
    if (waiting)
    {
        waitsem.signal();
        waiting = false;
    }
}