/* itoa() is not a standard function, and we cannot safely call printf()
 * after suspending threads. So, we just implement our own copy. A
 * recursive approach is the easiest here.
 *
 * Converts "i" to decimal ASCII in "buf" and returns a pointer to the
 * terminating '\0', so callers can append further text at the returned
 * position. "buf" must be large enough for the digits, an optional '-',
 * and the terminator (12 bytes suffice for a 32-bit int).
 */
static char *itoa_unsigned_rec(char *buf, unsigned int u) {
  /* Recurse first so the most-significant digit is emitted first. */
  if (u >= 10)
    buf = itoa_unsigned_rec(buf, u/10);
  *buf++ = (char)('0' + u%10);
  *buf = '\000';
  return buf;
}

static char *local_itoa(char *buf, int i) {
  if (i < 0) {
    *buf++ = '-';
    /* Negate in unsigned arithmetic: -i overflows a signed int when
     * i == INT_MIN (undefined behavior), whereas 0u - (unsigned)i is
     * well defined and yields the correct magnitude. */
    return itoa_unsigned_rec(buf, 0u - (unsigned)i);
  }
  return itoa_unsigned_rec(buf, (unsigned)i);
}
/* Format the double "n" into "buf" with exactly "dp" digits after the
 * decimal point. Returns the number of characters written (excluding
 * the terminating '\0'). The integral part is printed as a magnitude;
 * the sign prefix for negative values is the caller's responsibility.
 */
static size_t local_ftoa(char *buf, double n, size_t dp)
{
	char *out = buf;
	long long whole = (long long)n;
	double frac = n - (double)whole;

	/* Work with a non-negative fractional remainder. */
	if (frac < 0)
		frac = -frac;

	/* Integral part (absolute value; caller emits any '-'). */
	out += local_itoa(out, (whole < 0) ? -whole : whole, 10, false);
	*out++ = '.';

	/* Peel off one decimal digit per iteration. */
	while (dp--) {
		char digit;

		frac *= 10;
		digit = (char)frac;
		frac -= digit;
		*out++ = (char)('0' + digit);
	}

	*out = '\0';

	return (size_t)(out - buf);
}
/**
 * Print a formatted string
 *
 * @param fmt Formatted string
 * @param ap  Variable argument
 * @param vph Print handler
 * @param arg Handler argument
 *
 * @return 0 if success, otherwise errorcode
 *
 * Extensions:
 *
 * <pre>
 *   %b  (char *, size_t)        Buffer string with pointer and length
 *   %r  (struct pl)             Pointer-length object
 *   %w  (uint8_t *, size_t)     Binary buffer to hexadecimal format
 *   %j  (struct sa *)           Socket address - address part only
 *   %J  (struct sa *)           Socket address and port - like 1.2.3.4:1234
 *   %H  (re_printf_h *, void *) Print handler with argument
 *   %v  (char *fmt, va_list *)  Variable argument list
 *   %m  (int)                   Describe an error code
 * </pre>
 *
 * Reserved for the future:
 *
 *   %k
 *   %y
 *
 */
int re_vhprintf(const char *fmt, va_list ap, re_vprintf_h *vph, void *arg)
{
	uint8_t base, *bptr;
	char pch, ch, num[NUM_SIZE], addr[64], msg[256];
	enum length_modifier lenmod = LENMOD_NONE;
	struct re_printf pf;
	/* fm: true while we are inside a conversion specification ("%...");
	 * plr: '-' flag seen (left-align / pad right). */
	bool fm = false, plr = false;
	const struct pl *pl;
	/* fpad == (size_t)-1 is the "no field width before '.'" sentinel. */
	size_t pad = 0, fpad = -1, len, i;
	/* p scans fmt; p0 marks the start of the pending literal run. */
	const char *str, *p = fmt, *p0 = fmt;
	const struct sa *sa;
	re_printf_h *ph;
	void *ph_arg;
	va_list *apl;
	int err = 0;
	void *ptr;
	uint64_t n;
	int64_t sn;
	bool uc = false;  /* uppercase hex for %X / %W */
	double dbl;

	if (!fmt || !vph)
		return EINVAL;

	pf.vph = vph;
	pf.arg = arg;

	for (;*p && !err; p++) {

		if (!fm) {
			/* Literal-copy phase: accumulate until next '%'. */
			if (*p != '%')
				continue;

			/* Reset per-conversion state. */
			pch = ' ';
			plr = false;
			pad = 0;
			fpad = -1;
			lenmod = LENMOD_NONE;
			uc = false;

			/* Flush the literal text preceding this '%'. */
			if (p > p0)
				err |= vph(p0, p - p0, arg);

			fm = true;
			continue;
		}

		/* Conversion phase; flag/width cases re-set fm = true to
		 * keep consuming the specification. */
		fm = false;
		base = 10;

		switch (*p) {

		case '-':
			plr = true;
			fm = true;
			break;

		case '.':
			/* Width seen so far becomes the field width; digits
			 * after '.' accumulate into pad as the precision. */
			fpad = pad;
			pad = 0;
			fm = true;
			break;

		case '%':
			ch = '%';
			err |= vph(&ch, 1, arg);
			break;

		case 'b':
			str = va_arg(ap, const char *);
			len = va_arg(ap, size_t);
			err |= write_padded(str, str ? len : 0, pad, ' ',
					    plr, NULL, vph, arg);
			break;

		case 'c':
			ch = va_arg(ap, int);
			err |= write_padded(&ch, 1, pad, ' ',
					    plr, NULL, vph, arg);
			break;

		case 'd':
		case 'i':
			/* Fetch with the width implied by length modifier. */
			switch (lenmod) {

			case LENMOD_SIZE:
				sn = va_arg(ap, ssize_t);
				break;

			default:
			case LENMOD_LONG_LONG:
				sn = va_arg(ap, signed long long);
				break;

			case LENMOD_LONG:
				sn = va_arg(ap, signed long);
				break;

			case LENMOD_NONE:
				sn = va_arg(ap, signed);
				break;
			}
			/* Magnitude is formatted; the '-' prefix is added by
			 * write_padded via prfx_neg. */
			len = local_itoa(num, (sn < 0) ? -sn : sn, base,
					 false);
			err |= write_padded(num, len, pad, plr ? ' ' : pch,
					    plr, (sn < 0) ? prfx_neg : NULL,
					    vph, arg);
			break;

		case 'f':
		case 'F':
			dbl = va_arg(ap, double);
			/* No '.' seen: pad is the field width, and the
			 * precision defaults to 6 below. */
			if (fpad == (size_t)-1) {
				fpad = pad;
				pad = 0;
			}
			if (isinf(dbl)) {
				err |= write_padded("inf", 3, fpad, ' ',
						    plr, NULL, vph, arg);
			}
			else if (isnan(dbl)) {
				err |= write_padded("nan", 3, fpad, ' ',
						    plr, NULL, vph, arg);
			}
			else {
				len = local_ftoa(num, dbl,
						 pad ? min(pad, DEC_SIZE) : 6);
				err |= write_padded(num, len, fpad,
						    plr ? ' ' : pch, plr,
						    (dbl<0) ? prfx_neg : NULL,
						    vph, arg);
			}
			break;

		case 'H':
			/* Delegate printing to a user-supplied handler. */
			ph = va_arg(ap, re_printf_h *);
			ph_arg = va_arg(ap, void *);
			if (ph)
				err |= ph(&pf, ph_arg);
			break;

		case 'l':
			/* 'l' may repeat; each one bumps the modifier
			 * (NONE -> LONG -> LONG_LONG). */
			++lenmod;
			fm = true;
			break;

		case 'm':
			str = str_error(va_arg(ap, int), msg, sizeof(msg));
			err |= write_padded(str, str_len(str), pad, ' ',
					    plr, NULL, vph, arg);
			break;

		case 'p':
			ptr = va_arg(ap, void *);
			if (ptr) {
				len = local_itoa(num, (unsigned long int)ptr,
						 16, false);
				err |= write_padded(num, len, pad,
						    plr ? ' ' : pch, plr,
						    prfx_hex, vph, arg);
			}
			else {
				/* NULL pointers render as the nil string. */
				err |= write_padded(str_nil,
						    sizeof(str_nil) - 1,
						    pad, ' ', plr, NULL,
						    vph, arg);
			}
			break;

		case 'r':
			pl = va_arg(ap, const struct pl *);
			err |= write_padded(pl ? pl->p : NULL,
					    (pl && pl->p) ? pl->l : 0,
					    pad, ' ', plr, NULL, vph, arg);
			break;

		case 's':
			str = va_arg(ap, const char *);
			err |= write_padded(str, str_len(str), pad, ' ',
					    plr, NULL, vph, arg);
			break;

		case 'X':
			uc = true;
			/*@fallthrough@*/
		case 'x':
			base = 16;
			/*@fallthrough@*/
		case 'u':
			switch (lenmod) {

			case LENMOD_SIZE:
				n = va_arg(ap, size_t);
				break;

			default:
			case LENMOD_LONG_LONG:
				n = va_arg(ap, unsigned long long);
				break;

			case LENMOD_LONG:
				n = va_arg(ap, unsigned long);
				break;

			case LENMOD_NONE:
				n = va_arg(ap, unsigned);
				break;
			}
			len = local_itoa(num, n, base, uc);
			err |= write_padded(num, len, pad, plr ? ' ' : pch,
					    plr, NULL, vph, arg);
			break;

		case 'v':
			/* Recurse with a nested format and va_list. */
			str = va_arg(ap, char *);
			apl = va_arg(ap, va_list *);
			if (!str || !apl)
				break;
			err |= re_vhprintf(str, *apl, vph, arg);
			break;

		case 'W':
			uc = true;
			/*@fallthrough@*/
		case 'w':
			bptr = va_arg(ap, uint8_t *);
			len = va_arg(ap, size_t);
			len = bptr ? len : 0;
			pch = plr ? ' ' : pch;
			/* Each byte produces 2 hex chars, hence len*2 when
			 * computing the leading/trailing padding. */
			while (!plr && pad-- > (len * 2))
				err |= vph(&pch, 1, arg);
			for (i=0; i<len; i++) {
				const uint8_t v = *bptr++;
				uint32_t l = local_itoa(num, v, 16, uc);
				err |= write_padded(num, l, 2, '0',
						    false, NULL, vph, arg);
			}
			while (plr && pad-- > (len * 2))
				err |= vph(&pch, 1, arg);
			break;

		case 'z':
			lenmod = LENMOD_SIZE;
			fm = true;
			break;

		case 'j':
			sa = va_arg(ap, struct sa *);
			if (!sa)
				break;
			if (sa_ntop(sa, addr, sizeof(addr))) {
				/* Unprintable address renders as "?". */
				err |= write_padded("?", 1, pad, ' ',
						    plr, NULL, vph, arg);
				break;
			}
			err |= write_padded(addr, strlen(addr), pad, ' ',
					    plr, NULL, vph, arg);
			break;

		case 'J':
			sa = va_arg(ap, struct sa *);
			if (!sa)
				break;
			if (sa_ntop(sa, addr, sizeof(addr))) {
				err |= write_padded("?", 1, pad, ' ',
						    plr, NULL, vph, arg);
				break;
			}
#ifdef HAVE_INET6
			/* IPv6 address+port uses the [addr]:port form. */
			if (AF_INET6 == sa_af(sa)) {
				ch = '[';
				err |= vph(&ch, 1, arg);
			}
#endif
			err |= write_padded(addr, strlen(addr), pad, ' ',
					    plr, NULL, vph, arg);
#ifdef HAVE_INET6
			if (AF_INET6 == sa_af(sa)) {
				ch = ']';
				err |= vph(&ch, 1, arg);
			}
#endif
			ch = ':';
			err |= vph(&ch, 1, arg);
			len = local_itoa(num, sa_port(sa), 10, false);
			err |= write_padded(num, len, pad, plr ? ' ' : pch,
					    plr, NULL, vph, arg);
			break;

		default:
			if (('0' <= *p) && (*p <= '9')) {
				/* A leading '0' selects zero-padding;
				 * otherwise accumulate the decimal width. */
				if (!pad && ('0' == *p)) {
					pch = '0';
				}
				else {
					pad *= 10;
					pad += *p - '0';
				}
				fm = true;
				break;
			}

			/* Unknown conversion character. */
			ch = '?';
			err |= vph(&ch, 1, arg);
			break;
		}

		/* Conversion finished: next literal run starts after *p. */
		if (!fm)
			p0 = p + 1;
	}

	/* Flush any trailing literal text. */
	if (!fm && p > p0)
		err |= vph(p0, p - p0, arg);

	return err;
}
/* Runs in a cloned helper thread: enumerates all threads of this process
 * via /proc, attaches to (and thereby suspends) each one with ptrace,
 * then invokes args->callback with the collected pid list. Exits the
 * helper via sys__exit(); on failure args->result/args->err are set.
 * NOTE(review): must stay async-signal-safe / lock-free — no malloc(),
 * no stdio — because other threads are suspended while this runs.
 */
static void ListerThread(struct ListerParams *args) {
  int                found_parent = 0;
  pid_t              clone_pid  = sys_gettid(), ppid = sys_getppid();
  char               proc_self_task[80], marker_name[48], *marker_path;
  const char         *proc_paths[3];
  const char *const  *proc_path = proc_paths;
  int                proc = -1, marker = -1, num_threads = 0;
  int                max_threads = 0, sig;
  struct kernel_stat marker_sb, proc_sb;
  stack_t            altstack;

  /* Create "marker" that we can use to detect threads sharing the same
   * address space and the same file handles. By setting the FD_CLOEXEC flag
   * we minimize the risk of misidentifying child processes as threads;
   * and since there is still a race condition, we will filter those out
   * later, anyway.
   */
  if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
      sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
    /* Shared error exit: close any open fds, record errno, terminate. */
 failure:
    args->result = -1;
    args->err    = errno;
    if (marker >= 0)
      NO_INTR(sys_close(marker));
    sig_marker = marker = -1;
    if (proc >= 0)
      NO_INTR(sys_close(proc));
    sig_proc = proc = -1;
    sys__exit(1);
  }

  /* Compute search paths for finding thread directories in /proc */
  local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
  strcpy(marker_name, proc_self_task);
  marker_path = marker_name + strlen(marker_name);
  strcat(proc_self_task, "/task/");
  proc_paths[0] = proc_self_task; /* /proc/$$/task/ */
  proc_paths[1] = "/proc/";       /* /proc/         */
  proc_paths[2] = NULL;

  /* Compute path for marker socket in /proc */
  local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
  if (sys_stat(marker_name, &marker_sb) < 0) {
    goto failure;
  }

  /* Catch signals on an alternate pre-allocated stack. This way, we can
   * safely execute the signal handler even if we ran out of memory.
   */
  memset(&altstack, 0, sizeof(altstack));
  altstack.ss_sp    = args->altstack_mem;
  altstack.ss_flags = 0;
  altstack.ss_size  = ALT_STACKSIZE;
  sys_sigaltstack(&altstack, (const stack_t *)NULL);

  /* Some kernels forget to wake up traced processes, when the
   * tracer dies.  So, intercept synchronous signals and make sure
   * that we wake up our tracees before dying. It is the caller's
   * responsibility to ensure that asynchronous signals do not
   * interfere with this function.
   */
  sig_marker = marker;
  sig_proc   = -1;
  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
    struct kernel_sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction_ = SignalHandler;
    sys_sigfillset(&sa.sa_mask);
    /* SA_RESETHAND: a second fault while handling kills us for real. */
    sa.sa_flags      = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
    sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
  }

  /* Read process directories in /proc/... */
  for (;;) {
    /* Some kernels know about threads, and hide them in "/proc"
     * (although they are still there, if you know the process
     * id). Threads are moved into a separate "task" directory. We
     * check there first, and then fall back on the older naming
     * convention if necessary.
     */
    if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
      if (*++proc_path != NULL)
        continue;
      goto failure;
    }
    if (sys_fstat(proc, &proc_sb) < 0)
      goto failure;

    /* Since we are suspending threads, we cannot call any libc
     * functions that might acquire locks. Most notably, we cannot
     * call malloc(). So, we have to allocate memory on the stack,
     * instead. Since we do not know how much memory we need, we
     * make a best guess. And if we guessed incorrectly we retry on
     * a second iteration (by jumping to "detach_threads").
     *
     * Unless the number of threads is increasing very rapidly, we
     * should never need to do so, though, as our guestimate is very
     * conservative.
     */
    if (max_threads < proc_sb.st_nlink + 100)
      max_threads = proc_sb.st_nlink + 100;

    /* scope */ {
      /* VLA on purpose: stack allocation only (no malloc; see above). */
      pid_t pids[max_threads];
      int   added_entries = 0;
      sig_num_threads     = num_threads;
      sig_pids            = pids;
      for (;;) {
        struct kernel_dirent *entry;
        char buf[4096];
        ssize_t nbytes = sys_getdents(proc, (struct kernel_dirent *)buf,
                                      sizeof(buf));
        if (nbytes < 0)
          goto failure;
        else if (nbytes == 0) {
          if (added_entries) {
            /* Need to keep iterating over "/proc" in multiple
             * passes until we no longer find any more threads. This
             * algorithm eventually completes, when all threads have
             * been suspended.
             */
            added_entries = 0;
            sys_lseek(proc, 0, SEEK_SET);
            continue;
          }
          break;
        }
        /* Walk the variable-length dirent records in buf. */
        for (entry = (struct kernel_dirent *)buf;
             entry < (struct kernel_dirent *)&buf[nbytes];
             entry = (struct kernel_dirent *)((char *)entry +
                                              entry->d_reclen)) {
          if (entry->d_ino != 0) {
            const char *ptr = entry->d_name;
            pid_t pid;

            /* Some kernels hide threads by preceding the pid with a '.' */
            if (*ptr == '.')
              ptr++;

            /* If the directory is not numeric, it cannot be a
             * process/thread
             */
            if (*ptr < '0' || *ptr > '9')
              continue;
            pid = local_atoi(ptr);

            /* Attach (and suspend) all threads */
            if (pid && pid != clone_pid) {
              struct kernel_stat tmp_sb;
              char fname[entry->d_reclen + 48];
              strcat(strcat(strcpy(fname, "/proc/"),
                            entry->d_name), marker_path);

              /* Check if the marker is identical to the one we created */
              if (sys_stat(fname, &tmp_sb) >= 0 &&
                  marker_sb.st_ino == tmp_sb.st_ino) {
                long i, j;

                /* Found one of our threads, make sure it is no duplicate */
                for (i = 0; i < num_threads; i++) {
                  /* Linear search is slow, but should not matter much for
                   * the typically small number of threads.
                   */
                  if (pids[i] == pid) {
                    /* Found a duplicate; most likely on second pass */
                    goto next_entry;
                  }
                }

                /* Check whether data structure needs growing */
                if (num_threads >= max_threads) {
                  /* Back to square one, this time with more memory */
                  NO_INTR(sys_close(proc));
                  goto detach_threads;
                }

                /* Attaching to thread suspends it */
                pids[num_threads++] = pid;
                sig_num_threads = num_threads;
                if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
                               (void *)0) < 0) {
                  /* If operation failed, ignore thread. Maybe it
                   * just died? There might also be a race
                   * condition with a concurrent core dumper or
                   * with a debugger. In that case, we will just
                   * make a best effort, rather than failing
                   * entirely.
                   */
                  num_threads--;
                  sig_num_threads = num_threads;
                  goto next_entry;
                }
                while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
                  if (errno != EINTR) {
                    sys_ptrace_detach(pid);
                    num_threads--;
                    sig_num_threads = num_threads;
                    goto next_entry;
                  }
                }

                /* PEEKDATA probe on our own stack: a true thread shares
                 * our address space and sees the same values. */
                if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
                    sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i   != j) {
                  /* Address spaces are distinct, even though both
                   * processes show the "marker". This is probably
                   * a forked child process rather than a thread.
                   */
                  sys_ptrace_detach(pid);
                  num_threads--;
                  sig_num_threads = num_threads;
                } else {
                  found_parent |= pid == ppid;
                  added_entries++;
                }
              }
            }
          }
        next_entry:;
        }
      }
      NO_INTR(sys_close(proc));
      sig_proc = proc = -1;

      /* If we failed to find any threads, try looking somewhere else in
       * /proc. Maybe, threads are reported differently on this system.
       */
      if (num_threads > 1 || !*++proc_path) {
        NO_INTR(sys_close(marker));
        sig_marker = marker = -1;

        /* If we never found the parent process, something is very wrong.
         * Most likely, we are running in debugger. Any attempt to operate
         * on the threads would be very incomplete. Let's just report an
         * error to the caller.
         */
        if (!found_parent) {
          ResumeAllProcessThreads(num_threads, pids);
          sys__exit(3);
        }

        /* Now we are ready to call the callback,
         * which takes care of resuming the threads for us.
         */
        args->result = args->callback(args->parameter, num_threads,
                                      pids, args->ap);
        args->err = errno;

        /* Callback should have resumed threads, but better safe than sorry */
        if (ResumeAllProcessThreads(num_threads, pids)) {
          /* Callback forgot to resume at least one thread, report error */
          args->err    = EINVAL;
          args->result = -1;
        }
        sys__exit(0);
      }
   detach_threads:
      /* Resume all threads prior to retrying the operation */
      ResumeAllProcessThreads(num_threads, pids);
      sig_pids = NULL;
      num_threads = 0;
      sig_num_threads = num_threads;
      max_threads += 100;
    }
  }
}
/* Dump the descriptor/metadata of BAT "b" as two parallel string BATs:
 * *ret1 receives the property names, *ret2 the corresponding values.
 * Returns GDK_FAIL (without setting *ret1/*ret2) if the result BATs
 * cannot be allocated, GDK_SUCCEED otherwise.
 * NOTE(review): BUNappend return values are not checked here — presumably
 * acceptable for this diagnostic dump; verify against GDK conventions.
 */
static gdk_return
CMDinfo(BAT **ret1, BAT **ret2, BAT *b)
{
	BAT *bk, *bv;
	const char *mode, *accessmode;

	/* Allocate the two result BATs; reclaim bk if bv fails. */
	if (!(bk = BATnew(TYPE_void, TYPE_str, 128, TRANSIENT)))
		return GDK_FAIL;
	if (!(bv = BATnew(TYPE_void, TYPE_str, 128, TRANSIENT))) {
		BBPreclaim(bk);
		return GDK_FAIL;
	}
	BATseqbase(bk,0);
	BATseqbase(bv,0);
	*ret1 = bk;
	*ret2 = bv;

	/* Human-readable persistence mode. */
	if (b->batPersistence == PERSISTENT) {
		mode = "persistent";
	} else if (b->batPersistence == TRANSIENT) {
		mode = "transient";
	} else {
		mode ="unknown";
	}
	/* Human-readable access restriction. */
	switch (b->batRestricted) {
	case BAT_READ:
		accessmode = "read-only";
		break;
	case BAT_WRITE:
		accessmode = "updatable";
		break;
	case BAT_APPEND:
		accessmode = "append-only";
		break;
	default:
		accessmode = "unknown";
	}

	/* General descriptor properties. */
	BUNappend(bk, "batId", FALSE);
	BUNappend(bv, BATgetId(b),FALSE);
	BUNappend(bk, "batCacheid", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->batCacheid)),FALSE);
	BUNappend(bk, "hparentid", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->H->heap.parentid)),FALSE);
	BUNappend(bk, "tparentid", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->T->heap.parentid)),FALSE);
	BUNappend(bk, "batSharecnt", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->batSharecnt)),FALSE);
	BUNappend(bk, "batCount", FALSE);
	BUNappend(bv, local_utoa((size_t)b->batCount),FALSE);
	BUNappend(bk, "batCapacity", FALSE);
	BUNappend(bv, local_utoa((size_t)b->batCapacity),FALSE);
	BUNappend(bk, "head", FALSE);
	BUNappend(bv, ATOMname(b->htype),FALSE);
	BUNappend(bk, "tail", FALSE);
	BUNappend(bv, ATOMname(b->ttype),FALSE);
	BUNappend(bk, "batPersistence", FALSE);
	BUNappend(bv, mode,FALSE);
	BUNappend(bk, "batRestricted", FALSE);
	BUNappend(bv, accessmode,FALSE);
	BUNappend(bk, "batRefcnt", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(BBP_refs(b->batCacheid))),FALSE);
	BUNappend(bk, "batLRefcnt", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(BBP_lrefs(b->batCacheid))),FALSE);
	BUNappend(bk, "batDirty", FALSE);
	BUNappend(bv, BATdirty(b) ? "dirty" : "clean",FALSE);

	/* Head-column properties. */
	BUNappend(bk, "hsorted", FALSE);
	BUNappend(bv, local_itoa((ssize_t)BAThordered(b)),FALSE);
	BUNappend(bk, "hrevsorted", FALSE);
	BUNappend(bv, local_itoa((ssize_t)BAThrevordered(b)),FALSE);
	BUNappend(bk, "hident", FALSE);
	BUNappend(bv, b->hident,FALSE);
	BUNappend(bk, "hdense", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(BAThdense(b))),FALSE);
	BUNappend(bk, "hseqbase", FALSE);
	BUNappend(bv, oidtostr(b->hseqbase),FALSE);
	BUNappend(bk, "hkey", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->hkey)),FALSE);
	BUNappend(bk, "hvarsized", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->hvarsized)),FALSE);
	BUNappend(bk, "halign", FALSE);
	BUNappend(bv, local_utoa(b->halign),FALSE);
	BUNappend(bk, "hnosorted", FALSE);
	BUNappend(bv, local_utoa(b->H->nosorted),FALSE);
	BUNappend(bk, "hnorevsorted", FALSE);
	BUNappend(bv, local_utoa(b->H->norevsorted),FALSE);
	BUNappend(bk, "hnodense", FALSE);
	BUNappend(bv, local_utoa(b->H->nodense),FALSE);
	BUNappend(bk, "hnokey[0]", FALSE);
	BUNappend(bv, local_utoa(b->H->nokey[0]),FALSE);
	BUNappend(bk, "hnokey[1]", FALSE);
	BUNappend(bv, local_utoa(b->H->nokey[1]),FALSE);
	BUNappend(bk, "hnonil", FALSE);
	BUNappend(bv, local_utoa(b->H->nonil),FALSE);
	BUNappend(bk, "hnil", FALSE);
	BUNappend(bv, local_utoa(b->H->nil),FALSE);

	/* Tail-column properties. */
	BUNappend(bk, "tident", FALSE);
	BUNappend(bv, b->tident,FALSE);
	BUNappend(bk, "tdense", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(BATtdense(b))), FALSE);
	BUNappend(bk, "tseqbase", FALSE);
	BUNappend(bv, oidtostr(b->tseqbase), FALSE);
	BUNappend(bk, "tsorted", FALSE);
	BUNappend(bv, local_itoa((ssize_t)BATtordered(b)), FALSE);
	BUNappend(bk, "trevsorted", FALSE);
	BUNappend(bv, local_itoa((ssize_t)BATtrevordered(b)), FALSE);
	BUNappend(bk, "tkey", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->tkey)), FALSE);
	BUNappend(bk, "tvarsized", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->tvarsized)), FALSE);
	BUNappend(bk, "talign", FALSE);
	BUNappend(bv, local_utoa(b->talign), FALSE);
	BUNappend(bk, "tnosorted", FALSE);
	BUNappend(bv, local_utoa(b->T->nosorted), FALSE);
	BUNappend(bk, "tnorevsorted", FALSE);
	BUNappend(bv, local_utoa(b->T->norevsorted), FALSE);
	BUNappend(bk, "tnodense", FALSE);
	BUNappend(bv, local_utoa(b->T->nodense), FALSE);
	BUNappend(bk, "tnokey[0]", FALSE);
	BUNappend(bv, local_utoa(b->T->nokey[0]), FALSE);
	BUNappend(bk, "tnokey[1]", FALSE);
	BUNappend(bv, local_utoa(b->T->nokey[1]), FALSE);
	BUNappend(bk, "tnonil", FALSE);
	BUNappend(bv, local_utoa(b->T->nonil), FALSE);
	BUNappend(bk, "tnil", FALSE);
	BUNappend(bv, local_utoa(b->T->nil), FALSE);

	/* Update/insertion bookkeeping and heap usage. */
	BUNappend(bk, "batInserted", FALSE);
	BUNappend(bv, local_utoa(b->batInserted), FALSE);
	BUNappend(bk, "batDeleted", FALSE);
	BUNappend(bv, local_utoa(b->batDeleted), FALSE);
	BUNappend(bk, "batFirst", FALSE);
	BUNappend(bv, local_utoa(b->batFirst), FALSE);
	BUNappend(bk, "htop", FALSE);
	BUNappend(bv, local_utoa(b->H->heap.free), FALSE);
	BUNappend(bk, "ttop", FALSE);
	BUNappend(bv, local_utoa(b->T->heap.free), FALSE);
	BUNappend(bk, "batStamp", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->batStamp)), FALSE);
	BUNappend(bk, "lastUsed", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(BBP_lastused(b->batCacheid))), FALSE);
	BUNappend(bk, "curStamp", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(BBPcurstamp())), FALSE);
	BUNappend(bk, "batCopiedtodisk", FALSE);
	BUNappend(bv, local_itoa((ssize_t)(b->batCopiedtodisk)), FALSE);
	BUNappend(bk, "batDirtydesc", FALSE);
	BUNappend(bv, b->batDirtydesc ? "dirty" : "clean", FALSE);

	/* Dirty flags and detailed heap information. */
	BUNappend(bk, "H->heap.dirty", FALSE);
	BUNappend(bv, b->H->heap.dirty ? "dirty" : "clean", FALSE);
	BUNappend(bk, "T->heap.dirty", FALSE);
	BUNappend(bv, b->T->heap.dirty ? "dirty" : "clean", FALSE);
	infoHeap(bk, bv, &b->H->heap, "head.");
	infoHeap(bk, bv, &b->T->heap, "tail.");
	/* vheaps may be absent (NULL); guard the dirty-flag reads. */
	BUNappend(bk, "H->vheap->dirty", FALSE);
	BUNappend(bv, (b->H->vheap && b->H->vheap->dirty) ? "dirty" : "clean", FALSE);
	infoHeap(bk, bv, b->H->vheap, "hheap.");
	BUNappend(bk, "T->vheap->dirty", FALSE);
	BUNappend(bv, (b->T->vheap && b->T->vheap->dirty) ? "dirty" : "clean", FALSE);
	infoHeap(bk, bv, b->T->vheap, "theap.");

	/* dump index information */
	if (b->H->hash) {
		HASHinfo(bk, bv, b->H->hash, "hhash->");
	}
	if (b->T->hash) {
		HASHinfo(bk, bv, b->T->hash, "thash->");
	}
	/* Keys and values must line up one-to-one. */
	assert(BATcount(bk) == BATcount(bv));
	return GDK_SUCCEED;
}