/** Resolve an A record for the hostname in fmt and write the address,
 *  in dotted-quad presentation form, into the caller-supplied buffer *out.
 *
 *  The query is submitted asynchronously to libunbound and this function
 *  then blocks in ub_common_wait() until the callback (link_ubres) has
 *  stored the result, or the wait fails.
 *
 *  @param ctx       talloc context, used only for the temporary fmt copy.
 *  @param out       caller-allocated output buffer, written in place.
 *  @param outlen    size of the *out buffer in bytes.
 *  @param mod_inst  module instance data (an rlm_unbound_t).
 *  @param xlat_inst unused.
 *  @param request   request being processed; used for debug logging.
 *  @param fmt       hostname to look up.
 *  @return length of the string written to *out, or -1 on any failure.
 */
static ssize_t xlat_a(TALLOC_CTX *ctx, char **out, size_t outlen,
		      void const *mod_inst, UNUSED void const *xlat_inst,
		      REQUEST *request, char const *fmt)
{
	rlm_unbound_t const *inst = mod_inst;
	struct ub_result **ubres;
	int async_id;
	char *fmt2;	/* For const warnings. Keep till new libunbound ships. */

	/* This has to be on the heap, because threads. */
	ubres = talloc(inst, struct ub_result *);

	/* Used and thus impossible value from heap to designate incomplete.
	 * The resolver callback overwrites *ubres with the real result (or
	 * NULL), so this sentinel lets the waiter detect completion. */
	memcpy(ubres, &mod_inst, sizeof(*ubres));

	fmt2 = talloc_typed_strdup(ctx, fmt);

	/* NOTE(review): the return value of ub_resolve_async() is ignored;
	 * presumably a submission failure leaves *ubres at the sentinel and
	 * ub_common_wait() reports the error — confirm against its body. */
	ub_resolve_async(inst->ub, fmt2, 1, 1, ubres, link_ubres, &async_id);

	/* Presumably libunbound copies the query name internally, making it
	 * safe to free fmt2 before the query completes — TODO confirm. */
	talloc_free(fmt2);

	if (ub_common_wait(inst, request, inst->xlat_a_name, ubres, async_id)) {
		goto error0;
	}

	if (*ubres) {
		if (ub_common_fail(request, inst->xlat_a_name, *ubres)) {
			goto error1;
		}

		/* Only the first address in the answer is used. */
		if (!inet_ntop(AF_INET, (*ubres)->data[0], *out, outlen)) {
			goto error1;
		};

		ub_resolve_free(*ubres);
		talloc_free(ubres);
		return strlen(*out);
	}

	RWDEBUG("%s - No result", inst->xlat_a_name);

	error1:
	ub_resolve_free(*ubres);	/* Handles NULL gracefully */

	error0:
	talloc_free(ubres);
	return -1;
}
int main(void) { struct ub_ctx* ctx; volatile int qlen = 0; int retval; char qname[QNAME_MAX]; char *moredata, *nl; /* basic checks */ if(QUEUE_MAX < 1) { printf("error: queue length must be >0\n"); return 1; } /* create context */ ctx = ub_ctx_create(); if(!ctx) { printf("error: could not create unbound context\n"); return 1; } fprintf(stderr, "HOST;ERR;RCODE;DATA;NXDOMAIN\n"); /* we keep running, lets do something while waiting */ moredata = (char*)-1; do { /* queue has room && more data on stdin? */ if(qlen < QUEUE_MAX && moredata) { /* read and prepare qname from stdin */ moredata = fgets(qname, QNAME_MAX, stdin); if(moredata != NULL) { nl = strrchr(qname, '\n'); if(nl) *nl = '\0'; if((int)nl == (int)&qname) { printf("empty input\n"); continue; } /* add async query to queue */ retval = ub_resolve_async(ctx, qname, 1, 1, (void*)&qlen, mycallback, NULL); if(retval != 0) { printf("resolve error for %s: %s\n", qname, ub_strerror(retval)); continue; } qlen++; } usleep(50000); /* wait 1/50 of a second */ } else { /* queue is full || eof stdin reached */ usleep(100000); /* wait 1/10 of a second */ retval = ub_process(ctx); if(retval != 0) { printf("resolve error: %s\n", ub_strerror(retval)); return 1; } } } while(qlen || moredata); ub_ctx_delete(ctx); return 0; }
/** main program for asynclook */
int main(int argc, char** argv)
{
	int c;
	struct ub_ctx* ctx;
	struct lookinfo* lookups;
	int i, r, cancel=0, blocking=0, ext=0;

	/* init log now because solaris thr_key_create() is not threadsafe */
	log_init(0,0,0);
	/* lock debug start (if any) */
	checklock_start();

	/* create context */
	ctx = ub_ctx_create();
	if(!ctx) {
		printf("could not create context, %s\n", strerror(errno));
		return 1;
	}

	/* command line options */
	if(argc == 1) {
		usage(argv);	/* NOTE(review): usage() presumably exits — confirm */
	}
	while( (c=getopt(argc, argv, "bcdf:hH:r:tx")) != -1) {
		switch(c) {
		case 'd':	/* raise libunbound verbosity */
			r = ub_ctx_debuglevel(ctx, 3);
			checkerr("ub_ctx_debuglevel", r);
			break;
		case 't':	/* let libunbound use a thread for async work */
			r = ub_ctx_async(ctx, 1);
			checkerr("ub_ctx_async", r);
			break;
		case 'c':	/* cancel the queries right after starting them */
			cancel = 1;
			break;
		case 'b':	/* use blocking ub_resolve() instead of async */
			blocking = 1;
			break;
		case 'r':	/* read upstream servers from a resolv.conf */
			r = ub_ctx_resolvconf(ctx, optarg);
			if(r != 0) {
				printf("ub_ctx_resolvconf "
					"error: %s : %s\n",
					ub_strerror(r), strerror(errno));
				return 1;
			}
			break;
		case 'H':	/* read host entries from a hosts file */
			r = ub_ctx_hosts(ctx, optarg);
			if(r != 0) {
				printf("ub_ctx_hosts "
					"error: %s : %s\n",
					ub_strerror(r), strerror(errno));
				return 1;
			}
			break;
		case 'f':	/* forward all queries to the given server */
			r = ub_ctx_set_fwd(ctx, optarg);
			checkerr("ub_ctx_set_fwd", r);
			break;
		case 'x':	/* run the extended (multi-thread) test instead */
			ext = 1;
			break;
		case 'h':
		case '?':
		default:
			usage(argv);
		}
	}
	argc -= optind;
	argv += optind;	/* remaining args are the names to look up */

	if(ext)
		return ext_test(ctx, argc, argv);

	/* allocate array for results. */
	lookups = (struct lookinfo*)calloc((size_t)argc,
		sizeof(struct lookinfo));
	if(!lookups) {
		printf("out of memory\n");
		return 1;
	}

	/* perform asynchronous calls */
	num_wait = argc;	/* global, decremented by lookup_is_done callback */
	for(i=0; i<argc; i++) {
		lookups[i].name = argv[i];
		if(blocking) {
			fprintf(stderr, "lookup %s\n", argv[i]);
			r = ub_resolve(ctx, argv[i], LDNS_RR_TYPE_A,
				LDNS_RR_CLASS_IN, &lookups[i].result);
			checkerr("ub_resolve", r);
		} else {
			fprintf(stderr, "start async lookup %s\n", argv[i]);
			r = ub_resolve_async(ctx, argv[i], LDNS_RR_TYPE_A,
				LDNS_RR_CLASS_IN, &lookups[i],
				&lookup_is_done, &lookups[i].async_id);
			checkerr("ub_resolve_async", r);
		}
	}
	if(blocking)
		num_wait = 0;	/* blocking lookups already completed above */
	else if(cancel) {
		for(i=0; i<argc; i++) {
			fprintf(stderr, "cancel %s\n", argv[i]);
			r = ub_cancel(ctx, lookups[i].async_id);
			/* UB_NOID means the query already finished; that is
			 * not an error for this test */
			if(r != UB_NOID)
				checkerr("ub_cancel", r);
		}
		num_wait = 0;
	}

	/* wait while the hostnames are looked up. Do something useful here */
	if(num_wait > 0)
		for(i=0; i<1000; i++) {
			usleep(100000);	/* poll every 0.1 s, up to ~100 s */
			fprintf(stderr, "%g seconds passed\n", 0.1*(double)i);
			r = ub_process(ctx);	/* fires completed callbacks */
			checkerr("ub_process", r);
			if(num_wait == 0)
				break;
		}
	/* NOTE(review): if the wait loop above was skipped, i still holds
	 * argc from the earlier loop, so argc >= 999 would falsely report a
	 * timeout here; the timeout path also returns without freeing
	 * lookups or deleting ctx — acceptable for a test program. */
	if(i>=999) {
		printf("timed out\n");
		return 0;
	}
	printf("lookup complete\n");

	/* print lookup results */
	for(i=0; i<argc; i++) {
		print_result(&lookups[i]);
		ub_resolve_free(lookups[i].result);
	}

	ub_ctx_delete(ctx);
	free(lookups);
	checklock_stop();
	return 0;
}
/** extended thread worker.
 *
 *  Each thread exercises a different libunbound usage mode based on its
 *  thread number:
 *   - top third (thread_num > NUMTHR*2/3): async queries with per-query
 *     cancellation tracking via a lock-protected track_id array;
 *   - middle (thread_num > NUMTHR/2): plain async queries, no tracking;
 *   - rest: blocking ub_resolve() calls.
 *
 *  @param arg  a struct ext_thr_info* with the shared ctx, the query
 *              names (argv/argc) and numq, the number of queries to issue.
 *  @return NULL always (pthread worker signature).
 */
static void* ext_thread(void* arg)
{
	struct ext_thr_info* inf = (struct ext_thr_info*)arg;
	int i, r;
	struct ub_result* result;
	struct track_id* async_ids = NULL;
	log_thread_set(&inf->thread_num);

	/* only the top third of the threads track/cancel their queries */
	if(inf->thread_num > NUMTHR*2/3) {
		async_ids = (struct track_id*)calloc((size_t)inf->numq,
			sizeof(struct track_id));
		if(!async_ids) {
			printf("out of memory\n");
			exit(1);
		}
		for(i=0; i<inf->numq; i++) {
			lock_basic_init(&async_ids[i].lock);
		}
	}

	for(i=0; i<inf->numq; i++) {
		if(async_ids) {
			/* query names cycle through argv */
			r = ub_resolve_async(inf->ctx,
				inf->argv[i%inf->argc],
				LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN,
				&async_ids[i], ext_callback,
				&async_ids[i].id);
			checkerr("ub_resolve_async", r);
			/* once 100 queries ahead, cancel the query issued
			 * 100 iterations ago; the lock serializes against
			 * ext_callback touching the same track_id */
			if(i > 100) {
				lock_basic_lock(&async_ids[i-100].lock);
				r = ub_cancel(inf->ctx, async_ids[i-100].id);
				/* UB_NOID means it had already completed;
				 * otherwise mark it as cancelled so the
				 * callback (if any) knows */
				if(r != UB_NOID)
					async_ids[i-100].cancel=1;
				lock_basic_unlock(&async_ids[i-100].lock);
				if(r != UB_NOID)
					checkerr("ub_cancel", r);
			}
		} else if(inf->thread_num > NUMTHR/2) {
			/* async, fire-and-forget (no tracking data) */
			r = ub_resolve_async(inf->ctx,
				inf->argv[i%inf->argc],
				LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN,
				NULL, ext_callback, NULL);
			checkerr("ub_resolve_async", r);
		} else {
			/* blocking */
			r = ub_resolve(inf->ctx, inf->argv[i%inf->argc],
				LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, &result);
			ext_check_result("ub_resolve", r, result);
			ub_resolve_free(result);
		}
	}

	/* async threads must drain their outstanding queries */
	if(inf->thread_num > NUMTHR/2) {
		r = ub_wait(inf->ctx);
		checkerr("ub_ctx_wait", r);
	}

	/* if these locks are destroyed, or if the async_ids is freed, then
	   a use-after-free happens in another thread.
	   The allocation is only part of this test, though. */
	/* Intentionally leaked — see the comment above; do not "fix" by
	   re-enabling this cleanup. */
	/*
	if(async_ids) {
		for(i=0; i<inf->numq; i++) {
			lock_basic_destroy(&async_ids[i].lock);
		}
	}
	free(async_ids);
	*/

	return NULL;
}