/*
 * scamper_task_alloc
 *
 * allocate and initialise a task object.
 *
 * data:  measurement state owned by the caller; must not be NULL.
 * funcs: dispatch table of callbacks for the task; must not be NULL.
 *
 * Returns a zero-initialised task with its queue and signature list
 * allocated, or NULL if any allocation fails.
 */
scamper_task_t *scamper_task_alloc(void *data, scamper_task_funcs_t *funcs)
{
  scamper_task_t *task = NULL;

  assert(data != NULL);
  assert(funcs != NULL);

  if((task = malloc_zero(sizeof(scamper_task_t))) == NULL)
    {
      printerror(errno, strerror, __func__, "could not malloc task");
      goto err;
    }
  if((task->queue = scamper_queue_alloc(task)) == NULL)
    goto err;
  if((task->siglist = slist_alloc()) == NULL)
    goto err;
  task->funcs = funcs;
  task->data = data;
  return task;

 err:
  /*
   * Fix: task is NULL when the initial malloc_zero fails, and
   * scamper_task_free is not shown to tolerate NULL; only free a
   * task we actually allocated.
   */
  if(task != NULL)
    scamper_task_free(task);
  return NULL;
}
/*
 * do_infile
 *
 * read the contents of the infile in one hit.
 *
 * Allocates the global commands list, then feeds every line of the
 * global infile (when one is set) through command_new. Returns 0 on
 * success (including when no infile is configured), -1 on failure.
 */
static int do_infile(void)
{
  commands = slist_alloc();
  if(commands == NULL)
    {
      fprintf(stderr, "could not alloc commands list\n");
      return -1;
    }

  /* no input file configured: nothing more to do */
  if(infile == NULL)
    return 0;

  return file_lines(infile, command_new, NULL);
}
/*
 * slist_init
 *
 * Put *slist into a known empty state, record its key/element sizes,
 * and reserve storage for initial_size entries via slist_alloc.
 * Under valgrind, the value storage is registered as a mempool so
 * per-element allocations can be tracked.
 */
void slist_init(struct simple_list* slist, size_t key_size,
                size_t element_size, size_t initial_size)
{
  /* record the geometry first, then clear the bookkeeping fields */
  slist->key_size = key_size;
  slist->element_size = element_size;
  slist->key = NULL;
  slist->value = NULL;
  slist->size = 0;
  slist->capacity = 0;

  slist_alloc(slist, initial_size);

#ifndef NVALGRIND
  VALGRIND_CREATE_MEMPOOL(slist->value, 0, 0);
#endif
}
/*
 * slist_put
 *
 * Append key to the list and return a pointer to the value slot that
 * pairs with it. When the list is full, capacity is grown by the
 * current capacity (i.e. doubled) before inserting.
 * NOTE(review): assumes capacity is non-zero here (set by slist_init),
 * otherwise growing by the current capacity would add nothing — confirm.
 */
void* slist_put(struct simple_list* slist, void* key)
{
  if (slist->size == slist->capacity)
    slist_alloc(slist, /* growth = */ slist->capacity);

  /* claim the next slot and store the key */
  unsigned int slot = slist->size++;
  slist->key[slot] = key;

  void* value = slist_get(slist, slot);
#ifndef NVALGRIND
  VALGRIND_MEMPOOL_ALLOC(slist->value, value, slist->element_size);
#endif
  return value;
}
static int actual_restore(struct asfd *asfd, struct bu *bu, const char *manifest, regex_t *regex, int srestore, enum action act, struct sdirs *sdirs, enum cntr_status cntr_status, struct conf **cconfs) { int ret=-1; int do_restore_stream=1; // For out-of-sequence directory restoring so that the // timestamps come out right: struct slist *slist=NULL; if(linkhash_init() || !(slist=slist_alloc())) goto end; if(get_protocol(cconfs)==PROTO_2) { switch(maybe_restore_spool(asfd, manifest, sdirs, bu, srestore, regex, cconfs, slist, act, cntr_status)) { case 1: do_restore_stream=0; break; case 0: do_restore_stream=1; break; default: goto end; // Error; } } if(do_restore_stream && restore_stream(asfd, sdirs, slist, bu, manifest, regex, srestore, cconfs, act, cntr_status)) goto end; if(restore_remaining_dirs(asfd, bu, slist, act, sdirs, cntr_status, cconfs)) goto end; // Restore has nearly completed OK. ret=restore_end(asfd, cconfs); cntr_print(get_cntr(cconfs), act); cntr_stats_to_file(get_cntr(cconfs), bu->path, act, cconfs); end: slist_free(&slist); linkhash_free(); return ret; }
/*
 * scamper_writebuf_alloc
 *
 * Allocate a zeroed writebuf with an empty iovec list.
 * Returns NULL if either allocation fails.
 */
scamper_writebuf_t *scamper_writebuf_alloc(void)
{
  scamper_writebuf_t *wb = NULL;

  if((wb = malloc_zero(sizeof(scamper_writebuf_t))) == NULL)
    {
      goto err;
    }
  if((wb->iovs = slist_alloc()) == NULL)
    {
      goto err;
    }
  return wb;

 err:
  /*
   * Fix: wb is NULL when the initial malloc_zero fails, and
   * scamper_writebuf_free is not shown to tolerate NULL; only free
   * a writebuf we actually allocated.
   */
  if(wb != NULL)
    scamper_writebuf_free(wb);
  return NULL;
}
static
#endif
/*
 * do_backup_phase2_server_protocol2
 *
 * Server side of backup phase 2 for protocol 2 (receiving backup data
 * from the client). Multiplexes three descriptors via 'as': the main
 * client fd (asfd) and the champ chooser fd (chfd), while streaming
 * entries from the phase1 scan manifests (manios) and writing received
 * blocks through dpth. Returns 0 on success, -1 on error.
 */
int do_backup_phase2_server_protocol2(struct async *as, struct asfd *chfd,
	struct sdirs *sdirs, int resume, struct conf **confs)
{
	int ret=-1;
	uint8_t end_flags=0;
	struct slist *slist=NULL;
	struct iobuf wbuf;
	struct dpth *dpth=NULL;
	man_off_t *p1pos=NULL;
	struct manios *manios=NULL;
	// This is used to tell the client that a number of consecutive blocks
	// have been found and can be freed.
	uint64_t wrap_up=0;
	struct asfd *asfd=NULL;
	struct cntr *cntr=NULL;
	struct sbuf *csb=NULL;
	uint64_t file_no=1;

	// Defensive checks: all of these are required for the transfer.
	if(!as)
	{
		logp("async not provided to %s()\n", __func__);
		goto end;
	}
	if(!sdirs)
	{
		logp("sdirs not provided to %s()\n", __func__);
		goto end;
	}
	if(!confs)
	{
		logp("confs not provided to %s()\n", __func__);
		goto end;
	}
	asfd=as->asfd;
	if(!asfd)
	{
		logp("asfd not provided to %s()\n", __func__);
		goto end;
	}
	if(!chfd)
	{
		logp("chfd not provided to %s()\n", __func__);
		goto end;
	}
	cntr=get_cntr(confs);

	// Breakpoints in the 2000-2999 range belong to this phase.
	// NOTE(review): breaking/breakcount appear to be file-scope globals
	// for fault injection — not visible in this chunk; confirm.
	if(get_int(confs[OPT_BREAKPOINT])>=2000
	  && get_int(confs[OPT_BREAKPOINT])<3000)
	{
		breaking=get_int(confs[OPT_BREAKPOINT]);
		breakcount=breaking-2000;
	}

	logp("Phase 2 begin (recv backup data)\n");

	if(!(dpth=dpth_alloc())
	  || dpth_protocol2_init(dpth,
		sdirs->data,
		get_string(confs[OPT_CNAME]),
		sdirs->cfiles,
		get_int(confs[OPT_MAX_STORAGE_SUBDIRS])))
			goto end;
	if(resume && !(p1pos=do_resume(sdirs, dpth, confs)))
		goto end;
	if(!(manios=manios_open_phase2(sdirs, p1pos, PROTO_2))
	  || !(slist=slist_alloc())
	  || !(csb=sbuf_alloc(PROTO_2)))
		goto end;

	// Start from a clean read buffer; wbuf lives on the stack and does
	// not own its 'buf' pointer.
	iobuf_free_content(asfd->rbuf);
	memset(&wbuf, 0, sizeof(struct iobuf));

	// Main event loop: runs until the client signals END_BACKUP.
	while(!(end_flags&END_BACKUP))
	{
		if(maybe_add_from_scan(manios, slist, chfd, &csb))
			goto end;

		// Only refill wbuf once its previous contents were queued.
		if(!wbuf.len)
		{
			if(get_wbuf_from_sigs(&wbuf, slist, &end_flags))
				goto end;
			if(!wbuf.len)
			{
				get_wbuf_from_files(&wbuf, slist,
					manios, &end_flags, &file_no);
			}
		}

		if(wbuf.len
		  && asfd->append_all_to_write_buffer(asfd, &wbuf)
			==APPEND_ERROR)
				goto end;
		if(append_for_champ_chooser(chfd, slist->blist, end_flags))
			goto end;

		if(as->read_write(as))
		{
			logp("error from as->read_write in %s\n", __func__);
			goto end;
		}

		// Drain the client read buffer completely.
		while(asfd->rbuf->buf)
		{
			if(deal_with_read(asfd->rbuf, slist, cntr,
				&end_flags, dpth))
					goto end;
			// Get as much out of the readbuf as possible.
			if(asfd->parse_readbuf(asfd))
				goto end;
		}
		// Drain the champ chooser read buffer completely.
		while(chfd->rbuf->buf)
		{
			if(deal_with_read_from_chfd(chfd, slist->blist,
				&wrap_up, dpth, cntr))
					goto end;
			// Get as much out of the readbuf as possible.
			if(chfd->parse_readbuf(chfd))
				goto end;
		}

		if(write_to_changed_file(asfd, chfd,
			manios, slist, end_flags))
				goto end;
	}

	// Hack: If there are some entries left after the last entry that
	// contains block data, it will not be written to the changed file
	// yet because the last entry of block data has not had
	// sb->protocol2->bend set.
	if(slist->head && slist->head->next)
	{
		struct sbuf *sb=NULL;
		sb=slist->head;
		slist->head=sb->next;
		sbuf_free(&sb);
		if(write_to_changed_file(asfd, chfd,
			manios, slist, end_flags))
				goto end;
	}

	if(manios_close(&manios))
		goto end;

	if(check_for_missing_work_in_slist(slist))
		goto end;

	// Need to release the last left. There should be one at most.
	if(dpth->head && dpth->head->next)
	{
		logp("ERROR: More data locks remaining after: %s\n",
			dpth->head->save_path);
		goto end;
	}
	if(dpth_release_all(dpth))
		goto end;

	ret=0;
end:
	logp("End backup\n");
	sbuf_free(&csb);
	slist_free(&slist);
	// asfd stays NULL when the early sanity checks fail, so guard.
	if(asfd)
		iobuf_free_content(asfd->rbuf);
	if(chfd)
		iobuf_free_content(chfd->rbuf);
	dpth_free(&dpth);
	// manios_close is a no-op if the earlier close already succeeded.
	manios_close(&manios);
	man_off_t_free(&p1pos);
	return ret;
}
/*
 * backup_phase2_server
 *
 * Server side of backup phase 2 (burp2-era variant): receive backup data
 * from the client on the main fd while exchanging block signatures with
 * the champ chooser on the second fd. Reads the current and phase1 scan
 * manifests, and writes the changed/unchanged manifests as it goes.
 * Returns 0 on success, -1 on error.
 */
int backup_phase2_server(struct async *as, struct sdirs *sdirs,
	const char *manifest_dir, int resume, struct conf *conf)
{
	int ret=-1;
	int sigs_end=0;
	int backup_end=0;
	int requests_end=0;
	int blk_requests_end=0;
	struct slist *slist=NULL;
	struct blist *blist=NULL;
	struct iobuf *wbuf=NULL;
	struct dpth *dpth=NULL;
	struct manio *cmanio=NULL;	// current manifest
	struct manio *p1manio=NULL;	// phase1 scan manifest
	struct manio *chmanio=NULL;	// changed manifest
	struct manio *unmanio=NULL;	// unchanged manifest
	// This is used to tell the client that a number of consecutive blocks
	// have been found and can be freed.
	uint64_t wrap_up=0;
	// Main fd is first in the list.
	struct asfd *asfd=as->asfd;
	// Champ chooser fd is second in the list.
	struct asfd *chfd=asfd->next;

	logp("Phase 2 begin (recv backup data)\n");

	//if(champ_chooser_init(sdirs->data, conf)
	if(!(cmanio=manio_alloc())
	  || !(p1manio=manio_alloc())
	  || !(chmanio=manio_alloc())
	  || !(unmanio=manio_alloc())
	  || manio_init_read(cmanio, sdirs->cmanifest)
	  || manio_init_read(p1manio, sdirs->phase1data)
	  || manio_init_write(chmanio, sdirs->changed)
	  || manio_init_write(unmanio, sdirs->unchanged)
	  || !(slist=slist_alloc())
	  || !(blist=blist_alloc())
	  || !(wbuf=iobuf_alloc())
	  || !(dpth=dpth_alloc(sdirs->data))
	  || dpth_init(dpth))
		goto end;

	// The phase1 manifest looks the same as a burp1 one.
	manio_set_protocol(p1manio, PROTO_BURP1);

	// Main event loop: runs until deal_with_read sets backup_end.
	while(!backup_end)
	{
		if(maybe_add_from_scan(asfd,
			p1manio, cmanio, unmanio, slist, conf))
				goto end;

		// Only refill wbuf once its previous contents were queued.
		if(!wbuf->len)
		{
			if(get_wbuf_from_sigs(wbuf, slist, blist,
				sigs_end, &blk_requests_end, dpth, conf))
					goto end;
			if(!wbuf->len)
			{
				get_wbuf_from_files(wbuf, slist,
					p1manio, &requests_end);
			}
		}

		// NOTE(review): return values of these two calls are not
		// checked here, unlike the protocol2 variant — confirm
		// whether that is intentional.
		if(wbuf->len)
			asfd->append_all_to_write_buffer(asfd, wbuf);
		append_for_champ_chooser(chfd, blist, sigs_end);

		if(as->read_write(as))
		{
			logp("error in %s\n", __func__);
			goto end;
		}

		// Drain the client read buffer completely.
		while(asfd->rbuf->buf)
		{
			if(deal_with_read(asfd->rbuf, slist, blist,
				conf, &sigs_end, &backup_end, dpth))
					goto end;
			// Get as much out of the
			// readbuf as possible.
			if(asfd->parse_readbuf(asfd)) goto end;
		}
		// Drain the champ chooser read buffer completely.
		while(chfd->rbuf->buf)
		{
			if(deal_with_read_from_chfd(asfd, chfd,
				blist, &wrap_up, dpth)) goto end;
			// Get as much out of the
			// readbuf as possible.
			if(chfd->parse_readbuf(chfd)) goto end;
		}

		if(write_to_changed_file(asfd, chfd, chmanio,
			slist, blist, dpth, backup_end, conf))
				goto end;
	}

	// Hack: If there are some entries left after the last entry that
	// contains block data, it will not be written to the changed file
	// yet because the last entry of block data has not had
	// sb->burp2->bend set.
	if(slist->head && slist->head->next)
	{
		// NOTE(review): the skipped head entry is not freed here;
		// the protocol2 variant sbuf_free()s it — possible leak,
		// confirm against slist_free semantics.
		slist->head=slist->head->next;
		if(write_to_changed_file(asfd, chfd, chmanio,
			slist, blist, dpth, backup_end, conf))
				goto end;
	}

	if(manio_close(unmanio)
	  || manio_close(chmanio))
		goto end;

	if(blist->head)
	{
		logp("ERROR: finishing but still want block: %lu\n",
			blist->head->index);
		goto end;
	}

	// Need to release the last left. There should be one at most.
	if(dpth->head && dpth->head->next)
	{
		logp("ERROR: More data locks remaining after: %s\n",
			dpth->head->save_path);
		goto end;
	}
	if(dpth_release_all(dpth)) goto end;

	ret=0;
end:
	logp("End backup\n");
	slist_free(slist);
	blist_free(blist);
	iobuf_free_content(asfd->rbuf);
	iobuf_free_content(chfd->rbuf);
	// Write buffer did not allocate 'buf'.
	if(wbuf) wbuf->buf=NULL;
	iobuf_free(wbuf);
	dpth_release_all(dpth);
	dpth_free(&dpth);
	manio_free(&cmanio);
	manio_free(&p1manio);
	manio_free(&chmanio);
	manio_free(&unmanio);
	return ret;
}
/*
 * backup_phase2_client_protocol2
 *
 * Client side of backup phase 2 for protocol 2: stream file data and
 * block signatures to the server, reacting to the server's block
 * requests as they arrive. Returns 0 on success, -1 on error.
 */
int backup_phase2_client_protocol2(struct asfd *asfd,
	struct conf **confs, int resume)
{
	int ret=-1;
	uint8_t end_flags=0;
	struct slist *slist=NULL;
	struct iobuf *rbuf=NULL;
	struct iobuf *wbuf=NULL;
	struct cntr *cntr=NULL;

	if(confs) cntr=get_cntr(confs);

	if(!asfd || !asfd->as)
	{
		logp("%s() called without async structs!\n", __func__);
		goto end;
	}

	logp("Phase 2 begin (send backup data)\n");
	logfmt("\n");

	if(!(slist=slist_alloc())
	  || !(wbuf=iobuf_alloc())
	  || blks_generate_init())
		goto end;
	rbuf=asfd->rbuf;

	if(!resume)
	{
		// Only do this bit if the server did not tell us to resume.
		if(asfd->write_str(asfd, CMD_GEN, "backupphase2")
		  || asfd_read_expect(asfd, CMD_GEN, "ok"))
			goto end;
	}
	else
	{
		// On resume, the server might update the client with cntr.
		if(cntr_recv(asfd, confs))
			goto end;
	}

	// Main loop: alternate between filling the write buffer with file
	// data/blocks and handling whatever the server sent back, until
	// END_BACKUP is flagged or the tail condition below breaks out.
	while(!(end_flags&END_BACKUP))
	{
		if(!wbuf->len)
		{
			get_wbuf_from_data(confs, wbuf, slist,
				end_flags);
			if(!wbuf->len)
			{
				if(get_wbuf_from_blks(wbuf, slist,
					&end_flags)) goto end;
			}
		}

		if(wbuf->len)
		{
			if(asfd->append_all_to_write_buffer(asfd, wbuf)
				==APPEND_ERROR)
					goto end;
		}
		if(asfd->as->read_write(asfd->as))
		{
			logp("error in %s\n", __func__);
			goto end;
		}

		if(rbuf->buf && deal_with_read(rbuf, slist, cntr, &end_flags))
			goto end;

		if(slist->head
		// Need to limit how many blocks are allocated at once.
		  && (!slist->blist->head
			|| slist->blist->tail->index
				- slist->blist->head->index<BLKS_MAX_IN_MEM)
		)
		{
			if(add_to_blks_list(asfd, confs, slist))
				goto end;
		}

		if(end_flags&END_BLK_REQUESTS)
		{
			// If got to the end of the file request list
			// and the last block of the last file, and
			// the write buffer is empty, we got to the end.
			if(slist->head==slist->tail)
			{
				if(!slist->tail
				  || slist->blist->last_sent==
					slist->tail->protocol2->bend)
				{
					if(!wbuf->len)
						break;
				}
			}
		}
	}

	if(asfd->write_str(asfd, CMD_GEN, "backup_end"))
		goto end;

	ret=0;
end:
	slist_free(&slist);
	blks_generate_free();
	// wbuf is NULL when the allocations at the top failed.
	if(wbuf)
	{
		// Write buffer did not allocate 'buf'.
		wbuf->buf=NULL;
		iobuf_free(&wbuf);
	}
	cntr_print_end(cntr);
	cntr_print(cntr, ACTION_BACKUP, asfd);
	if(ret) logp("Error in backup\n");
	logp("End backup\n");
	return ret;
}
/*
 * configs_init
 *
 * Create the global configs list; each element is sized to hold one
 * struct ConfigItem.
 */
void configs_init(void)
{
	configs = slist_alloc(sizeof(struct ConfigItem));
}
int backup_phase2_client_burp2(struct asfd *asfd, struct conf *conf, int resume) { int ret=-1; int sigs_end=0; int backup_end=0; int requests_end=0; int blk_requests_end=0; struct win *win=NULL; // Rabin sliding window. struct slist *slist=NULL; struct blist *blist=NULL; struct iobuf *rbuf=NULL; struct iobuf *wbuf=NULL; logp("Phase 2 begin (send backup data)\n"); if(!(slist=slist_alloc()) || !(blist=blist_alloc()) || !(wbuf=iobuf_alloc()) || blks_generate_init(conf) || !(win=win_alloc(&conf->rconf))) goto end; rbuf=asfd->rbuf; if(!resume) { // Only do this bit if the server did not tell us to resume. if(asfd->write_str(asfd, CMD_GEN, "backupphase2") || asfd->read_expect(asfd, CMD_GEN, "ok")) goto end; } else if(conf->send_client_cntr) { // On resume, the server might update the client with the // counters. if(cntr_recv(asfd, conf)) goto end; } while(!backup_end) { if(!wbuf->len) { get_wbuf_from_data(conf, wbuf, slist, blist, blk_requests_end); if(!wbuf->len) { get_wbuf_from_blks(wbuf, slist, requests_end, &sigs_end); } } if(wbuf->len) asfd->append_all_to_write_buffer(asfd, wbuf); if(asfd->as->read_write(asfd->as)) { logp("error in %s\n", __func__); goto end; } if(rbuf->buf && deal_with_read(rbuf, slist, blist, conf, &backup_end, &requests_end, &blk_requests_end)) goto end; if(slist->head // Need to limit how many blocks are allocated at once. && (!blist->head || blist->tail->index - blist->head->index<BLKS_MAX_IN_MEM) ) { if(add_to_blks_list(asfd, conf, slist, blist, win)) goto end; } if(blk_requests_end) { // If got to the end of the file request list // and the last block of the last file, and // the write buffer is empty, we got to the end. 
if(slist->head==slist->tail) { if(!slist->tail || blist->last_sent==slist->tail->burp2->bend) { if(!wbuf->len) break; } } } } if(asfd->write_str(asfd, CMD_GEN, "backup_end")) goto end; ret=0; end: blk_print_alloc_stats(); //sbuf_print_alloc_stats(); win_free(win); slist_free(&slist); blist_free(&blist); // Write buffer did not allocate 'buf'. wbuf->buf=NULL; iobuf_free(&wbuf); cntr_print_end(conf->cntr); cntr_print(conf->cntr, ACTION_BACKUP); if(ret) logp("Error in backup\n"); logp("End backup\n"); return ret; }