/*
 * Test helper: build a minimal asfd whose write function is replaced by
 * my_asfd_write, and open the global 'output' file at outputpath for the
 * written data. Pretty-printed JSON output is switched on.
 * Aborts the test (fail_unless) on any allocation or open failure.
 */
static struct asfd *asfd_setup(const char *outputpath)
{
	struct asfd *a;

	a=asfd_alloc();
	fail_unless(a!=NULL);
	a->rbuf=iobuf_alloc();
	fail_unless(a->rbuf!=NULL);
	a->write=my_asfd_write;

	// Make sure the directory for the output file exists, then open it.
	fail_unless(!build_path_w(outputpath));
	output=fzp_open(outputpath, "wb");
	fail_unless(output!=NULL);

	json_set_pretty_print(1);
	return a;
}
/*
 * Write a periodic status update for the running backup to the global
 * status asfd (wasfd), rate-limited to at most one update every two
 * seconds.
 * cntr_status: state to record on the counter before serialising it.
 * path: current path being worked on (included in the status string).
 * Returns 0 on success or when there was nothing to do, -1 on error.
 */
int write_status(enum cntr_status cntr_status, const char *path, struct cntr *cntr)
{
	time_t now=0;
	time_t diff=0;
	static time_t lasttime=0;
	// l!=0 means a previously built status string is still waiting to be
	// flushed to the write buffer; do not build a new one until it goes.
	static size_t l=0;
	static struct iobuf *wbuf=NULL;

	// No status fd set up - nothing to do.
	if(!wasfd) return 0;
	// No counter, or no backup number yet - nothing worth reporting.
	if(!cntr || !cntr->bno) return 0;

	// Only update every 2 seconds.
	now=time(NULL);
	diff=now-lasttime;
	if(diff<2)
	{
		// Might as well do this in case they fiddled their
		// clock back in time.
		if(diff<0) lasttime=now;
		return 0;
	}
	lasttime=now;

	// Only get a new string if we did not manage to write the previous
	// one.
	if(!l)
	{
		cntr->cntr_status=cntr_status;
		if(!(l=cntr_to_str(cntr, path))) goto error;
		if(!wbuf && !(wbuf=iobuf_alloc())) goto error;
		// NOTE(review): wbuf borrows cntr->str here. If iobuf_free()
		// below frees wbuf->buf, the error path would free memory
		// owned by cntr - confirm iobuf_free()/iobuf_set() ownership
		// semantics.
		iobuf_set(wbuf, CMD_APPEND, cntr->str, l);
	}
	switch(wasfd->append_all_to_write_buffer(wasfd, wbuf))
	{
		// Buffered OK - allow a fresh string next time.
		case APPEND_OK: l=0; // Fall through.
		// Blocked - keep l so the same string is retried later.
		case APPEND_BLOCKED: return 0;
		// APPEND_ERROR (or unknown) falls through to the error path.
		default: break;
	}
error:
	iobuf_free(&wbuf);
	return -1;
}
struct asfd *asfd_mock_setup(struct ioevent_list *user_reads, struct ioevent_list *user_writes) { struct asfd *asfd=NULL; fail_unless((asfd=asfd_alloc())!=NULL); fail_unless((asfd->rbuf=iobuf_alloc())!=NULL); asfd->read=mock_asfd_read; asfd->write=do_asfd_assert_write; asfd->write_str=mock_asfd_assert_write_str; asfd->append_all_to_write_buffer= mock_asfd_assert_append_all_to_write_buffer; asfd->parse_readbuf=mock_parse_readbuf; asfd->simple_loop=asfd_simple_loop; ioevent_list_init(user_reads); ioevent_list_init(user_writes); asfd->data1=(void *)user_reads; asfd->data2=(void *)user_writes; return asfd; };
/*
 * Queue block signatures (fingerprint + md5) for the champ chooser fd,
 * draining blist->blk_for_champ_chooser as far as the write buffer will
 * accept. Once sigs_end is set and the list is empty, a single "sigs_end"
 * marker is sent (finished_sending ensures it goes exactly once).
 * Returns 0 (including "try again later" when the buffer blocks), -1 only
 * on allocation failure.
 */
static int append_for_champ_chooser(struct asfd *chfd, struct blist *blist, int sigs_end)
{
	static int finished_sending=0;
	// Reused 128 byte scratch buffer, allocated once on first call.
	static struct iobuf *wbuf=NULL;
	if(!wbuf)
	{
		if(!(wbuf=iobuf_alloc())
		  || !(wbuf->buf=(char *)malloc_w(128, __func__)))
			return -1;
		wbuf->cmd=CMD_SIG;
	}
	while(blist->blk_for_champ_chooser)
	{
		// FIX THIS: This should not need to be done quite like this.
		// Make weak/strong into uint64 and uint8_t array, then
		// send them unconverted.
		// NOTE(review): "%016lX" assumes unsigned long is 64 bits -
		// wrong on ILP32 non-Windows targets if fingerprint is a
		// 64-bit value; PRIx64 from <inttypes.h> would be portable.
		// Confirm the declared type of fingerprint.
		wbuf->len=snprintf(wbuf->buf, 128,
#ifdef HAVE_WIN32
			"%016I64X%s",
#else
			"%016lX%s",
#endif
			blist->blk_for_champ_chooser->fingerprint,
			bytes_to_md5str(blist->blk_for_champ_chooser->md5sum));
		if(chfd->append_all_to_write_buffer(chfd, wbuf))
			return 0; // Try again later.
		blist->blk_for_champ_chooser=blist->blk_for_champ_chooser->next;
	}
	// All signatures sent - tell the champ chooser we are done, once.
	if(sigs_end && !finished_sending && !blist->blk_for_champ_chooser)
	{
		wbuf->cmd=CMD_GEN;
		wbuf->len=snprintf(wbuf->buf, 128, "%s", "sigs_end");
		if(chfd->append_all_to_write_buffer(chfd, wbuf))
			return 0; // Try again later.
		finished_sending++;
	}
	return 0;
}
/*
 * Server side of backup phase 2: receive backup data from the client,
 * deduplicating via the champ chooser fd. Drives a non-blocking event
 * loop: feed signature/file requests to the client, stream signatures to
 * the champ chooser, and drain both read buffers each iteration until the
 * client signals backup_end.
 * as: async handle whose asfd list holds the main client fd first and the
 *     champ chooser fd second.
 * manifest_dir: unused here - TODO confirm whether it can be dropped.
 * Returns 0 on success, -1 on error. All local resources are released on
 * the 'end' path either way.
 */
int backup_phase2_server(struct async *as, struct sdirs *sdirs, const char *manifest_dir, int resume, struct conf *conf)
{
	int ret=-1;
	int sigs_end=0;
	int backup_end=0;
	int requests_end=0;
	int blk_requests_end=0;
	struct slist *slist=NULL;
	struct blist *blist=NULL;
	struct iobuf *wbuf=NULL;
	struct dpth *dpth=NULL;
	struct manio *cmanio=NULL;	// current manifest
	struct manio *p1manio=NULL;	// phase1 scan manifest
	struct manio *chmanio=NULL;	// changed manifest
	struct manio *unmanio=NULL;	// unchanged manifest
	// This is used to tell the client that a number of consecutive blocks
	// have been found and can be freed.
	uint64_t wrap_up=0;
	// Main fd is first in the list.
	struct asfd *asfd=as->asfd;
	// Champ chooser fd is second in the list.
	struct asfd *chfd=asfd->next;

	logp("Phase 2 begin (recv backup data)\n");

	//if(champ_chooser_init(sdirs->data, conf)
	if(!(cmanio=manio_alloc())
	  || !(p1manio=manio_alloc())
	  || !(chmanio=manio_alloc())
	  || !(unmanio=manio_alloc())
	  || manio_init_read(cmanio, sdirs->cmanifest)
	  || manio_init_read(p1manio, sdirs->phase1data)
	  || manio_init_write(chmanio, sdirs->changed)
	  || manio_init_write(unmanio, sdirs->unchanged)
	  || !(slist=slist_alloc())
	  || !(blist=blist_alloc())
	  || !(wbuf=iobuf_alloc())
	  || !(dpth=dpth_alloc(sdirs->data))
	  || dpth_init(dpth))
		goto end;

	// The phase1 manifest looks the same as a burp1 one.
	manio_set_protocol(p1manio, PROTO_BURP1);
	while(!backup_end)
	{
		// Pull more entries from the phase1 scan into the work lists.
		if(maybe_add_from_scan(asfd, p1manio, cmanio, unmanio, slist, conf))
			goto end;
		// Only build a new outgoing message if the previous one has
		// been fully consumed - signature requests take priority over
		// file requests.
		if(!wbuf->len)
		{
			if(get_wbuf_from_sigs(wbuf, slist, blist, sigs_end,
				&blk_requests_end, dpth, conf))
					goto end;
			if(!wbuf->len)
			{
				get_wbuf_from_files(wbuf, slist, p1manio,
					&requests_end);
			}
		}
		// NOTE(review): return values of these two appends are
		// ignored; a blocked buffer is retried next iteration, but an
		// APPEND_ERROR would also be silently dropped - confirm.
		if(wbuf->len)
			asfd->append_all_to_write_buffer(asfd, wbuf);
		append_for_champ_chooser(chfd, blist, sigs_end);
		if(as->read_write(as))
		{
			logp("error in %s\n", __func__);
			goto end;
		}
		// Drain everything currently in the client read buffer.
		while(asfd->rbuf->buf)
		{
			if(deal_with_read(asfd->rbuf, slist, blist, conf,
				&sigs_end, &backup_end, dpth))
					goto end;
			// Get as much out of the readbuf as possible.
			if(asfd->parse_readbuf(asfd)) goto end;
		}
		// Drain everything currently in the champ chooser read buffer.
		while(chfd->rbuf->buf)
		{
			if(deal_with_read_from_chfd(asfd, chfd, blist,
				&wrap_up, dpth))
					goto end;
			// Get as much out of the readbuf as possible.
			if(chfd->parse_readbuf(chfd)) goto end;
		}
		if(write_to_changed_file(asfd, chfd, chmanio,
			slist, blist, dpth, backup_end, conf))
				goto end;
	}

	// Hack: If there are some entries left after the last entry that
	// contains block data, it will not be written to the changed file
	// yet because the last entry of block data has not had
	// sb->burp2->bend set.
	if(slist->head && slist->head->next)
	{
		slist->head=slist->head->next;
		if(write_to_changed_file(asfd, chfd, chmanio,
			slist, blist, dpth, backup_end, conf))
				goto end;
	}

	if(manio_close(unmanio)
	  || manio_close(chmanio))
		goto end;

	// A non-empty blist here means the client finished without sending
	// a block we still wanted - treat as an error.
	if(blist->head)
	{
		// NOTE(review): index looks like a uint64_t (cf. wrap_up);
		// "%lu" would be wrong on ILP32 - confirm the type.
		logp("ERROR: finishing but still want block: %lu\n",
			blist->head->index);
		goto end;
	}

	// Need to release the last left. There should be one at most.
	if(dpth->head && dpth->head->next)
	{
		logp("ERROR: More data locks remaining after: %s\n",
			dpth->head->save_path);
		goto end;
	}
	if(dpth_release_all(dpth)) goto end;

	ret=0;
end:
	logp("End backup\n");
	slist_free(slist);
	blist_free(blist);
	iobuf_free_content(asfd->rbuf);
	iobuf_free_content(chfd->rbuf);
	// Write buffer did not allocate 'buf'.
	if(wbuf) wbuf->buf=NULL;
	iobuf_free(wbuf);
	dpth_release_all(dpth);
	dpth_free(&dpth);
	manio_free(&cmanio);
	manio_free(&p1manio);
	manio_free(&chmanio);
	manio_free(&unmanio);
	return ret;
}
/*
 * Flush completed entries from the head of slist into the changed
 * manifest (chmanio). For entries that needed block data, write each
 * received (BLK_GOT) block's signature/path, notify the champ chooser
 * when a manifest file fills up, and tell the client when unrequested
 * blocks can be freed. Stops at the first entry that is still waiting
 * for data.
 * backup_end: when set, the final block of a file may be flushed even
 * though it has no successor.
 * Returns 0 on success, -1 on write/allocation error.
 */
static int write_to_changed_file(struct asfd *asfd, struct asfd *chfd,
	struct manio *chmanio, struct slist *slist, struct blist *blist,
	struct dpth *dpth, int backup_end, struct conf *conf)
{
	struct sbuf *sb;
	static struct iobuf *wbuf=NULL;
	if(!slist) return 0;
	if(!wbuf && !(wbuf=iobuf_alloc())) return -1;

	while((sb=slist->head))
	{
		if(sb->flags & SBUF_NEED_DATA)
		{
			// Set when this sbuf was fully consumed and freed
			// inside the inner loop, so the outer loop must
			// restart rather than touch the freed sb.
			int hack=0;
			// Need data...
			struct blk *blk;
			// Write the entry header once, before any of its
			// block signatures.
			if(!(sb->flags & SBUF_HEADER_WRITTEN_TO_MANIFEST))
			{
				if(manio_write_sbuf(chmanio, sb)) return -1;
				sb->flags |= SBUF_HEADER_WRITTEN_TO_MANIFEST;
			}
			// Only consume blocks that have arrived, and never
			// the final block until backup_end (its bend may not
			// be set yet).
			while((blk=sb->burp2->bstart)
				&& blk->got==BLK_GOT
				&& (blk->next || backup_end))
			{
				if(*(blk->save_path))
				{
					if(manio_write_sig_and_path(chmanio,
						blk)) return -1;
					if(chmanio->sig_count==0)
					{
						// Have finished a manifest
						// file. Want to start using
						// it as a dedup candidate
						// now.
						// NOTE(review): debug printf
						// left in - remove?
						iobuf_from_str(wbuf,
							CMD_MANIFEST,
							chmanio->fpath);
						printf("send manifest path\n");
						if(chfd->write(chfd, wbuf))
							return -1;
						if(!blk->requested)
						{
							// Also let the client know,
							// so that it can free memory
							// if there was a long
							// consecutive number of
							// unrequested blocks.
							get_wbuf_from_wrap_up(wbuf,
								blk->index);
							if(asfd->write(asfd, wbuf))
								return -1;
						}
					}
				}
				/* else
				{
					// This gets hit if there is a zero
					// length file.
					printf("!!!!!!!!!!!!! no data; %s\n",
						sb->path);
					exit(1);
				}
				*/

				// Last block of this file - drop the whole
				// sbuf and restart the outer loop.
				if(blk==sb->burp2->bend)
				{
					slist->head=sb->next;
					if(!(blist->head=sb->burp2->bstart))
						blist->tail=NULL;
					sanity_before_sbuf_free(slist, sb);
					sbuf_free(&sb);
					hack=1;
					break;
				}
				// Unlink and free the consumed block, fixing
				// up any cursors that pointed at it.
				if(sb->burp2->bsighead==sb->burp2->bstart)
					sb->burp2->bsighead=blk->next;
				sb->burp2->bstart=blk->next;
				if(blk==blist->blk_from_champ_chooser)
					blist->blk_from_champ_chooser=blk->next;
				//printf("freeing blk %d\n", blk->index);
				blk_free(&blk);
			}
			if(hack) continue;
			// Still waiting on data for this entry - sync the
			// block list head and stop for now.
			if(!(blist->head=sb->burp2->bstart))
				blist->tail=NULL;
			break;
		}
		else
		{
			// No change, can go straight in.
			if(manio_write_sbuf(chmanio, sb)) return -1;

			// Move along.
			slist->head=sb->next;
			sanity_before_sbuf_free(slist, sb);
			sbuf_free(&sb);
		}
	}
	return 0;
}
/*
 * Client side of backup phase 2 (protocol 2): send backup data to the
 * server. Runs a non-blocking loop that interleaves sending file data
 * and block signatures, handling server requests from the read buffer,
 * and generating blocks (bounded by BLKS_MAX_IN_MEM), until END_BACKUP
 * is flagged.
 * resume: when set, skip the "backupphase2"/"ok" handshake; the server
 * may instead push updated counters.
 * Returns 0 on success, -1 on error.
 */
int backup_phase2_client_protocol2(struct asfd *asfd, struct conf **confs, int resume)
{
	int ret=-1;
	uint8_t end_flags=0;
	struct slist *slist=NULL;
	struct iobuf *rbuf=NULL;
	struct iobuf *wbuf=NULL;
	struct cntr *cntr=NULL;

	if(confs) cntr=get_cntr(confs);

	if(!asfd || !asfd->as)
	{
		logp("%s() called without async structs!\n", __func__);
		goto end;
	}

	logp("Phase 2 begin (send backup data)\n");
	logfmt("\n");

	if(!(slist=slist_alloc())
	  || !(wbuf=iobuf_alloc())
	  || blks_generate_init())
		goto end;
	rbuf=asfd->rbuf;

	if(!resume)
	{
		// Only do this bit if the server did not tell us to resume.
		if(asfd->write_str(asfd, CMD_GEN, "backupphase2")
		  || asfd_read_expect(asfd, CMD_GEN, "ok"))
			goto end;
	}
	else
	{
		// On resume, the server might update the client with cntr.
		if(cntr_recv(asfd, confs)) goto end;
	}

	while(!(end_flags&END_BACKUP))
	{
		// Only build a new outgoing message once the previous one
		// has been fully consumed - file data first, then block
		// signatures.
		if(!wbuf->len)
		{
			get_wbuf_from_data(confs, wbuf, slist, end_flags);
			if(!wbuf->len)
			{
				if(get_wbuf_from_blks(wbuf, slist,
					&end_flags)) goto end;
			}
		}

		if(wbuf->len)
		{
			if(asfd->append_all_to_write_buffer(asfd, wbuf)
				==APPEND_ERROR) goto end;
		}
		if(asfd->as->read_write(asfd->as))
		{
			logp("error in %s\n", __func__);
			goto end;
		}

		// Handle whatever the server sent back.
		if(rbuf->buf && deal_with_read(rbuf, slist, cntr,
			&end_flags)) goto end;

		if(slist->head
		// Need to limit how many blocks are allocated at once.
		  && (!slist->blist->head
			|| slist->blist->tail->index
				- slist->blist->head->index<BLKS_MAX_IN_MEM))
		{
			if(add_to_blks_list(asfd, confs, slist)) goto end;
		}

		if(end_flags&END_BLK_REQUESTS)
		{
			// If got to the end of the file request list
			// and the last block of the last file, and
			// the write buffer is empty, we got to the end.
			if(slist->head==slist->tail)
			{
				if(!slist->tail
				  || slist->blist->last_sent==
					slist->tail->protocol2->bend)
				{
					if(!wbuf->len) break;
				}
			}
		}
	}

	if(asfd->write_str(asfd, CMD_GEN, "backup_end"))
		goto end;

	ret=0;
end:
	slist_free(&slist);
	blks_generate_free();
	if(wbuf)
	{
		// Write buffer did not allocate 'buf'.
		wbuf->buf=NULL;
		iobuf_free(&wbuf);
	}
	cntr_print_end(cntr);
	cntr_print(cntr, ACTION_BACKUP, asfd);
	if(ret) logp("Error in backup\n");
	logp("End backup\n");
	return ret;
}
/*
 * The buf is already using BUF for an output buffer, and probably
 * contains some buffered output now. Write this out to F (via the asfd
 * write buffer when fb->fd is set, otherwise via fzp), and reset the
 * buffer cursor.
 * Returns RS_DONE on success, RS_BLOCKED if the asfd write buffer is
 * full (caller retries later), RS_IO_ERROR on any failure or when the
 * buffers are in an inconsistent state.
 *
 * Fixes: %d was used for size_t values in the diagnostic logs (undefined
 * behaviour on LP64); %p was passed char* without a (void *) cast; the
 * fzp error message lacked a trailing newline.
 */
rs_result rs_outfilebuf_drain(rs_job_t *job, rs_buffers_t *buf, void *opaque)
{
	rs_filebuf_t *fb=(rs_filebuf_t *)opaque;
	int fd=fb->fd;
	size_t wlen;

	//logp("in rs_outfilebuf_drain\n");

	/* This is only allowed if either the buf has no output buffer
	 * yet, or that buffer could possibly be BUF. */
	if(!buf->next_out)
	{
		if(buf->avail_out)
		{
			logp("buf->avail_out is %lu in %s\n",
				(unsigned long)buf->avail_out, __func__);
			return RS_IO_ERROR;
		}
		buf->next_out = fb->buf;
		buf->avail_out = fb->buf_len;
		return RS_DONE;
	}

	// Sanity checks: next_out/avail_out must describe a region inside
	// our own buffer.
	if(buf->avail_out > fb->buf_len)
	{
		logp("buf->avail_out > fb->buf_len (%lu > %lu) in %s\n",
			(unsigned long)buf->avail_out,
			(unsigned long)fb->buf_len, __func__);
		return RS_IO_ERROR;
	}
	if(buf->next_out < fb->buf)
	{
		logp("buf->next_out < fb->buf (%p < %p) in %s\n",
			(void *)buf->next_out, (void *)fb->buf, __func__);
		return RS_IO_ERROR;
	}
	if(buf->next_out > fb->buf + fb->buf_len)
	{
		logp("buf->next_out > fb->buf + fb->buf_len in %s\n",
			__func__);
		return RS_IO_ERROR;
	}

	if((wlen=buf->next_out-fb->buf)>0)
	{
		//logp("wlen: %d\n", wlen);
		// NOTE(review): fd>0 treats fd 0 as "no fd"; confirm that a
		// real descriptor can never be 0 here.
		if(fd>0)
		{
			size_t w=wlen;
			static struct iobuf *wbuf=NULL;
			if(!wbuf && !(wbuf=iobuf_alloc()))
				return RS_IO_ERROR;
			// wbuf borrows fb->buf - it does not own it.
			wbuf->cmd=CMD_APPEND;
			wbuf->buf=fb->buf;
			wbuf->len=wlen;
			switch(fb->asfd->append_all_to_write_buffer(
				fb->asfd, wbuf))
			{
				case APPEND_OK: break;
				case APPEND_BLOCKED: return RS_BLOCKED;
				case APPEND_ERROR:
				default: return RS_IO_ERROR;
			}
			fb->bytes+=w;
		}
		else
		{
			size_t result=0;
			result=fzp_write(fb->fzp, fb->buf, wlen);
			if(wlen!=result)
			{
				logp("error draining buf to file: %s\n",
					strerror(errno));
				return RS_IO_ERROR;
			}
		}
	}

	buf->next_out = fb->buf;
	buf->avail_out = fb->buf_len;

	return RS_DONE;
}
// Return p1manio position. static man_off_t *do_resume_work(struct sdirs *sdirs, struct dpth *dpth, struct conf **cconfs) { man_off_t *pos=NULL; man_off_t *p1pos=NULL; struct iobuf *chb=NULL; struct manio *cmanio=NULL; struct manio *umanio=NULL; struct manio *p1manio=NULL; enum protocol protocol=get_protocol(cconfs); struct cntr *cntr=get_cntr(cconfs); int compression=get_int(cconfs[OPT_COMPRESSION]); if(!(p1manio=manio_open_phase1(sdirs->phase1data, "rb", protocol)) || !(cmanio=manio_open_phase2(sdirs->changed, "rb", protocol)) || !(umanio=manio_open_phase2(sdirs->unchanged, "rb", protocol))) goto end; if(!(chb=iobuf_alloc())) return NULL; logp("Setting up resume positions...\n"); if(get_last_good_entry(cmanio, chb, cntr, dpth, protocol, &pos)) goto error; if(manio_close_and_truncate(&cmanio, pos, compression)) goto error; man_off_t_free(&pos); if(chb->buf) { logp(" last good entry: %s\n", chb->buf); // Now need to go to the appropriate places in p1manio and // unchanged. if(forward_past_entry(p1manio, chb, protocol, &p1pos)) goto error; // The unchanged file needs to be positioned just before the // found entry, otherwise it ends up having a duplicated entry. if(forward_before_entry(umanio, chb, cntr, dpth, protocol, &pos)) goto error; if(manio_close_and_truncate(&umanio, pos, compression)) goto error; man_off_t_free(&pos); } else { logp(" nothing previously transferred\n"); if(!(p1pos=manio_tell(p1manio))) goto error; if(!(pos=manio_tell(umanio))) goto error; if(manio_close_and_truncate(&umanio, pos, compression)) goto error; } // Now should have all file pointers in the right places to resume. goto end; error: man_off_t_free(&p1pos); end: iobuf_free(&chb); man_off_t_free(&pos); manio_close(&p1manio); manio_close(&cmanio); manio_close(&umanio); return p1pos; }
int backup_phase2_client_burp2(struct asfd *asfd, struct conf *conf, int resume) { int ret=-1; int sigs_end=0; int backup_end=0; int requests_end=0; int blk_requests_end=0; struct win *win=NULL; // Rabin sliding window. struct slist *slist=NULL; struct blist *blist=NULL; struct iobuf *rbuf=NULL; struct iobuf *wbuf=NULL; logp("Phase 2 begin (send backup data)\n"); if(!(slist=slist_alloc()) || !(blist=blist_alloc()) || !(wbuf=iobuf_alloc()) || blks_generate_init(conf) || !(win=win_alloc(&conf->rconf))) goto end; rbuf=asfd->rbuf; if(!resume) { // Only do this bit if the server did not tell us to resume. if(asfd->write_str(asfd, CMD_GEN, "backupphase2") || asfd->read_expect(asfd, CMD_GEN, "ok")) goto end; } else if(conf->send_client_cntr) { // On resume, the server might update the client with the // counters. if(cntr_recv(asfd, conf)) goto end; } while(!backup_end) { if(!wbuf->len) { get_wbuf_from_data(conf, wbuf, slist, blist, blk_requests_end); if(!wbuf->len) { get_wbuf_from_blks(wbuf, slist, requests_end, &sigs_end); } } if(wbuf->len) asfd->append_all_to_write_buffer(asfd, wbuf); if(asfd->as->read_write(asfd->as)) { logp("error in %s\n", __func__); goto end; } if(rbuf->buf && deal_with_read(rbuf, slist, blist, conf, &backup_end, &requests_end, &blk_requests_end)) goto end; if(slist->head // Need to limit how many blocks are allocated at once. && (!blist->head || blist->tail->index - blist->head->index<BLKS_MAX_IN_MEM) ) { if(add_to_blks_list(asfd, conf, slist, blist, win)) goto end; } if(blk_requests_end) { // If got to the end of the file request list // and the last block of the last file, and // the write buffer is empty, we got to the end. 
if(slist->head==slist->tail) { if(!slist->tail || blist->last_sent==slist->tail->burp2->bend) { if(!wbuf->len) break; } } } } if(asfd->write_str(asfd, CMD_GEN, "backup_end")) goto end; ret=0; end: blk_print_alloc_stats(); //sbuf_print_alloc_stats(); win_free(win); slist_free(&slist); blist_free(&blist); // Write buffer did not allocate 'buf'. wbuf->buf=NULL; iobuf_free(&wbuf); cntr_print_end(conf->cntr); cntr_print(conf->cntr, ACTION_BACKUP); if(ret) logp("Error in backup\n"); logp("End backup\n"); return ret; }