//** Attempts to free bytes_to_free bytes of cache by evicting pages from the
//** bottom (least recently used end) of the LRU stack.  Pages that are idle and
//** clean are freed immediately; dirty or in-use pages are tagged C_TORELEASE
//** and handed to a flush so the release happens when the flush completes.
//**
//** c             - cache (cache_lock(c) MUST already be held by the caller)
//** page_seg      - segment driving the request (used only for logging)
//** bytes_to_free - target number of bytes to reclaim
//**
//** Returns the number of bytes marked for removal (freed immediately plus
//** pending via flush/release).  The cache lock is held again on return.
ex_off_t _lru_attempt_free_mem(cache_t *c, segment_t *page_seg, ex_off_t bytes_to_free)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    segment_t *pseg;
    cache_page_t *p;
    page_lru_t *lp;
    Stack_ele_t *ele;
    op_generic_t *gop;
    opque_t *q;
    ex_off_t total_bytes, freed_bytes, pending_bytes;
    ex_off_t *segid;
    ex_off_t min_off, max_off;
    list_iter_t sit;
    int count, bits, cw, flush_count;
    list_t *table;
    page_table_t *ptable;
    pigeon_coop_hole_t pch, pt_pch;

    log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n", segment_id(page_seg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

    freed_bytes = 0;
    pending_bytes = 0;
    total_bytes = 0;

    //** cache_lock(c) is already acquired
    pch = reserve_pigeon_coop_hole(cp->free_pending_tables);
    table = *(list_t **)pigeon_coop_hole_data(&pch);

    //** Phase 1: walk the LRU stack from the bottom and collect candidate pages,
    //** grouped by owning segment in "table".
    move_to_bottom(cp->stack);
    ele = stack_unlink_current(cp->stack, 1);
    while ((total_bytes < bytes_to_free) && (ele != NULL)) {
        p = (cache_page_t *)get_stack_ele_data(ele);
        lp = (page_lru_t *)p->priv;
        bits = atomic_get(p->bit_fields);
        log_printf(15, "checking page for release seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
        flush_log();

        if ((bits & C_TORELEASE) == 0) { //** Skip it if already flagged for removal
            ptable = (page_table_t *)list_search(table, (list_key_t *)&(segment_id(p->seg)));
            if (ptable == NULL) {  //** Have to make a new segment entry
                pt_pch = reserve_pigeon_coop_hole(cp->free_page_tables);
                ptable = (page_table_t *)pigeon_coop_hole_data(&pt_pch);
                ptable->seg = p->seg;
                ptable->id = segment_id(p->seg);
                ptable->pch = pt_pch;
                list_insert(table, &(ptable->id), ptable);
            }

            cp->limbo_pages++;
            log_printf(15, "UNLINKING seg=" XIDT " p->offset=" XOT " bits=%d limbo=%d\n", segment_id(p->seg), p->offset, bits, cp->limbo_pages);

            atomic_inc(p->access_pending[CACHE_READ]);  //** Do this so it's not accidentally deleted
            push(ptable->stack, p);
            s = (cache_segment_t *)p->seg->priv;
            total_bytes += s->page_size;
            free(lp->ele);
            lp->ele = NULL;  //** Mark it as removed from the list so a page_release doesn't free also
        }

        if (total_bytes < bytes_to_free) ele = stack_unlink_current(cp->stack, 1);
    }

    if (total_bytes == 0) {  //** Nothing to do so exit
        log_printf(15, "Nothing to do so exiting\n");
        release_pigeon_coop_hole(cp->free_pending_tables, &pch);
        return(0);
    }

    cache_unlock(c);  //** Don't need the cache lock for the next part

    q = new_opque();
    opque_start_execution(q);

    //** Phase 2: cycle through the segments to be freed
    pending_bytes = 0;
    sit = list_iter_search(table, list_first_key(table), 0);
    list_next(&sit, (list_key_t **)&segid, (list_data_t **)&ptable);
    while (ptable != NULL) {
        //** Verify the segment is still valid. If not then just delete everything
        pseg = list_search(c->segments, segid);
        if (pseg != NULL) {
            segment_lock(ptable->seg);
            //** BUG FIX: s must be set from ptable->seg BEFORE min_off is seeded from
            //** s->total_size; the old order read whatever segment phase 1 last touched.
            s = (cache_segment_t *)ptable->seg->priv;
            min_off = s->total_size;
            max_off = -1;

            while ((p = pop(ptable->stack)) != NULL) {
                atomic_dec(p->access_pending[CACHE_READ]); //** Removed my access control from earlier
                flush_count = atomic_get(p->access_pending[CACHE_FLUSH]);
                cw = atomic_get(p->access_pending[CACHE_WRITE]);
                count = atomic_get(p->access_pending[CACHE_READ]) + cw + flush_count;
                bits = atomic_get(p->bit_fields);
                if (count != 0) { //** Currently in use so wait for it to be released
                    if (cw > 0) {  //** Got writes so need to wait until they complete otherwise the page may not get released
                        bits = bits | C_TORELEASE;  //** Mark it for release
                        atomic_set(p->bit_fields, bits);
                        _cache_drain_writes(p->seg, p);  //** Drain the write ops
                        bits = atomic_get(p->bit_fields);  //** Get the bit fields to see if it's dirty
                    }

                    if (flush_count == 0) {  //** Make sure it's not already being flushed
                        if ((bits & C_ISDIRTY) != 0) {  //** Have to flush it don't have to track it cause the flush will do the release
                            if (min_off > p->offset) min_off = p->offset;
                            if (max_off < p->offset) max_off = p->offset;
                        }
                    }
                    bits = bits | C_TORELEASE;
                    log_printf(15, "in use tagging for release seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                    atomic_set(p->bit_fields, bits);

                    pending_bytes += s->page_size;
                } else {  //** Not in use
                    if ((bits & (C_ISDIRTY|C_EMPTY)) == 0) {  //** Don't have to flush it just drop the page
                        cp->limbo_pages--;
                        log_printf(15, "FREEING page seg=" XIDT " p->offset=" XOT " bits=%d limbo=%d\n", segment_id(p->seg), p->offset, bits, cp->limbo_pages);
                        list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
                        if (p->data[0].ptr) free(p->data[0].ptr);
                        if (p->data[1].ptr) free(p->data[1].ptr);
                        lp = (page_lru_t *)p->priv;
                        free(lp);
                        freed_bytes += s->page_size;
                    } else {  //** Got to flush the page first but don't have to track it cause the flush will do the release
                        if (p->offset > -1) { //** Skip blank pages
                            if (min_off > p->offset) min_off = p->offset;
                            if (max_off < p->offset) max_off = p->offset;
                        }

                        bits = bits | C_TORELEASE;
                        atomic_set(p->bit_fields, bits);

                        pending_bytes += s->page_size;
                        if (p->offset > -1) {
                            log_printf(15, "FLUSHING page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        } else {
                            log_printf(15, "RELEASE trigger for empty page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        }
                    }
                }
                //** BUG FIX: removed a stray list_next(&sit, ...) that used to sit here.
                //** Advancing the segment-table iterator once per PAGE skipped table
                //** entries (leaking their pigeon-coop holes and pinned pages); the
                //** iterator is correctly advanced once per segment at loop bottom.
            }

            segment_unlock(ptable->seg);

            if (max_off>-1) {
                gop = cache_flush_range(ptable->seg, s->c->da, min_off, max_off + s->page_size - 1, s->c->timeout);
                opque_add(q, gop);
            }
        } else {  //** Segment has been deleted so drop everything cause it's already freeed
            empty_stack(ptable->stack, 0);
        }

        cache_lock(c);
        release_pigeon_coop_hole(cp->free_page_tables, &(ptable->pch));
        cache_unlock(c);

        list_next(&sit, (skiplist_key_t **)&pseg, (skiplist_data_t **)&ptable);
    }

    cache_lock(c);
    log_printf(15, "BEFORE waitall seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " freed_bytes=" XOT " pending_bytes=" XOT "\n",
               segment_id(page_seg), bytes_to_free, cp->bytes_used, freed_bytes, pending_bytes);
    cache_unlock(c);

    //** Wait for any flush tasks to complete
    opque_waitall(q);
    opque_free(q, OP_DESTROY);

    //** Had this when we came in
    cache_lock(c);
    log_printf(15, "AFTER waitall seg=" XIDT " bytes_used=" XOT "\n", segment_id(page_seg), cp->bytes_used);

    cp->bytes_used -= freed_bytes;  //** Update how much I directly freed

    log_printf(15, "AFTER used update seg=" XIDT " bytes_used=" XOT "\n", segment_id(page_seg), cp->bytes_used);

    //** Clean up
    empty_skiplist(table);
    release_pigeon_coop_hole(cp->free_pending_tables, &pch);

    log_printf(15, "total_bytes marked for removal =" XOT "\n", total_bytes);

    return(total_bytes);
}
//** Background thread that periodically (or when cp->dirty_trigger fires)
//** launches a flush of every cached segment to push dirty pages to disk.
//**
//** th   - APR thread handle (unused here)
//** data - the cache_t* this thread services
//**
//** Holds cache_lock(c) while scanning c->segments; drops it while the flush
//** ops run so page traffic can proceed.  Exits when c->shutdown_request is set.
void *lru_dirty_thread(apr_thread_t *th, void *data)
{
    cache_t *c = (cache_t *)data;
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    double df;
    int n, i;
    ex_id_t *id;
    segment_t *seg;
    opque_t *q;
    op_generic_t *gop;
    cache_segment_t *s;
    skiplist_iter_t it;
    segment_t **flush_list;

    cache_lock(c);

    log_printf(15, "Dirty thread launched\n");
    while (c->shutdown_request == 0) {
        //** Sleep until the dirty trigger fires or the max wait elapses.
        //** The condwait atomically releases/reacquires c->lock.
        apr_thread_cond_timedwait(cp->dirty_trigger, c->lock, cp->dirty_max_wait);

        df = cp->max_bytes;
        df = c->stats.dirty_bytes / df;  //** Fraction of the cache that is dirty (logging only)
        log_printf(15, "Dirty thread running. dirty fraction=%lf dirty bytes=" XOT " inprogress=%d cached segments=%d\n", df, c->stats.dirty_bytes, cp->flush_in_progress, list_key_count(c->segments));

        cp->flush_in_progress = 1;
        q = new_opque();

        //** Build a flush op for every cached segment.  flush_list keeps the
        //** segment pointers so completions (matched by gop myid) can be mapped back.
        n = list_key_count(c->segments);
        type_malloc(flush_list, segment_t *, n);

        it = list_iter_search(c->segments, NULL, 0);
        list_next(&it, (list_key_t **)&id, (list_data_t **)&seg);
        i = 0;
        while (seg != NULL) {
            log_printf(15, "Flushing seg=" XIDT " i=%d\n", *id, i);
            flush_log();
            flush_list[i] = seg;
            s = (cache_segment_t *)seg->priv;
            atomic_set(s->cache_check_in_progress, 1);  //** Flag it as being checked
            gop = cache_flush_range(seg, s->c->da, 0, -1, s->c->timeout);  //** 0..-1 == whole segment
            gop_set_myid(gop, i);
            opque_add(q, gop);
            i++;

            list_next(&it, (list_key_t **)&id, (list_data_t **)&seg);
        }

        cache_unlock(c);  //** Don't hold the cache lock while the flushes run

        //** Flag the tasks as they complete
        opque_start_execution(q);
        while ((gop = opque_waitany(q)) != NULL) {
            i = gop_get_myid(gop);
            segment_lock(flush_list[i]);
            s = (cache_segment_t *)flush_list[i]->priv;

            log_printf(15, "Flush completed seg=" XIDT " i=%d\n", segment_id(flush_list[i]), i);
            flush_log();
            atomic_set(s->cache_check_in_progress, 0);  //** Flag it as being finished
            segment_unlock(flush_list[i]);

            gop_free(gop, OP_DESTROY);
        }

        opque_free(q, OP_DESTROY);

        cache_lock(c);  //** Reacquire before touching cp state and re-testing shutdown
        cp->flush_in_progress = 0;
        free(flush_list);

        df = cp->max_bytes;
        df = c->stats.dirty_bytes / df;
        log_printf(15, "Dirty thread sleeping. dirty fraction=%lf dirty bytes=" XOT " inprogress=%d\n", df, c->stats.dirty_bytes, cp->flush_in_progress);
//     apr_thread_cond_timedwait(cp->dirty_trigger, c->lock, cp->dirty_max_wait);
    }

    log_printf(15, "Dirty thread Exiting\n");

    cache_unlock(c);

    return(NULL);
}
//** Destroys a batch of cache pages outright (cache shutdown / segment teardown path).
//**
//** c                   - the cache
//** page                - array of pages to destroy
//** n_pages             - number of entries in page[]
//** remove_from_segment - if 1, also remove each page from its segment's page list
//**
//** Pages with no pending read/write/flush references are freed immediately and
//** cp->bytes_used is decremented.  Pages still referenced are instead tagged
//** C_TORELEASE so the last holder releases them (NOTE(review): that branch does
//** not adjust bytes_used here — presumably the release path does; confirm).
void lru_pages_destroy(cache_t *c, cache_page_t **page, int n_pages, int remove_from_segment)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    page_lru_t *lp;
    cache_page_t *p;
//  cache_cond_t *cache_cond;
    int i;
    int cr, cw, cf, count;

    cache_lock(c);

    log_printf(15, " START cp->bytes_used=" XOT "\n", cp->bytes_used);

    for (i=0; i<n_pages; i++) {
        p = page[i];
        s = (cache_segment_t *)p->seg->priv;

        //** Sum all outstanding references to decide if anyone is still using the page
        cr = atomic_get(p->access_pending[CACHE_READ]);
        cw = atomic_get(p->access_pending[CACHE_WRITE]);
        cf = atomic_get(p->access_pending[CACHE_FLUSH]);
        count = cr +cw + cf;

//     cache_cond = (cache_cond_t *)pigeon_coop_hole_data(&(p->cond_pch));
//     if (cache_cond == NULL) {  //** No one listening so free normally
        if (count == 0) { //** No one is listening
            log_printf(15, "lru_pages_destroy i=%d p->offset=" XOT " seg=" XIDT " remove_from_segment=%d limbo=%d\n", i, p->offset, segment_id(p->seg), remove_from_segment, cp->limbo_pages);
            cp->bytes_used -= s->page_size;
            lp = (page_lru_t *)p->priv;

            //** If still linked into the LRU stack, unlink it first
            if (lp->ele != NULL) {
                move_to_ptr(cp->stack, lp->ele);
                delete_current(cp->stack, 0, 0);
            }

            if (remove_from_segment == 1) {
                s = (cache_segment_t *)p->seg->priv;
                list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
            }

            //** Free both data copies; freeing lp frees the embedded page as well
            if (p->data[0].ptr) free(p->data[0].ptr);
            if (p->data[1].ptr) free(p->data[1].ptr);
            free(lp);
        } else {  //** Someone is listening so trigger them and also clear the bits so it will be released
            atomic_set(p->bit_fields, C_TORELEASE);
            log_printf(15, "lru_pages_destroy i=%d p->offset=" XOT " seg=" XIDT " remove_from_segment=%d cr=%d cw=%d cf=%d limbo=%d\n", i, p->offset, segment_id(p->seg), remove_from_segment, cr, cw, cf, cp->limbo_pages);
        }
    }

    log_printf(15, " AFTER LOOP cp->bytes_used=" XOT "\n", cp->bytes_used);

    log_printf(15, " END cp->bytes_used=" XOT "\n", cp->bytes_used);

    cache_unlock(c);
}
//** Fast-path memory reclaim: frees clean, idle pages from the bottom of the
//** LRU stack until bytes_to_free is met or a page that can't be freed cheaply
//** is hit (dirty, in use, or its segment lock can't be trylocked).
//**
//** c             - cache (caller holds cache_lock(c))
//** pseg          - segment whose lock the caller already holds (so trylock can be skipped for it)
//** bytes_to_free - target number of bytes to reclaim
//**
//** Returns the shortfall (bytes_to_free - bytes actually freed); the caller is
//** expected to fall back to _lru_attempt_free_mem for the remainder.
int _lru_free_mem(cache_t *c, segment_t *pseg, ex_off_t bytes_to_free)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    cache_page_t *p;
    page_lru_t *lp;
    Stack_ele_t *ele;
    apr_thread_mutex_t *plock;
    ex_off_t total_bytes, pending_bytes;
    int gotlock, count, bits, err;

    total_bytes = 0;
    err = 0;

    log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n", segment_id(pseg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

    move_to_bottom(cp->stack);  //** Start from the least recently used page
    ele = get_ptr(cp->stack);
    while ((total_bytes < bytes_to_free) && (ele != NULL) && (err == 0)) {
        p = (cache_page_t *)get_stack_ele_data(ele);
        lp = (page_lru_t *)p->priv;
        plock = p->seg->lock;
        //** Trylock only — never block here; if p belongs to pseg the caller already holds the lock
        gotlock = apr_thread_mutex_trylock(plock);
        if ((gotlock == APR_SUCCESS) || (p->seg == pseg)) {
            bits = atomic_get(p->bit_fields);
            if ((bits & C_TORELEASE) == 0) { //** Skip it if already flagged for removal
                count = atomic_get(p->access_pending[CACHE_READ]) + atomic_get(p->access_pending[CACHE_WRITE]) + atomic_get(p->access_pending[CACHE_FLUSH]);
                if (count == 0) { //** No one is using it
                    s = (cache_segment_t *)p->seg->priv;
                    if ((bits & C_ISDIRTY) == 0) {  //** Don't have to flush it
                        total_bytes += s->page_size;
                        log_printf(15, "lru_free_mem: freeing page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        list_remove(s->pages, &(p->offset), p); //** Have to do this here cause p->offset is the key var
                        delete_current(cp->stack, 1, 0);  //** Remove from LRU and step toward the top
                        if (p->data[0].ptr) free(p->data[0].ptr);
                        if (p->data[1].ptr) free(p->data[1].ptr);
                        free(lp);  //** Frees the embedded page too
                    } else {         //** Got to flush the page first
                        err = 1;  //** Stop the cheap scan; needs the flush path
                    }
                } else {
                    err = 1;  //** Page busy; abort the fast path
                }
            }
            if (gotlock == APR_SUCCESS) apr_thread_mutex_unlock(plock);
        } else {
            err = 1;  //** Couldn't get the segment lock without blocking
        }

        if ((total_bytes < bytes_to_free) && (err == 0)) ele = get_ptr(cp->stack);
    }

    cp->bytes_used -= total_bytes;
    pending_bytes = bytes_to_free - total_bytes;
    log_printf(15, "END seg=" XIDT " bytes_to_free=" XOT " pending_bytes=" XOT " bytes_used=" XOT "\n", segment_id(pseg), bytes_to_free,
               pending_bytes, cp->bytes_used);

    return(pending_bytes);
}
//** Releases a batch of pages the caller is done with.  Pages tagged
//** C_TORELEASE (by the eviction/destroy paths) are actually freed here;
//** all other pages are left untouched.  Afterwards any threads waiting for
//** cache space are re-checked.
//**
//** c       - the cache
//** page    - array of pages being released
//** n_pages - number of entries in page[]
//**
//** Always returns 0.
int lru_pages_release(cache_t *c, cache_page_t **page, int n_pages)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    page_lru_t *lp;
    cache_page_t *p;
    int bits, i;

    cache_lock(c);

    for (i=0; i<n_pages; i++) {
        p = page[i];
        bits = atomic_get(p->bit_fields);
        log_printf(15, "seg=" XIDT " p->offset=" XOT " bits=%d bytes_used=" XOT "\n", segment_id(p->seg), p->offset, bits, cp->bytes_used);
        if ((bits & C_TORELEASE) > 0) {  //** Only destroy pages flagged for release
            log_printf(15, "DESTROYING seg=" XIDT " p->offset=" XOT " bits=%d bytes_used=" XOT "cache_pages=%d\n", segment_id(p->seg), p->offset, bits, cp->bytes_used, stack_size(cp->stack));
            s = (cache_segment_t *)p->seg->priv;
            lp = (page_lru_t *)p->priv;

            cp->bytes_used -= s->page_size;
            if (lp->ele != NULL) {  //** Still linked into the LRU stack: unlink it
                move_to_ptr(cp->stack, lp->ele);
                delete_current(cp->stack, 0, 0);
            } else {                //** Already unlinked by the eviction path ("limbo")
                cp->limbo_pages--;
                log_printf(15, "seg=" XIDT " limbo page p->offset=" XOT " limbo=%d\n", segment_id(p->seg), p->offset, cp->limbo_pages);
            }

            if (p->offset > -1) {  //** Negative offsets are blank pages never inserted in s->pages
                list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
            }

            //** Free both data copies; freeing lp frees the embedded page struct too
            if (p->data[0].ptr) free(p->data[0].ptr);
            if (p->data[1].ptr) free(p->data[1].ptr);
            free(lp);
        }
    }

    //** Now check if we can handle some waiters
    _lru_process_waiters(c);

    cache_unlock(c);

    return(0);
}
//** Allocates a fresh, empty cache page for the given segment and pushes it on
//** the LRU stack.  The page gets a unique negative placeholder offset (from the
//** global lru_dummy counter) and the C_EMPTY flag so nothing frees it before it
//** is bound to a real offset.  Caller is assumed to hold the cache lock.
//**
//** c   - the cache (accounting: cp->bytes_used grows by one page)
//** seg - segment the page will belong to
//**
//** Returns the new page (embedded inside its page_lru_t wrapper).
cache_page_t *_lru_new_page(cache_t *c, segment_t *seg)
{
    cache_lru_t *lru = (cache_lru_t *)c->fn.priv;
    cache_segment_t *cs = (cache_segment_t *)seg->priv;
    page_lru_t *lpage;
    cache_page_t *page;

    //** The page struct lives inside the LRU wrapper; allocate both at once
    type_malloc_clear(lpage, page_lru_t, 1);
    page = &(lpage->page);
    page->priv = (void *)lpage;
    page->seg = seg;

    //** Single data copy to start with
    page->current_index = 0;
    page->curr_data = &(page->data[0]);
    type_malloc_clear(page->curr_data->ptr, char, cs->page_size);

    lru->bytes_used += cs->page_size;

    //** Unique negative placeholder offset + empty flag keep it from being reaped
    page->offset = atomic_dec(lru_dummy);
    atomic_set(page->bit_fields, C_EMPTY);

    //** Insert at the top of the LRU and remember our stack position
    push(lru->stack, page);
    lpage->ele = get_ptr(lru->stack);

    log_printf(15, " seg=" XIDT " page created initial->offset=" XOT " page_size=" XOT " data[0]=%p bytes_used=" XOT " stack_size=%d\n", segment_id(seg), page->offset, cs->page_size, page->curr_data->ptr, lru->bytes_used, stack_size(lru->stack));

    return(page);
}
//** Segment-to-segment copy using a split buffer: one half is being read from
//** the source while the other half is being written to the destination
//** (classic double-buffered pipeline).
//**
//** arg - segment_copy_t* describing src/dest segments, offsets, length, buffer
//** id  - op id (unused)
//**
//** Returns op_success_status with error_code set to the final read position,
//** or op_failure_status on any read/write error.
op_status_t segment_copy_func(void *arg, int id)
{
    segment_copy_t *sc = (segment_copy_t *)arg;
    tbuffer_t *wbuf, *rbuf, *tmpbuf;
    tbuffer_t tbuf1, tbuf2;
    int err;
    ex_off_t bufsize;
    ex_off_t rpos, wpos, rlen, wlen, tlen, nbytes, dend;
    ex_iovec_t rex, wex;
    opque_t *q;
    op_generic_t *rgop, *wgop;
    op_status_t status;

    //** Set up the buffers
    bufsize = sc->bufsize / 2;  //** The buffer is split for R/W
    tbuffer_single(&tbuf1, bufsize, sc->buffer);
    tbuffer_single(&tbuf2, bufsize, &(sc->buffer[bufsize]));
    rbuf = &tbuf1;
    wbuf = &tbuf2;

    //** Check the length
    nbytes = segment_size(sc->src) - sc->src_offset;
    if (nbytes < 0) {
        rlen = bufsize;
    } else {
        rlen = (nbytes > bufsize) ? bufsize : nbytes;
    }
    //** NOTE(review): sc->len clamps nbytes AFTER the initial rlen was computed,
    //** so the first read may exceed sc->len when sc->len < bufsize — confirm intended.
    if ((sc->len != -1) && (sc->len < nbytes)) nbytes = sc->len;

    //** Go ahead and reserve the space in the destintaion
    dend = sc->dest_offset + nbytes;
    log_printf(1, "reserving space=" XOT "\n", dend);
    gop_sync_exec(segment_truncate(sc->dest, sc->da, -dend, sc->timeout));  //** Negative size == reserve

    //** Read the initial block
    rpos = sc->src_offset;
    wpos = sc->dest_offset;
//  rlen = (nbytes > bufsize) ? bufsize : nbytes;
    wlen = 0;
    ex_iovec_single(&rex, rpos, rlen);
    rpos += rlen;
    nbytes -= rlen;
    rgop = segment_read(sc->src, sc->da, sc->rw_hints, 1, &rex, rbuf, 0, sc->timeout);
    err = gop_waitall(rgop);
    if (err != OP_STATE_SUCCESS) {
        log_printf(1, "Intial read failed! src=%" PRIu64 " rpos=" XOT " len=" XOT "\n", segment_id(sc->src), rpos, rlen);
        gop_free(rgop, OP_DESTROY);
        return(op_failure_status);
    }
    gop_free(rgop, OP_DESTROY);

    q = new_opque();
    do {
        //** Swap the buffers: last block read becomes the block to write
        tmpbuf = rbuf;
        rbuf = wbuf;
        wbuf = tmpbuf;
        tlen = rlen;
        rlen = wlen;
        wlen = tlen;
        log_printf(1, "sseg=" XIDT " dseg=" XIDT " wpos=%" PRId64 " rlen=%" PRId64 " wlen=%" PRId64 "\n", segment_id(sc->src), segment_id(sc->dest), wpos, rlen, wlen);

        //** Start the write
        ex_iovec_single(&wex, wpos, wlen);
        wpos += wlen;
        wgop = segment_write(sc->dest, sc->da, sc->rw_hints, 1, &wex, wbuf, 0, sc->timeout);
        opque_add(q, wgop);

        //** Read in the next block (concurrently with the write)
//     rlen = (nbytes > bufsize) ? bufsize : nbytes;
        if (nbytes < 0) {
            rlen = bufsize;
        } else {
            rlen = (nbytes > bufsize) ? bufsize : nbytes;
        }
        if (rlen > 0) {
            ex_iovec_single(&rex, rpos, rlen);
            rpos += rlen;
            nbytes -= rlen;
            rgop = segment_read(sc->src, sc->da, sc->rw_hints, 1, &rex, rbuf, 0, sc->timeout);
            opque_add(q, rgop);
        }

        err = opque_waitall(q);
        if (err != OP_STATE_SUCCESS) {
            log_printf(1, "ERROR read/write failed! src=" XIDT " rpos=" XOT " len=" XOT "\n", segment_id(sc->src), rpos, rlen);
            opque_free(q, OP_DESTROY);
            return(op_failure_status);
        }
    } while (rlen > 0);  //** rlen==0 means the final block has been written

    opque_free(q, OP_DESTROY);

    if (sc->truncate == 1) {  //** Truncate if wanted
        gop_sync_exec(segment_truncate(sc->dest, sc->da, wpos, sc->timeout));
    }

    status = op_success_status;
    status.error_code = rpos;
    return(status);
}
//** Uploads a local file (sc->fd) into a segment using a split buffer: one half
//** is filled by fread while the other half is written to the segment
//** (double-buffered pipeline).
//**
//** arg - segment_copy_t* with the source FILE*, dest segment, offsets, length
//** id  - op id (unused)
//**
//** Returns op_success_status, or op_failure_status on an fread error or a
//** failed segment write.
op_status_t segment_put_func(void *arg, int id)
{
    segment_copy_t *sc = (segment_copy_t *)arg;
    tbuffer_t *wbuf, *rbuf, *tmpbuf;
    tbuffer_t tbuf1, tbuf2;
    char *rb, *wb, *tb;
    ex_off_t bufsize;
    int err;
    ex_off_t rpos, wpos, rlen, wlen, tlen, nbytes, got, dend;
    ex_iovec_t wex;
    op_generic_t *gop;
    op_status_t status;
    apr_time_t loop_start, file_start;
    double dt_loop, dt_file;

    //** Set up the buffers
    bufsize = sc->bufsize / 2;  //** The buffer is split for R/W
    rb = sc->buffer;
    wb = &(sc->buffer[bufsize]);
    tbuffer_single(&tbuf1, bufsize, rb);
    tbuffer_single(&tbuf2, bufsize, wb);
    rbuf = &tbuf1;
    wbuf = &tbuf2;

    nbytes = sc->len;
    status = op_success_status;

    //** Go ahead and reserve the space in the destintaion
    dend = sc->dest_offset + nbytes;
    gop_sync_exec(segment_truncate(sc->dest, sc->da, -dend, sc->timeout));  //** Negative size == reserve

    //** Read the initial block from the file
    rpos = 0;
    wpos = sc->dest_offset;
    if (nbytes < 0) {  //** Unknown length: read a full buffer each pass until EOF
        rlen = bufsize;
    } else {
        rlen = (nbytes > bufsize) ? bufsize : nbytes;
    }
    wlen = 0;
    rpos += rlen;
    nbytes -= rlen;
    log_printf(0, "FILE fd=%p bufsize=" XOT " rlen=" XOT " nbytes=" XOT "\n", sc->fd, bufsize, rlen, nbytes);
    loop_start = apr_time_now();
    got = fread(rb, 1, rlen, sc->fd);
    dt_file = apr_time_now() - loop_start;
    dt_file /= (double)APR_USEC_PER_SEC;
    if (got == 0) {  //** Nothing read: EOF is fine, a stream error is a failure
        if (feof(sc->fd) == 0) {
            log_printf(1, "ERROR from fread=%d dest sid=" XIDT " rlen=" XOT " got=" XOT "\n", errno, segment_id(sc->dest), rlen, got);
            status = op_failure_status;
        }
        goto finished;
    }
    rlen = got;  //** A short read just shrinks this block

    do {
        //** Swap the buffers: the block just read becomes the block to write
        tb = rb;
        rb = wb;
        wb = tb;
        tmpbuf = rbuf;
        rbuf = wbuf;
        wbuf = tmpbuf;
        tlen = rlen;
        rlen = wlen;
        wlen = tlen;
        log_printf(1, "dseg=" XIDT " wpos=" XOT " rlen=" XOT " wlen=" XOT "\n", segment_id(sc->dest), wpos, rlen, wlen);

        //** Start the write
        ex_iovec_single(&wex, wpos, wlen);
        wpos += wlen;
        loop_start = apr_time_now();
        gop = segment_write(sc->dest, sc->da, sc->rw_hints, 1, &wex, wbuf, 0, sc->timeout);
        gop_start_execution(gop);  //** Start doing the transfer

        //** Read in the next block while the write runs
        if (nbytes < 0) {
            rlen = bufsize;
        } else {
            rlen = (nbytes > bufsize) ? bufsize : nbytes;
        }
        if (rlen > 0) {
            file_start = apr_time_now();
            got = fread(rb, 1, rlen, sc->fd);
            dt_file = apr_time_now() - file_start;
            dt_file /= (double)APR_USEC_PER_SEC;
            if (got == 0) {
                if (feof(sc->fd) == 0) {  //** Stream error (EOF just means got=0 ends the loop)
                    log_printf(1, "ERROR from fread=%d dest sid=" XIDT " got=" XOT " rlen=" XOT "\n", errno, segment_id(sc->dest), got, rlen);
                    status = op_failure_status;
                    gop_waitall(gop);  //** Let the in-flight write finish before bailing
                    gop_free(gop, OP_DESTROY);
                    goto finished;
                }
            }
            rlen = got;
            rpos += rlen;
            nbytes -= rlen;
        }

        //** Wait for the write to complete
        err = gop_waitall(gop);
        dt_loop = apr_time_now() - loop_start;
        dt_loop /= (double)APR_USEC_PER_SEC;
        log_printf(1, "dt_loop=%lf dt_file=%lf\n", dt_loop, dt_file);
        if (err != OP_STATE_SUCCESS) {
            log_printf(1, "ERROR write(dseg=" XIDT ") failed! wpos=" XOT " len=" XOT "\n", segment_id(sc->dest), wpos, wlen);
            status = op_failure_status;
            gop_free(gop, OP_DESTROY);
            goto finished;
        }
        gop_free(gop, OP_DESTROY);
    } while (rlen > 0);  //** rlen==0 means the final block has been written

    if (sc->truncate == 1) {  //** Truncate if wanted
        gop_sync_exec(segment_truncate(sc->dest, sc->da, wpos, sc->timeout));
    }

finished:
//  status.error_code = rpos;

    return(status);
}
op_status_t segment_get_func(void *arg, int id) { segment_copy_t *sc = (segment_copy_t *)arg; tbuffer_t *wbuf, *rbuf, *tmpbuf; tbuffer_t tbuf1, tbuf2; char *rb, *wb, *tb; ex_off_t bufsize; int err; ex_off_t rpos, wpos, rlen, wlen, tlen, nbytes, got, total; ex_iovec_t rex; apr_time_t loop_start, file_start; double dt_loop, dt_file; op_generic_t *gop; op_status_t status; //** Set up the buffers bufsize = sc->bufsize / 2; //** The buffer is split for R/W rb = sc->buffer; wb = &(sc->buffer[bufsize]); tbuffer_single(&tbuf1, bufsize, rb); tbuffer_single(&tbuf2, bufsize, wb); rbuf = &tbuf1; wbuf = &tbuf2; status = op_success_status; //** Read the initial block rpos = sc->src_offset; wpos = 0; nbytes = segment_size(sc->src) - sc->src_offset; if (nbytes < 0) { rlen = bufsize; } else { rlen = (nbytes > bufsize) ? bufsize : nbytes; } log_printf(5, "FILE fd=%p\n", sc->fd); ex_iovec_single(&rex, rpos, rlen); wlen = 0; rpos += rlen; nbytes -= rlen; loop_start = apr_time_now(); gop = segment_read(sc->src, sc->da, sc->rw_hints, 1, &rex, rbuf, 0, sc->timeout); err = gop_waitall(gop); if (err != OP_STATE_SUCCESS) { log_printf(1, "Intial read failed! src=" XIDT " rpos=" XOT " len=" XOT "\n", segment_id(sc->src), rpos, rlen); gop_free(gop, OP_DESTROY); return(op_failure_status); } gop_free(gop, OP_DESTROY); total = 0; do { //** Swap the buffers tb = rb; rb = wb; wb = tb; tmpbuf = rbuf; rbuf = wbuf; wbuf = tmpbuf; tlen = rlen; rlen = wlen; wlen = tlen; log_printf(1, "sseg=" XIDT " rpos=" XOT " wpos=" XOT " rlen=" XOT " wlen=" XOT " nbytes=" XOT "\n", segment_id(sc->src), rpos, wpos, rlen, wlen, nbytes); //** Read in the next block if (nbytes < 0) { rlen = bufsize; } else { rlen = (nbytes > bufsize) ? 
bufsize : nbytes; } if (rlen > 0) { ex_iovec_single(&rex, rpos, rlen); loop_start = apr_time_now(); gop = segment_read(sc->src, sc->da, sc->rw_hints, 1, &rex, rbuf, 0, sc->timeout); gop_start_execution(gop); //** Start doing the transfer rpos += rlen; nbytes -= rlen; } //** Start the write file_start = apr_time_now(); got = fwrite(wb, 1, wlen, sc->fd); dt_file = apr_time_now() - file_start; dt_file /= (double)APR_USEC_PER_SEC; total += got; log_printf(5, "sid=" XIDT " fwrite(wb,1," XOT ", sc->fd)=" XOT " total=" XOT "\n", segment_id(sc->src), wlen, got, total); if (wlen != got) { log_printf(1, "ERROR from fread=%d dest sid=" XIDT "\n", errno, segment_id(sc->dest)); status = op_failure_status; gop_waitall(gop); gop_free(gop, OP_DESTROY); goto fail; } wpos += wlen; //** Wait for the read to complete if (rlen > 0) { err = gop_waitall(gop); gop_free(gop, OP_DESTROY); if (err != OP_STATE_SUCCESS) { log_printf(1, "ERROR write(dseg=" XIDT ") failed! wpos=" XOT " len=" XOT "\n", segment_id(sc->dest), wpos, wlen); status = op_failure_status; goto fail; } } dt_loop = apr_time_now() - loop_start; dt_loop /= (double)APR_USEC_PER_SEC; log_printf(1, "dt_loop=%lf dt_file=%lf\n", dt_loop, dt_file); } while (rlen > 0); fail: return(status); }