/**
 * Create a copy of a restart marker.
 * @ingroup globus_ftp_client_restart_marker
 *
 * Copies the contents of @a marker into @a new_marker. The copy is
 * deep: for extended-block markers, each byte range is re-inserted
 * into the new marker, so the two markers share no storage.
 *
 * @param new_marker
 *        A pointer to a new restart marker.
 * @param marker
 *        The marker to copy.
 *
 * @see globus_ftp_client_restart_marker_init(),
 * globus_ftp_client_restart_marker_destroy()
 */
globus_result_t
globus_ftp_client_restart_marker_copy(
    globus_ftp_client_restart_marker_t *        new_marker,
    globus_ftp_client_restart_marker_t *        marker)
{
    globus_fifo_t *                             range_queue;
    GlobusFuncName(globus_ftp_client_restart_marker_copy);

    if(new_marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("new_marker"));
    }
    if(marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker"));
    }

    globus_ftp_client_restart_marker_init(new_marker);
    new_marker->type = marker->type;

    if(marker->type == GLOBUS_FTP_CLIENT_RESTART_STREAM)
    {
        /* Stream markers carry a single offset. */
        new_marker->stream.offset = marker->stream.offset;
    }
    else if(marker->type == GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK)
    {
        globus_fifo_init(&new_marker->extended_block.ranges);

        if(!globus_fifo_empty(&marker->extended_block.ranges))
        {
            /*
             * Walk a shallow copy of the source queue; the range
             * structs still belong to the source marker, we only read
             * their offsets and insert fresh entries into the copy.
             */
            range_queue = globus_fifo_copy(&marker->extended_block.ranges);
            while(!globus_fifo_empty(range_queue))
            {
                globus_i_ftp_client_range_t *   src_range;

                src_range = (globus_i_ftp_client_range_t *)
                    globus_fifo_dequeue(range_queue);

                globus_ftp_client_restart_marker_insert_range(
                    new_marker,
                    src_range->offset,
                    src_range->end_offset);
            }
            /* globus_fifo_copy() heap-allocates the fifo itself. */
            globus_fifo_destroy(range_queue);
            globus_free(range_queue);
        }
    }
    /* GLOBUS_FTP_CLIENT_RESTART_NONE: nothing further to copy. */

    return GLOBUS_SUCCESS;
}
/**
 * Get total bytes accounted for in restart marker
 * @ingroup globus_ftp_client_restart_marker
 *
 * This function will return the sum of all bytes accounted for in
 * a restart marker.  If this restart marker contains a stream offset
 * then this value is the same as the offset (not the ascii offset)
 * that it was set with.  If it is a range list, it is the sum of all
 * the bytes in the ranges.
 *
 * @param marker
 *        A previously initialized or copied restart marker
 *
 * @param total_bytes
 *        pointer to storage for total bytes in marker
 *
 * @return
 *        - Error on NULL marker or total bytes
 *        - GLOBUS_SUCCESS otherwise
 */
globus_result_t
globus_ftp_client_restart_marker_get_total(
    globus_ftp_client_restart_marker_t *        marker,
    globus_off_t *                              total_bytes)
{
    GlobusFuncName(globus_ftp_client_restart_marker_get_total);

    if(marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker"));
    }
    if(total_bytes == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("total_bytes"));
    }

    /* Default for an empty / unset marker. */
    *total_bytes = 0;

    if(marker->type == GLOBUS_FTP_CLIENT_RESTART_STREAM)
    {
        /* Stream mode: the offset itself is the byte count. */
        *total_bytes = marker->stream.offset;
    }
    else if(marker->type == GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK &&
        !globus_fifo_empty(&marker->extended_block.ranges))
    {
        globus_fifo_t *                 tmp;
        globus_off_t                    total;
        globus_i_ftp_client_range_t *   range;

        /*
         * Iterate over a shallow copy so the marker's own queue is
         * left intact; the range structs remain owned by the marker.
         */
        tmp = globus_fifo_copy(&marker->extended_block.ranges);
        total = 0;
        while((!globus_fifo_empty(tmp)))
        {
            range = (globus_i_ftp_client_range_t *)
                globus_fifo_dequeue(tmp);
            total += range->end_offset - range->offset;
        }
        *total_bytes = total;

        /* The copied fifo itself is heap-allocated; release both. */
        globus_fifo_destroy(tmp);
        globus_libc_free(tmp);
    }

    return GLOBUS_SUCCESS;
}
/**
 * Destroy a restart marker.
 * @ingroup globus_ftp_client_restart_marker
 *
 * Releases all storage held by @a marker and resets it to the
 * GLOBUS_FTP_CLIENT_RESTART_NONE state.
 *
 * @param marker
 *        Restart marker.  This marker must be initialized by either
 *        calling globus_ftp_client_restart_marker_init() or
 *        globus_ftp_client_restart_marker_copy()
 *
 * @see globus_ftp_client_restart_marker_t,
 * globus_ftp_client_restart_marker_init(),
 * globus_ftp_client_restart_marker_copy()
 */
globus_result_t
globus_ftp_client_restart_marker_destroy(
    globus_ftp_client_restart_marker_t *        marker)
{
    GlobusFuncName(globus_ftp_client_restart_marker_destroy);

    if(marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker"));
    }

    if(marker->type == GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK)
    {
        /* Release every queued byte range, then the queue itself. */
        while(!globus_fifo_empty(&marker->extended_block.ranges))
        {
            globus_i_ftp_client_range_t *       range;

            range = (globus_i_ftp_client_range_t *)
                globus_fifo_dequeue(&marker->extended_block.ranges);
            globus_libc_free(range);
        }
        globus_fifo_destroy(&marker->extended_block.ranges);
    }

    /* For every marker type: wipe the struct and mark it unused. */
    memset(marker, '\0', sizeof(globus_ftp_client_restart_marker_t));
    marker->type = GLOBUS_FTP_CLIENT_RESTART_NONE;

    return GLOBUS_SUCCESS;
}
/*
 * Register an asynchronous close on every write handle still queued on
 * cp_h->write_all_q.  If a close fails to register, its completion
 * callback will never fire, so the outstanding-handle count is
 * decremented immediately.  When no write handles remain, a oneshot is
 * scheduled to finish shutting down the copy handle.
 *
 * NOTE(review): assumes the caller holds cp_h->mutex, matching the
 * other cp_h queue manipulations in this file -- confirm at call sites.
 */
static
void
gfs_l_xio_close_write_handles(
    gfs_i_xio_cp_handle_t *             cp_h)
{
    globus_xio_handle_t                 xio_h;
    globus_result_t                     result;  /* BUGFIX: was used undeclared */

    while(!globus_fifo_empty(cp_h->write_all_q))
    {
        xio_h = (globus_xio_handle_t)
            globus_fifo_dequeue(cp_h->write_all_q);

        result = globus_xio_register_close(
            xio_h,
            NULL,
            gfs_l_xio_cp_write_close_cb,
            cp_h);
        if(result != GLOBUS_SUCCESS)
        {
            /* close cb will not be delivered; account for it now */
            cp_h->write_handle_count--;
        }
    }
    if(cp_h->write_handle_count == 0)
    {
        globus_callback_register_oneshot(
            NULL,
            NULL,
            gfs_l_xio_cp_close_os,
            cp_h);
    }
}
/*
 * XIO writev completion callback for client-side gfork messages.
 *
 * Delivers the user's callback for the message that just finished,
 * frees that message, then (under the handle lock) starts the next
 * queued write if one is pending.
 *
 * NOTE(review): the user callback is invoked with &msg->iov[1] and
 * count - 1 -- iov[0] is presumably a gfork framing header that the
 * client should not see; confirm against the enqueue path.
 */
static
void
gfork_l_client_writev_cb(
    globus_xio_handle_t                 xio_handle,
    globus_result_t                     result,
    globus_xio_iovec_t *                iovec,
    int                                 count,
    globus_size_t                       nbytes,
    globus_xio_data_descriptor_t        data_desc,
    void *                              user_arg)
{
    gfork_i_msg_t *                     msg;
    gfork_i_lib_handle_t *              handle;

    msg = (gfork_i_msg_t *) user_arg;
    handle = msg->lib_handle;

    /* lazy reuse of XIO callback.  perhaps we should define our own */
    if(msg->client_cb)
    {
        /* Skip the framing entry: report only the payload iovecs. */
        msg->client_cb(NULL, result, &msg->iov[1], count - 1,
            nbytes, data_desc, msg->user_arg);
    }
    /* The completed message and its iovec array are now dead. */
    globus_free(msg->iov);
    globus_free(msg);

    globus_mutex_lock(&handle->mutex);
    {
        handle->writing = GLOBUS_FALSE;
        if(result != GLOBUS_SUCCESS)
        {
            goto error;
        }
        /* Kick off the next queued message, if any. */
        if(!globus_fifo_empty(&handle->write_q))
        {
            msg = (gfork_i_msg_t *) globus_fifo_dequeue(&handle->write_q);

            result = globus_xio_register_writev(
                handle->write_xio,
                msg->iov,
                msg->iovc,
                msg->nbytes,
                NULL,
                gfork_l_client_writev_cb,
                msg);
            if(result != GLOBUS_SUCCESS)
            {
                goto error;
            }
            handle->writing = GLOBUS_TRUE;
        }
    }
    globus_mutex_unlock(&handle->mutex);

    return;
error:
    /* NOTE(review): error handling is a hard assert -- any write
     * failure aborts the process in debug builds; the unlock below is
     * only reached when NDEBUG is defined.  Confirm this is intended. */
    assert(0);
    globus_mutex_unlock(&handle->mutex);
}
/*
 * Consumer thread entry point for the producer/consumer fifo test.
 *
 * Dequeues nitems data items from the shared global queue, signalling
 * each item's producer (via data->cond) that the item was consumed.
 * Blocks on queue_cond while the queue is empty.
 *
 * @param nitems_arg
 *        Number of items to consume, smuggled through the void*
 *        thread-argument as a long.
 * @return NULL always.
 */
void *
consumer(
    void *                              nitems_arg)
{
    int                                 i;
    long                                nitems;
    prod_data_t *                       data;
    long                                thread_id;

    nitems = (long) nitems_arg;

    thread_id_assign();
    thread_id = thread_id_get();

    /* Barrier: all test threads start consuming together. */
    wait_for_all();

    for (i = 0; i < nitems ; i++)
    {
        globus_mutex_lock(&queue_mutex);
        {
            /* Standard condition-variable wait loop: re-check the
             * predicate after every wakeup (spurious wakeups, and
             * another consumer may have taken the item). */
            while(globus_fifo_empty(&queue))
            {
#               if (DEBUG_LEVEL > 1)
                {
                    globus_stdio_lock();
                    {
                        printf("%04ld: consumer() - waiting for data item %d\n",
                            thread_id,
                            i);
                    }
                    globus_stdio_unlock();
                }
#               endif

                globus_cond_wait(&queue_cond, &queue_mutex);
            }
            data = globus_fifo_dequeue(&queue);
        }
        globus_mutex_unlock(&queue_mutex);

        /* Tell the producer its item has been consumed. */
        globus_mutex_lock(&data->mutex);
        {
            data->done = GLOBUS_TRUE;
            globus_cond_signal(&data->cond);
        }
        globus_mutex_unlock(&data->mutex);
    }

    /* Barrier: wait for all threads before exiting. */
    wait_for_all();

    return NULL;
}
/**
 * Callback for writing event to scheduler.
 *
 * Frees the completed iovec array, clears the write-registered flag,
 * and then -- still holding the SEG mutex -- takes exactly one of three
 * mutually exclusive follow-up actions: report a fault on write error,
 * register the next write if more buffers are queued, or complete a
 * pending shutdown when the queue has drained.
 *
 * @param handle
 * @param result
 * @param iovec
 * @param count
 * @param nbytes
 * @param data_desc
 * @param user_arg
 */
static
void
globus_l_seg_writev_callback(
    globus_xio_handle_t                 handle,
    globus_result_t                     result,
    globus_xio_iovec_t *                iovec,
    int                                 count,
    globus_size_t                       nbytes,
    globus_xio_data_descriptor_t        data_desc,
    void *                              user_arg)
{
    int                                 i;

    /* The iovec array and every buffer in it belonged to this write. */
    for (i = 0; i < count; i++)
    {
        globus_libc_free(iovec[i].iov_base);
    }
    globus_libc_free(iovec);

    globus_mutex_lock(&globus_l_seg_mutex);
    globus_l_seg_write_registered = GLOBUS_FALSE;

    if (result != GLOBUS_SUCCESS)
    {
        globus_scheduler_event_generator_fault(result);
    }
    else if (!globus_fifo_empty(&globus_l_seg_buffers))
    {
        globus_l_seg_register_write(NULL);
    }
    else if (globus_l_seg_shutdown)
    {
        /* Queue drained during shutdown: wake the waiter. */
        globus_l_seg_shutdown = 2;
        globus_cond_signal(&globus_l_seg_cond);
    }
    globus_mutex_unlock(&globus_l_seg_mutex);
}
/*
 * Function:    globus_i_gass_transfer_send_dispatcher()
 *
 * Description: If the head of the pending fifo should be
 *              sent over, send it.
 *
 * Parameters:  request - GASS transfer request handle whose pending
 *              send queue should be dispatched.
 *
 * Returns:     void
 *
 * NOTE(review): the unlock/lock around send_buffer implies the GASS
 * transfer lock is held by the caller -- confirm at call sites.
 */
void
globus_i_gass_transfer_send_dispatcher(
    globus_gass_transfer_request_t      request)
{
    globus_gass_transfer_pending_t *            head;
    globus_gass_transfer_request_struct_t *     req;

    req = globus_handle_table_lookup(&globus_i_gass_transfer_request_handles,
        request);
    if(req == GLOBUS_NULL)
    {
        return;
    }
    /* If we are not in the PENDING state, we should not look at the
     * queue
     */
    if(req->status != GLOBUS_GASS_TRANSFER_REQUEST_PENDING)
    {
        return;
    }
    /* If the fifo is empty, there is nothing to do */
    if(globus_fifo_empty(&req->pending_data))
    {
        return;
    }
    head = globus_fifo_peek(&req->pending_data);
    if(head->pending == GLOBUS_TRUE)
    {
        /*
         * If the first in the fifo has already been sent to
         * the protocol module, there is nothing to do
         */
        return;
    }
    else
    {
        head->pending = GLOBUS_TRUE;
        req->status = GLOBUS_GASS_TRANSFER_REQUEST_ACTING;

        /* Drop the lock across the protocol-module call: it may call
         * back into GASS transfer. */
        globus_i_gass_transfer_unlock();
        req->proto->send_buffer(req->proto,
            request,
            head->bytes,
            head->length,
            head->last_data);
        globus_i_gass_transfer_lock();
    }
}
globus_result_t gfs_i_xio_cp_start( gfs_i_xio_cp_handle_t ** cp_h_out, globus_fifo_t * read_handle_fifo, globus_fifo_t * write_handle_fifo, globus_callback_func_t complete_cb, globus_callback_func_t update_cb, void * user_arg) { globus_fifo_t * read_q; gfs_i_xio_cp_handle_t * cp_h; cp_h = (gfs_i_xio_cp_handle_t *) globus_calloc(1, sizeof(gfs_i_xio_cp_handle_t)); cp_h->read_all_q = globus_fifo_copy(net_handle_fifo); cp_h->write_all_q = globus_fifo_copy(net_handle_fifo); cp_h->write_q = globus_fifo_copy(net_handle_fifo); globus_fifo_init(&cp_h->read_buffer_q); cp_h->block_size = block_size; cp_h->cb = complete_cb; cp_h->user_arg = user_arg; globus_mutex_init(&cp_h->mutex, NULL); cp_h->state = GFS_XIO_CP_STATE_OPEN; read_q = globus_fifo_copy(net_handle_fifo); cp_h->read_handle_count = globus_fifo_size(cp_h->read_all_q); cp_h->write_handle_count = globus_fifo_size(cp_h->write_all_q); *cp_h_out = cp_h; globus_mutex_lock(&cp_h->mutex); { while(!globus_fifo_empty(read_q)) { xio_h = (globus_xio_handle_t) globus_fifo_dequeue(cp_h->read_q); read_buf = (gfs_l_xio_read_buffer_t *) globus_calloc(sizeof(gfs_l_xio_read_buffer_t)+block_size, 1); read_buf->block_size = block_size; read_buf->whos_my_daddy = cp_h; gfs_l_xio_cp_post_read(xio_h, read_buf); } } globus_mutex_unlock(&cp_h->mutex); globus_fifo_destroy(read_q); return GLOBUS_SUCCESS; }
/**
 * Get the first contiguous block recorded in a restart marker.
 * @ingroup globus_ftp_client_restart_marker
 *
 * For a stream-mode marker the first block is [0, stream offset); for
 * an extended-block marker it is the first range in the marker's
 * queue.  Both outputs are set to 0 when the marker holds nothing.
 *
 * @param marker        A previously initialized restart marker.
 * @param start_offset  Receives the block's starting offset.
 * @param end_offset    Receives the block's ending offset.
 */
globus_result_t
globus_ftp_client_restart_marker_get_first_block(
    globus_ftp_client_restart_marker_t *        marker,
    globus_off_t *                              start_offset,
    globus_off_t *                              end_offset)
{
    GlobusFuncName(globus_ftp_client_restart_marker_get_first_block);

    if(marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker"));
    }
    if(start_offset == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("start_offset"));
    }
    if(end_offset == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("end_offset"));
    }

    /* Defaults when the marker records no progress. */
    *start_offset = 0;
    *end_offset = 0;

    switch(marker->type)
    {
      case GLOBUS_FTP_CLIENT_RESTART_STREAM:
        *end_offset = marker->stream.offset;
        break;

      case GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK:
        if(!globus_fifo_empty(&marker->extended_block.ranges))
        {
            globus_i_ftp_client_range_t *       first;

            /* Peek only: the marker keeps ownership of the range. */
            first = (globus_i_ftp_client_range_t *)
                globus_fifo_peek(&marker->extended_block.ranges);
            *start_offset = first->offset;
            *end_offset = first->end_offset;
        }
        break;

      default:
        break;
    }

    return GLOBUS_SUCCESS;
}
/*
 * called locked
 *
 * Transition the copy handle into the ERROR state and begin tearing
 * down all read handles.  The first error wins: subsequent calls (and
 * calls made while already closing) are no-ops.  Write handles are
 * closed once the last read handle is gone.
 *
 * NOTE(review): the state constants here use the GFS_CIO_ prefix while
 * gfs_i_xio_cp_start() uses GFS_XIO_CP_STATE_OPEN -- confirm which
 * prefix the header actually declares.
 */
static
void
gfs_l_xio_cp_error(
    gfs_i_xio_cp_handle_t *             cp_h,
    globus_result_t                     result)
{
    globus_xio_handle_t                 xio_h;

    /* call this for shutdown in error cases, but some error cases
       will be a result of closing. */
    if(cp_h->state != GFS_CIO_CP_STATE_OPEN)
    {
        return;
    }
    cp_h->state = GFS_CIO_CP_STATE_ERROR;
    /* Keep the first failure for later reporting. */
    cp_h->err_obj = globus_error_get(result);

    /* Register an async close on every outstanding read handle. */
    while(!globus_fifo_empty(cp_h->read_all_q))
    {
        xio_h = (globus_xio_handle_t)
            globus_fifo_dequeue(cp_h->read_all_q);

        result = globus_xio_register_close(
            xio_h,
            NULL,
            gfs_l_xio_cp_read_close_cb,
            cp_h);
        if(result != GLOBUS_SUCCESS)
        {
            /* close cb will never fire; account for it now */
            cp_h->read_handle_count--;
        }
    }
    if(cp_h->read_handle_count <= 0)
    {
        gfs_l_xio_close_write_handles(cp_h);
    }
}
static void gfs_l_xio_cp_read_cb( globus_xio_handle_t handle, globus_result_t result, globus_byte_t * buffer, globus_size_t len, globus_size_t nbytes, globus_xio_data_descriptor_t data_desc, void * user_arg) { gfs_l_xio_read_buffer_t * read_buf; gfs_i_xio_cp_handle_t * cp_h; read_buf = (gfs_l_xio_read_buffer_t *) user_arg; cp_h = read_buf->whos_my_daddy; globus_mutex_lock(&cp_h->mutex); { read_buf->nbytes = nbytes; if(result != GLOBUS_SUCCESS) { if(eof) { read_buf->eof = GLOBUS_TRUE; } else { /* what if this is just EOF */ goto error; } } /* it is possible to get here in the CLOSING state without an error */ if(cp_h->state == GFS_CIO_CP_STATE_ERROR) { goto error; } /* XXX need to get an offset for this buffer */ read_buf->nbytes = nbytes; result = globus_xio_data_descriptor_cntl( data_desc, NULL, GLOBUS_XIO_DD_GET_OFFSET, &offset); if(result != GLOBUS_SUCCESS) { goto error; } read_buf->offset = offset; if(!globus_fifo_empty(cp_h->write_q)) { read_buf->write_xio = (globus_xio_handle_t) globus_fifo_dequeue(cp_h->write_q); result = globus_xio_handle_cntl( read_buf->write_xio, GLOBUS_XIO_QUERY, GLOBUS_XIO_SEEK, read_buf->offset); if(result != GLOBUS_SUCCESS) { goto error; } result = globus_xio_register_write( read_buf->write_xio, read_buf->buffer, read_buf->nbytes, read_buf->nbytes, NULL, gfs_l_xio_cp_write_cb, read_buf); if(result != GLOBUS_SUCCESS) { goto error; } } else { /* stick this one in the queue */ globus_fifo_enqueue(&cp_h->read_buffer_q, read_buf); } if(!eof) { /* make and post a new one */ read_buf = (gfs_l_xio_read_buffer_t *) globus_calloc(sizeof(gfs_l_xio_read_buffer_t)+block_size, 1); read_buf->block_size = cp_h->block_size; read_buf->whos_my_daddy = cp_h; /* do this last since it can inspire the CLOSING state */ gfs_l_xio_cp_post_read(xio_h, read_buf); } else { /* remove it from close q and close */ globus_fifo_remove(cp_h->read_all_q, read_buf->read_xio); result = globus_xio_register_close( read_buf->read_xio, NULL, gfs_l_xio_cp_close_cb, cp_h); 
if(result != GLOBUS_SUCCESS) { cp_h->read_handle_count--; } if(cp_h->read_handle_count <= 0) { gfs_l_xio_close_write_handles(cp_h); } } } globus_mutex_unlock(&cp_h->mutex); return; error: globus_free(read_buf); gfs_l_xio_cp_error(cp_h, result); globus_mutex_unlock(&cp_h->mutex); }
/**
 * Request referred.
 * @ingroup globus_gass_transfer_protocol
 *
 * This function notifies the GASS Transfer Library that a new request
 * generated by a client calling one of the functions in the
 * "@ref globus_gass_transfer_client" section of the manual has been
 * referred to another URL by the server, and so processing has stopped.
 *
 * @param request
 *        The request handle used for this request. This was created by
 *        the user calling one of the functions in the "@ref
 *        globus_gass_transfer_client" section of this manual.
 * @param url
 *        An array of url strings containing alternate locations for
 *        this file. The GASS transfer library is responsible for
 *        freeing this array. It must be allocated using one of the
 *        memory allocators defined in the Globus Common Library.
 * @param num_urls
 *        The length of the @a url array.
 *
 * @see globus_gass_transfer_proto_request_ready(),
 *      globus_gass_transfer_proto_request_denied(),
 *      globus_gass_transfer_proto_request_referred()
 */
void
globus_gass_transfer_proto_request_referred(
    globus_gass_transfer_request_t      request,
    char **                             url,
    globus_size_t                       num_urls)
{
    globus_gass_transfer_request_struct_t *     req;
    globus_gass_transfer_callback_t             callback;
    void *                                      callback_arg;
    globus_size_t                               i;
    globus_gass_transfer_pending_t *            head;

    globus_i_gass_transfer_lock();
    req = globus_handle_table_lookup(&globus_i_gass_transfer_request_handles,
        request);
    if(req == GLOBUS_NULL)
    {
        goto finish;
    }
    switch(req->status)
    {
      case GLOBUS_GASS_TRANSFER_REQUEST_STARTING:
        /* Not yet started: record the referral and tell the user. */
        req->status = GLOBUS_GASS_TRANSFER_REQUEST_REFERRED;
        req->referral_url = url;
        req->referral_count = num_urls;

        callback = req->callback;
        callback_arg = req->callback_arg;

        /* Drop the lock across the user callback. */
        globus_i_gass_transfer_unlock();
        callback(callback_arg, request);
        globus_i_gass_transfer_lock();

        /* free up GASS's reference to the request */
        globus_i_gass_transfer_request_destroy(request);
        break;

      case GLOBUS_GASS_TRANSFER_REQUEST_USER_FAIL:
        /* User already asked to fail this request. */
        req->status = GLOBUS_GASS_TRANSFER_REQUEST_FAILED;
        req->referral_url = url;
        req->referral_count = num_urls;

        callback = req->callback;
        callback_arg = req->callback_arg;

        globus_i_gass_transfer_unlock();
        callback(callback_arg, request);
        globus_i_gass_transfer_lock();

        /* free up GASS's reference to the request */
        globus_i_gass_transfer_request_destroy(request);
        break;

      case GLOBUS_GASS_TRANSFER_REQUEST_ACTING:
        /* request is in progress, when operation completes,
         * the callback queue will be drained
         */
        req->status = GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_REFERRING;
        req->referral_url = url;
        req->referral_count = num_urls;
        break;

      case GLOBUS_GASS_TRANSFER_REQUEST_PENDING:
        /* NOTE(review): this case neither stores nor frees `url` --
         * the referral array appears to leak here; confirm against the
         * library's ownership rules. */
        req->status = GLOBUS_GASS_TRANSFER_REQUEST_REFERRING;

        /* Drain queued data callbacks, flagging last_data for each. */
        while(!globus_fifo_empty(&req->pending_data))
        {
            head = globus_fifo_dequeue(&req->pending_data);

            /* Call back to user */
            globus_i_gass_transfer_unlock();
            head->callback(head->callback_arg,
                request,
                head->bytes,
                0,
                GLOBUS_TRUE);
            globus_i_gass_transfer_lock();
            globus_free(head);

            req->status = GLOBUS_GASS_TRANSFER_REQUEST_REFERRED;
        }
        /* free up references to request and proto */
        req->proto->destroy(req->proto, request);

        /* free up the GASS's reference to this request */
        globus_i_gass_transfer_request_destroy(request);
        break;

      case GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_PENDING:
        /* user callback in progress */
        req->status = GLOBUS_GASS_TRANSFER_REQUEST_REFERRING;
        req->referral_url = url;
        req->referral_count = num_urls;
        /* callbacks are going to occur after the current
         * one completes (in the operation_complete function
         * above)
         */
        break;

      case GLOBUS_GASS_TRANSFER_REQUEST_FAILED:
      case GLOBUS_GASS_TRANSFER_REQUEST_REFERRED:
      case GLOBUS_GASS_TRANSFER_REQUEST_DENIED:
      case GLOBUS_GASS_TRANSFER_REQUEST_DONE:
      case GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL1:
      case GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL2:
      case GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL3:
      case GLOBUS_GASS_TRANSFER_REQUEST_STARTING2:
      case GLOBUS_GASS_TRANSFER_REQUEST_STARTING3:
      case GLOBUS_GASS_TRANSFER_REQUEST_ACCEPTING:
      case GLOBUS_GASS_TRANSFER_REQUEST_FAILING:
      case GLOBUS_GASS_TRANSFER_REQUEST_FINISHING:
      case GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_FAILING:
        /* free urls, no state change */
        goto free_urls;

      case GLOBUS_GASS_TRANSFER_REQUEST_REFERRING:
      case GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_REFERRING:
        /* A second referral for the same request is a protocol-module
         * bug: these asserts are intentionally always-false here. */
        globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_REFERRING);
        globus_assert(req->status !=
            GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_REFERRING);
        goto free_urls;

      case GLOBUS_GASS_TRANSFER_REQUEST_INVALID:
        goto finish;
    }

  finish:
    globus_i_gass_transfer_unlock();
    return;

  free_urls:
    for(i = 0; i < num_urls; i++)
    {
        globus_free(url[i]);
    }
    globus_free(url);
    /* BUGFIX: this exit path previously returned while still holding
     * the GASS transfer lock. */
    globus_i_gass_transfer_unlock();
    return;
}
/**
 * Insert a range into a restart marker
 * @ingroup globus_ftp_client_restart_marker
 *
 * This function updates a restart marker with a new byte range,
 * suitable for using to restart an extended block mode transfer.
 * Adjacent ranges within the marker will be combined into a single
 * entry in the marker.
 *
 * The marker must first be initialized by calling
 * globus_ftp_client_restart_marker_init() or
 * globus_ftp_client_restart_marker_copy().
 *
 * A marker can only hold a range list or a stream offset. Calling
 * this function after calling
 * globus_ftp_client_restart_marker_set_offset() will result in a marker
 * suitable only for use restarting an extended block mode transfer.
 *
 * @param marker
 *        A restart marker
 * @param offset
 *        The starting offset of the range.
 * @param end_offset
 *        The ending offset of the range.
 *
 * @see globus_ftp_client_restart_marker_set_offset()
 * globus_ftp_client_operationattr_set_mode()
 */
globus_result_t
globus_ftp_client_restart_marker_insert_range(
    globus_ftp_client_restart_marker_t *        marker,
    globus_off_t                                offset,
    globus_off_t                                end_offset)
{
    globus_fifo_t                       tmp;
    globus_i_ftp_client_range_t *       range;
    globus_i_ftp_client_range_t *       newrange;
    /* BUGFIX: was initialized with GLOBUS_SUCCESS (a result code) --
     * use the null object pointer instead. */
    globus_object_t *                   err = GLOBUS_NULL;
    /* BUGFIX: named the wrong function. */
    GlobusFuncName(globus_ftp_client_restart_marker_insert_range);

    if(marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker"));
    }
    if(marker->type != GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK)
    {
        /* Convert the marker into an extended-block marker.
         * NOTE(review): this clears only
         * sizeof(globus_ftp_client_restart_extended_block_t) bytes of
         * the marker union, not the whole marker -- confirm that is
         * the largest union member. */
        memset(marker,
            '\0',
            sizeof(globus_ftp_client_restart_extended_block_t));
        marker->type = GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK;
        globus_fifo_init(&marker->extended_block.ranges);
    }

    /* Rebuild the range list in-place: drain the old list from tmp
     * while enqueuing the merged result back onto the marker. */
    globus_fifo_move(&tmp, &marker->extended_block.ranges);

    while(!globus_fifo_empty(&tmp))
    {
        range = globus_fifo_dequeue(&tmp);

        if(offset <= range->offset)
        {
            if(end_offset+1 < range->offset)
            {
                /* New range is entirely before this one: emit both and
                 * copy the rest through untouched. */
                newrange = globus_malloc(sizeof(globus_i_ftp_client_range_t));
                if(newrange == NULL)
                {
                    err = GLOBUS_I_FTP_CLIENT_ERROR_OUT_OF_MEMORY();
                    if(!err) err = GLOBUS_ERROR_NO_INFO;
                    /* BUGFIX: the dequeued range was previously dropped
                     * on this path; keep it in the marker. */
                    globus_fifo_enqueue(&marker->extended_block.ranges,
                        range);
                    goto copy_rest;
                }
                newrange->offset = offset;
                newrange->end_offset = end_offset;
                globus_fifo_enqueue(&marker->extended_block.ranges, newrange);
                globus_fifo_enqueue(&marker->extended_block.ranges, range);

                goto copy_rest;
            }
            else if(end_offset+1 == range->offset)
            {
                /* Adjacent: absorb the existing range into the new one. */
                end_offset = range->end_offset;
                globus_libc_free(range);
            }
            else
            {
                /* weird.... overlapping data */
                if(end_offset < range->end_offset)
                {
                    end_offset = range->end_offset;
                }
                globus_libc_free(range);
            }
        }
        else
        {
            if(range->end_offset < offset - 1)
            {
                /* Existing range is entirely before the new one. */
                globus_fifo_enqueue(&marker->extended_block.ranges, range);
            }
            else
            {
                /* Adjacent or overlapping from the left: absorb it.
                 * (The original had an unreachable third branch here:
                 * the two conditions were exhaustive.) */
                offset = range->offset;
                if(end_offset < range->end_offset)
                {
                    end_offset = range->end_offset;
                }
                globus_libc_free(range);
            }
        }
    }

    /* Emit the (possibly merged) new range at the tail. */
    newrange = globus_malloc(sizeof(globus_i_ftp_client_range_t));
    if(newrange == GLOBUS_NULL)
    {
        err = GLOBUS_I_FTP_CLIENT_ERROR_OUT_OF_MEMORY();
        if(!err) err = GLOBUS_ERROR_NO_INFO;
        goto copy_rest;
    }
    newrange->offset = offset;
    newrange->end_offset = end_offset;
    globus_fifo_enqueue(&marker->extended_block.ranges, newrange);

  copy_rest:
    /* Move any unprocessed ranges back onto the marker. */
    while(! globus_fifo_empty(&tmp))
    {
        globus_fifo_enqueue(&marker->extended_block.ranges,
            globus_fifo_dequeue(&tmp));
    }

    globus_fifo_destroy(&tmp);

    return err ? globus_error_put(err) : GLOBUS_SUCCESS;
}
/**
 * Write the response to an HTTP request
 * @ingroup globus_i_xio_http_server
 *
 * Generates an HTTP response line from a handle, and passes it to the
 * transport. The globus_l_xio_http_server_write_response_callback() will
 * be called once the transport has sent the response.
 *
 * This call may be triggered by either the first write on a server handle,
 * or by calling the #GLOBUS_XIO_HTTP_HANDLE_SET_END_OF_ENTITY handle
 * control function.
 *
 * Called with my mutex lock.
 *
 * @param http_handle
 *     Handle associated with this HTTP stream.
 * @param iovec
 *     Array of globus_xio_iovec_t structs associated with the user's write.
 * @param iovec_count
 *     Length of the @a iovec array. If this is zero, we assume that the
 *     response is being generated by the
 *     #GLOBUS_XIO_HTTP_HANDLE_SET_END_OF_ENTITY control.
 * @param op
 *     Operation associated with the write. If this is NULL (in the case
 *     of the GLOBUS_XIO_HTTP_HANDLE_SET_END_OF_ENTITY control), one
 *     will be created in this function.
 *
 * This function returns GLOBUS_SUCCESS, GLOBUS_XIO_ERROR_MEMORY, or an
 * error result from globus_xio_driver_operation_create(), or
 * globus_xio_driver_pass_write().
 *
 * @retval GLOBUS_SUCCESS
 *     Response was passed to the transport for writing. If this was
 *     generated by a user writing data, then the write will occur after the
 *     globus_l_xio_http_server_write_response_callback() has been called.
 * @retval GLOBUS_XIO_ERROR_MEMORY
 *     Unable to compose the response due to memory constraints.
 */
globus_result_t
globus_i_xio_http_server_write_response(
    globus_i_xio_http_handle_t *        http_handle,
    const globus_xio_iovec_t *          iovec,
    int                                 iovec_count,
    globus_xio_operation_t              op)
{
    globus_result_t                     result;
    globus_fifo_t                       iovecs;
    const char *                        str;
    char                                code_str[5];
    globus_xio_iovec_t *                iov;
    int                                 rc;
    int                                 i;
    int                                 send_size;
    char *                              size_buffer = NULL;
    globus_bool_t                       free_op = GLOBUS_FALSE;
    globus_xio_http_header_t *          current_header;
    /* BUGFIX: previously named globus_i_xio_server_write_response. */
    GlobusXIOName(globus_i_xio_http_server_write_response);

    globus_assert(http_handle->send_state == GLOBUS_XIO_HTTP_STATUS_LINE);

    rc = globus_fifo_init(&iovecs);
    if (rc != GLOBUS_SUCCESS)
    {
        result = GlobusXIOErrorMemory("iovecs");

        goto error_exit;
    }

    /*
     * Compose HTTP Response:
     * HTTP-Version SP Status-Code SP Reason-Phrase CRLF
     */
    if (http_handle->response_info.http_version ==
            GLOBUS_XIO_HTTP_VERSION_1_0)
    {
        str = "HTTP/1.0 ";
    }
    else
    {
        /* Default unset versions to HTTP/1.1. */
        http_handle->response_info.http_version = GLOBUS_XIO_HTTP_VERSION_1_1;

        str = "HTTP/1.1 ";
    }
    GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, str, 9, free_iovecs_error);

    /* 3-digit status + trailing space fills code_str[5] exactly. */
    sprintf(code_str, "%d ", http_handle->response_info.status_code);
    GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, code_str, 4, free_iovecs_error);

    if (http_handle->response_info.reason_phrase != NULL)
    {
        str = http_handle->response_info.reason_phrase;
    }
    else
    {
        str = globus_i_xio_http_lookup_reason(
                http_handle->response_info.status_code);
    }
    GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, str, strlen(str), free_iovecs_error);
    GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, "\r\n", 2, free_iovecs_error);

    /* User-supplied headers. */
    current_header = globus_hashtable_first(
            &http_handle->response_info.headers.headers);

    while (current_header)
    {
        GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                current_header->name,
                strlen(current_header->name),
                free_iovecs_error);
        GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, ": ", 2, free_iovecs_error);
        GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                current_header->value,
                strlen(current_header->value),
                free_iovecs_error);
        GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, "\r\n", 2, free_iovecs_error);

        current_header = globus_hashtable_next(
                &http_handle->response_info.headers.headers);
    }

    /*
     * Special headers we generate.
     */
    if (GLOBUS_I_XIO_HTTP_HEADER_IS_CONNECTION_CLOSE(
                &http_handle->response_info.headers) ||
            (http_handle->request_info.http_version ==
                GLOBUS_XIO_HTTP_VERSION_1_0) ||
            (http_handle->response_info.headers.transfer_encoding ==
                GLOBUS_XIO_HTTP_TRANSFER_ENCODING_IDENTITY &&
            GLOBUS_I_XIO_HTTP_HEADER_IS_CONTENT_LENGTH_SET(
                &http_handle->response_info.headers)))
    {
        http_handle->response_info.headers.flags |=
                GLOBUS_I_XIO_HTTP_HEADER_CONNECTION_CLOSE;

        GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                "Connection: close\r\n",
                19,
                free_iovecs_error);
    }

    if (iovec_count > 0)
    {
        /*
         * We are sending a body, so we'll set the appropriate
         * entity-related headers
         */
        if (http_handle->request_info.http_version ==
                GLOBUS_XIO_HTTP_VERSION_1_0 ||
            (http_handle->response_info.headers.transfer_encoding ==
                GLOBUS_XIO_HTTP_TRANSFER_ENCODING_IDENTITY &&
            GLOBUS_I_XIO_HTTP_HEADER_IS_CONTENT_LENGTH_SET(
                &http_handle->response_info.headers)))
        {
            http_handle->response_info.headers.transfer_encoding =
                    GLOBUS_XIO_HTTP_TRANSFER_ENCODING_IDENTITY;

            /* Transfer-Encoding mustn't be sent to a HTTP/1.0 client */
            if (http_handle->request_info.http_version !=
                    GLOBUS_XIO_HTTP_VERSION_1_0)
            {
                GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                        "Transfer-Encoding: identity\r\n",
                        29,
                        free_iovecs_error);
            }

            /*
             * When we know the content-length beforehand we can set it
             * here, otherwise, we will use the connection: close header
             */
            if (GLOBUS_I_XIO_HTTP_HEADER_IS_CONTENT_LENGTH_SET(
                        &http_handle->response_info.headers))
            {
                GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                        "Content-Length: ",
                        16,
                        free_iovecs_error);

                size_buffer = globus_common_create_string(
                        "%lu\r\n",
                        (unsigned long)
                        http_handle->response_info.headers.content_length);

                if (size_buffer == NULL)
                {
                    result = GlobusXIOErrorMemory("iovec.iov_base");

                    goto free_iovecs_error;
                }
                GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                        size_buffer,
                        strlen(size_buffer),
                        free_iovecs_error);

                free(size_buffer);
                size_buffer = NULL;
            }
        }
        else
        {
            http_handle->response_info.headers.transfer_encoding =
                    GLOBUS_XIO_HTTP_TRANSFER_ENCODING_CHUNKED;

            GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs,
                    "Transfer-Encoding: chunked\r\n",
                    28,
                    free_iovecs_error);
        }
    }
    GLOBUS_XIO_HTTP_COPY_BLOB(&iovecs, "\r\n", 2, free_iovecs_error);

    http_handle->header_iovcnt = globus_fifo_size(&iovecs);
    http_handle->header_iovec = globus_libc_malloc(
            http_handle->header_iovcnt * sizeof(globus_xio_iovec_t));

    if (http_handle->header_iovec == NULL)
    {
        /* BUGFIX: this path previously left `result` uninitialized. */
        result = GlobusXIOErrorMemory("header_iovec");

        goto free_iovecs_error;
    }

    /* Convert fifo to iovec array, counting up size for wait_for_nbytes
     * parameter to globus_xio_driver_pass_write.
     */
    for (i = 0, send_size = 0; i < http_handle->header_iovcnt; i++)
    {
        iov = globus_fifo_dequeue(&iovecs);

        globus_assert(iov != NULL);

        http_handle->header_iovec[i].iov_base = iov->iov_base;
        http_handle->header_iovec[i].iov_len = iov->iov_len;

        send_size += iov->iov_len;

        globus_libc_free(iov);
    }

    if (op == NULL)
    {
        result = globus_xio_driver_operation_create(
                &op,
                http_handle->handle);

        free_op = GLOBUS_TRUE;

        if (result != GLOBUS_SUCCESS)
        {
            goto free_headers_exit;
        }
    }
    /* Stash user buffer info until we've sent response headers */
    http_handle->write_operation.operation = op;
    http_handle->write_operation.iov = (globus_xio_iovec_t *) iovec;
    http_handle->write_operation.iovcnt = iovec_count;
    http_handle->write_operation.wait_for = 0;

    result = globus_xio_driver_pass_write(
            http_handle->write_operation.operation,
            http_handle->header_iovec,
            http_handle->header_iovcnt,
            send_size,
            globus_l_xio_http_server_write_response_callback,
            http_handle);

    if (result != GLOBUS_SUCCESS)
    {
        goto free_operation_exit;
    }

    globus_fifo_destroy(&iovecs);

    if (iovec_count == 0)
    {
        http_handle->send_state = GLOBUS_XIO_HTTP_EOF;
    }
    else if (http_handle->response_info.headers.transfer_encoding ==
            GLOBUS_XIO_HTTP_TRANSFER_ENCODING_CHUNKED)
    {
        http_handle->send_state = GLOBUS_XIO_HTTP_CHUNK_BODY;
    }
    else
    {
        http_handle->send_state = GLOBUS_XIO_HTTP_IDENTITY_BODY;
    }

    return GLOBUS_SUCCESS;

free_operation_exit:
    if (free_op)
    {
        globus_xio_driver_operation_destroy(
                http_handle->write_operation.operation);
    }
free_headers_exit:
    http_handle->write_operation.operation = NULL;
    http_handle->write_operation.driver_handle = NULL;
    http_handle->write_operation.iov = NULL;
    http_handle->write_operation.iovcnt = 0;
    http_handle->write_operation.wait_for = 0;

    for (i = 0; i < http_handle->header_iovcnt; i++)
    {
        globus_libc_free(http_handle->header_iovec[i].iov_base);
    }
    globus_libc_free(http_handle->header_iovec);

    http_handle->header_iovec = NULL;
    http_handle->header_iovcnt = 0;

free_iovecs_error:
    /* Drain and free any header fragments still queued. */
    while (!globus_fifo_empty(&iovecs))
    {
        iov = globus_fifo_dequeue(&iovecs);

        globus_libc_free(iov->iov_base);
        globus_libc_free(iov);
    }
    globus_fifo_destroy(&iovecs);
    if (size_buffer != NULL)
    {
        free(size_buffer);
    }
error_exit:
    return result;
}
/*
 * Drain all pending data callbacks for a failed (or deactivating)
 * request, then destroy the request's protocol reference and GASS's
 * own reference, and finally deliver the fail/deactivate callback.
 *
 * During library deactivation the per-request user callbacks are
 * skipped and the deactivate callback is used instead.
 *
 * NOTE(review): the unlock/lock cycling around user callbacks implies
 * this runs with the GASS transfer lock held -- confirm at call sites.
 *
 * @param arg
 *        The globus_gass_transfer_request_t handle, cast to void*.
 */
static
void
globus_l_gass_transfer_drain_callbacks(
    void *                              arg)
{
    globus_gass_transfer_request_t              request;
    globus_gass_transfer_request_struct_t *     req;
    globus_gass_transfer_callback_t             callback;
    void *                                      callback_arg;

    request = (globus_gass_transfer_request_t) arg;

    req = globus_handle_table_lookup(&globus_i_gass_transfer_request_handles,
        request);
    if(req == GLOBUS_NULL)
    {
        return;
    }

    /* Deactivation overrides the per-request failure callback. */
    if(globus_i_gass_transfer_deactivating)
    {
        callback = globus_i_gass_transfer_deactivate_callback;
        callback_arg = GLOBUS_NULL;
    }
    else
    {
        callback = req->fail_callback;
        callback_arg = req->fail_callback_arg;
    }

    /* drain queue of pending data requests */
    while(!globus_fifo_empty(&req->pending_data))
    {
        globus_gass_transfer_pending_t *        pending;

        pending = globus_fifo_dequeue(&req->pending_data);

        if(!globus_i_gass_transfer_deactivating)
        {
            /* Zero bytes, last_data=TRUE: tells the user the transfer
             * is over.  Lock dropped across the user callback. */
            globus_i_gass_transfer_unlock();
            pending->callback(pending->callback_arg,
                request,
                pending->bytes,
                0,
                GLOBUS_TRUE);
            globus_i_gass_transfer_lock();
        }
        globus_free(pending);
    }

    /* free up references to request and proto */
    req->proto->destroy(req->proto, request);

    /* free up GASS's reference to this request */
    globus_i_gass_transfer_request_destroy(request);

    if(callback)
    {
        callback(callback_arg, request);
    }
}
static void globus_l_gass_transfer_operation_complete( globus_gass_transfer_request_t request, globus_byte_t * bytes, globus_size_t nbytes, globus_bool_t failed, globus_bool_t last_data, globus_gass_transfer_dispatch_func_t dispatcher) { globus_gass_transfer_request_struct_t * req; globus_gass_transfer_pending_t * head; globus_gass_transfer_callback_t fail_callback=GLOBUS_NULL; void * callback_arg; globus_i_gass_transfer_lock(); req = globus_handle_table_lookup(&globus_i_gass_transfer_request_handles, request); if(req == GLOBUS_NULL) { goto finish; } switch(req->status) { case GLOBUS_GASS_TRANSFER_REQUEST_ACTING: if(! last_data) { /* * normal operation, go back to pending state, callback * to user */ req->status = GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_PENDING; while(req->status == GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_PENDING || ( (req->status == GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_FAILING || req->status == GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_REFERRING) && !globus_fifo_empty(&req->pending_data))) { head = globus_fifo_dequeue(&req->pending_data); /* Call back to user */ globus_i_gass_transfer_unlock(); head->callback(head->callback_arg, request, head->bytes, nbytes, last_data); globus_i_gass_transfer_lock(); nbytes = 0; last_data = GLOBUS_TRUE; globus_free(head); if(req->status == GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_PENDING) { req->status = GLOBUS_GASS_TRANSFER_REQUEST_PENDING; } } if(req->status == GLOBUS_GASS_TRANSFER_REQUEST_PENDING) { /* dispatch next, if available */ dispatcher(request); break; } else if(req->status == GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_FAILING) { req->status = GLOBUS_GASS_TRANSFER_REQUEST_FAILED; fail_callback = req->fail_callback; callback_arg = req->fail_callback_arg; /* free up references to request and proto */ req->proto->destroy(req->proto, request); /* free up the GASS's reference to this request */ globus_i_gass_transfer_request_destroy(request); globus_i_gass_transfer_unlock(); if(fail_callback != GLOBUS_NULL) { 
fail_callback(callback_arg, request); } return; } } else { /* failed or done */ if(failed) { req->status = GLOBUS_GASS_TRANSFER_REQUEST_FAILING; last_data = GLOBUS_TRUE; } else { req->status = GLOBUS_GASS_TRANSFER_REQUEST_FINISHING; } while(!globus_fifo_empty(&req->pending_data)) { head = globus_fifo_dequeue(&req->pending_data); /* Call back to user */ globus_i_gass_transfer_unlock(); head->callback(head->callback_arg, request, head->bytes, nbytes, last_data); globus_i_gass_transfer_lock(); nbytes = 0; globus_free(head); } fail_callback = req->fail_callback; callback_arg = req->fail_callback_arg; /* free up references to request and proto */ req->proto->destroy(req->proto, request); /* free up the proto's and GASS's reference to this request */ globus_i_gass_transfer_request_destroy(request); if(globus_i_gass_transfer_deactivating) { globus_i_gass_transfer_request_destroy(request); } globus_i_gass_transfer_unlock(); if(fail_callback != GLOBUS_NULL) { fail_callback(callback_arg, request); } return; } break; case GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_FAILING: req->status = GLOBUS_GASS_TRANSFER_REQUEST_FAILING; last_data = GLOBUS_TRUE; while(!globus_fifo_empty(&req->pending_data)) { head = globus_fifo_dequeue(&req->pending_data); /* Call back to user */ globus_i_gass_transfer_unlock(); head->callback(head->callback_arg, request, head->bytes, nbytes, last_data); globus_free(head); nbytes = 0; globus_i_gass_transfer_lock(); } fail_callback = req->fail_callback; callback_arg = req->fail_callback_arg; /* free up references to request and proto */ req->proto->destroy(req->proto, request); /* free up the proto's and GASS's reference to this request */ globus_i_gass_transfer_request_destroy(request); globus_i_gass_transfer_unlock(); fail_callback(callback_arg, request); return; case GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_REFERRING: req->status = GLOBUS_GASS_TRANSFER_REQUEST_REFERRING; last_data = GLOBUS_TRUE; while(!globus_fifo_empty(&req->pending_data)) { head = 
globus_fifo_dequeue(&req->pending_data); /* Call back to user */ globus_i_gass_transfer_unlock(); head->callback(head->callback_arg, request, head->bytes, nbytes, last_data); globus_free(head); nbytes = 0; globus_i_gass_transfer_lock(); } /* free up references to request and proto */ req->proto->destroy(req->proto, request); /* free up the proto's and GASS's reference to this request */ globus_i_gass_transfer_request_destroy(request); globus_i_gass_transfer_unlock(); return; case GLOBUS_GASS_TRANSFER_REQUEST_PENDING: case GLOBUS_GASS_TRANSFER_REQUEST_FAILING: case GLOBUS_GASS_TRANSFER_REQUEST_FAILED: case GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL1: case GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL2: case GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL3: case GLOBUS_GASS_TRANSFER_REQUEST_USER_FAIL: case GLOBUS_GASS_TRANSFER_REQUEST_REFERRED: case GLOBUS_GASS_TRANSFER_REQUEST_DENIED: case GLOBUS_GASS_TRANSFER_REQUEST_DONE: case GLOBUS_GASS_TRANSFER_REQUEST_STARTING: case GLOBUS_GASS_TRANSFER_REQUEST_STARTING2: case GLOBUS_GASS_TRANSFER_REQUEST_STARTING3: case GLOBUS_GASS_TRANSFER_REQUEST_ACCEPTING: case GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_PENDING: case GLOBUS_GASS_TRANSFER_REQUEST_FINISHING: case GLOBUS_GASS_TRANSFER_REQUEST_REFERRING: globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_PENDING); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_FAILING); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_FAILED); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL1); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL2); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_SERVER_FAIL3); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_USER_FAIL); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_REFERRED); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_DENIED); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_DONE); globus_assert(req->status != 
GLOBUS_GASS_TRANSFER_REQUEST_STARTING); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_STARTING2); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_STARTING3); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_ACCEPTING); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_ACTING_TO_PENDING); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_FINISHING); globus_assert(req->status != GLOBUS_GASS_TRANSFER_REQUEST_REFERRING); goto finish; case GLOBUS_GASS_TRANSFER_REQUEST_INVALID: goto finish; } finish: globus_i_gass_transfer_unlock(); return; }
static void gfs_l_xio_cp_write_cb( globus_xio_handle_t handle, globus_result_t result, globus_byte_t * buffer, globus_size_t len, globus_size_t nbytes, globus_xio_data_descriptor_t data_desc, void * user_arg) { gfs_l_xio_read_buffer_t * read_buf; gfs_i_xio_cp_handle_t * cp_h; read_buf = (gfs_l_xio_read_buffer_t *) user_arg; cp_h = read_buf->whos_my_daddy; globus_free(read_buf); globus_mutex_lock(&cp_h->mutex); { if(result != GLOBUS_SUCCESS) { goto error; } if(cp_h->state == GFS_CIO_CP_STATE_ERROR) { goto error; } /* if there are outstanding read buffers left use this handle to write one */ if(!globus_fifo_empty(cp_h->read_buffer_q)) { read_buf = (gfs_l_xio_read_buffer_t *) globus_fifo_dequeue(&cp_h->write_q); globus_xio_handle_cntl( read_buf->write_xio, NULL, /* QUERY MAYBE? */ GLOBUS_XIO_SEEK, read_buf->offset); result = globus_xio_register_write( read_buf->write_xio, read_buf->buffer, read_buf->nbytes, read_buf->nbytes, NULL, gfs_l_xio_cp_write_cb, read_buf); if(result != GLOBUS_SUCCESS) { goto error; } } /* if read buffers are gone and all read handles are gone then we are at eof and can start cloising the writes */ else if(globus_fifo_empty(cp_h->read_all_q)) { gfs_l_xio_close_write_handles(cp_h); } /* if still going but nothing to write just put this back in the queue */ else { globus_fifo_enqueue(&cp_h->write_q, read_buf->write_xio); } } globus_mutex_unlock(&cp_h->mutex); if(cp_h->update_cb) { cp_h->update_cb(offset, nbytes, cp_h->user_arg); } return; error: globus_free(read_buf); gfs_l_xio_cp_error(cp_h, result); globus_mutex_unlock(&cp_h->mutex); }
/*
 * Drive the next piece of work for a telnet-driver operation.
 *
 * Priority order:
 *  1. If a queued telnet command is pending in write_q, pass a 3-byte
 *     write of it down the stack (3 = IAC + command + option).
 *  2. Otherwise, if the accumulated read buffer already contains a
 *     complete unit of data, copy it to the user's iovec, compact the
 *     leftover bytes to the front of the buffer, and mark the op
 *     finished.
 *  3. Otherwise grow the buffer if needed and pass a read down the
 *     stack for more data.
 *
 * On a pass_write/pass_read failure the op is marked finished with the
 * failing result and zero length.
 */
static
void
globus_l_xio_telnet_request_data(
    globus_l_xio_telnet_handle_t *      handle,
    globus_xio_operation_t              op)
{
    globus_bool_t                       complete;
    globus_size_t                       end;
    globus_size_t                       len;
    globus_result_t                     res = GLOBUS_SUCCESS;
    globus_size_t                       remainder;
    globus_size_t                       diff;

    /* 1) a queued command takes precedence over data movement */
    if(!globus_fifo_empty(&handle->write_q))
    {
        handle->write_iovec.iov_base = globus_fifo_dequeue(&handle->write_q);
        /* telnet commands are fixed 3-byte sequences */
        handle->write_iovec.iov_len = 3;

        res = globus_xio_driver_pass_write(
            op,
            &handle->write_iovec,
            1,
            3,
            globus_l_xio_telnet_cmd_write_cb,
            handle);
        if(res != GLOBUS_SUCCESS)
        {
            goto err;
        }
        return;
    }

    /* is there a full command in there, updates read_buffer_ndx */
    complete = globus_l_xio_telnet_check_data(handle, &end);
    if(complete)
    {
        /* bytes accumulated beyond the complete unit ending at 'end' */
        remainder = handle->read_buffer_ndx - end;

        if(handle->create_buffer_mode)
        {
            /* driver allocates a buffer for the user sized exactly to
             * the data.
             * NOTE(review): globus_malloc result is not checked — a
             * failed allocation would make memcpy write through NULL */
            len = end;
            handle->user_read_iovec->iov_base = globus_malloc(len);
            memcpy(handle->user_read_iovec->iov_base,
                handle->read_buffer, len);
            handle->user_read_iovec->iov_len = len;
        }
        else
        {
            if(handle->user_read_iovec->iov_len >= end)
            {
                /* user buffer is large enough for the whole unit */
                len = end;
            }
            else
            {
                /* user buffer too small: deliver what fits, and leave
                 * the overflow (diff) in the read buffer as remainder */
                diff = end - handle->user_read_iovec->iov_len;
                len = handle->user_read_iovec->iov_len;
                end -= diff;
                remainder += diff;
            }
            memcpy(handle->user_read_iovec->iov_base,
                handle->read_buffer, len);
        }

        /* move remainder to the begining of the buffer */
        if(remainder > 0)
        {
            memmove(handle->read_buffer, &handle->read_buffer[end],
                remainder);
        }
        handle->read_buffer_ndx = remainder;

        /* record completion; caller/callback finishes the op */
        handle->finish = GLOBUS_TRUE;
        handle->finish_len = len;
        handle->finish_res = GLOBUS_SUCCESS;
    }
    else
    {
        /* not enough data yet: ensure at least one byte of space,
         * doubling the buffer when (almost) full.
         * NOTE(review): realloc result is assigned directly — on
         * failure the old buffer pointer is lost (leak) and the next
         * access dereferences NULL; confirm project OOM policy */
        if(handle->read_buffer_ndx + 1 >= handle->read_buffer_length)
        {
            handle->read_buffer_length *= 2;
            handle->read_buffer = globus_libc_realloc(
                handle->read_buffer, handle->read_buffer_length);
        }

        /* read into the unused tail of the accumulation buffer;
         * wait_for of 1 so any amount of data wakes us */
        handle->read_iovec.iov_base =
            &handle->read_buffer[handle->read_buffer_ndx];
        handle->read_iovec.iov_len =
            handle->read_buffer_length - handle->read_buffer_ndx;

        res = globus_xio_driver_pass_read(
            op,
            &handle->read_iovec,
            1,
            1,
            globus_l_xio_telnet_read_cb,
            handle);
        if(res != GLOBUS_SUCCESS)
        {
            goto err;
        }
    }

    return;

err:
    /* surface the pass failure through the driver's finish fields */
    handle->finish = GLOBUS_TRUE;
    handle->finish_len = 0;
    handle->finish_res = res;
}
/**
 * Create a string representation of a restart marker.
 * @ingroup globus_ftp_client_restart_marker
 *
 * This function sets the @a marker_string parameter to point to
 * a freshly allocated string suitable for sending as an argument to
 * the FTP REST command, or for a later call to
 * globus_ftp_client_restart_marker_from_string().
 *
 * The string pointed to by marker_string must be freed by the caller.
 * For a NONE-type marker (and for an extended-block marker with no
 * ranges) *marker_string is set to GLOBUS_NULL and GLOBUS_SUCCESS is
 * returned.
 *
 * @param marker
 *        An initialized FTP client restart marker.
 * @param marker_string
 *        A pointer to a char * to be set to a freshly allocated marker
 *        string.
 *
 * @return GLOBUS_SUCCESS, or an error on NULL parameters or failed
 *         allocation.
 *
 * @see globus_ftp_client_restart_marker
 */
globus_result_t
globus_ftp_client_restart_marker_to_string(
    globus_ftp_client_restart_marker_t *        marker,
    char **                                     marker_string)
{
    int                                         length = 0, mylen;
    char *                                      buf = GLOBUS_NULL;
    char *                                      tbuf;
    globus_i_ftp_client_range_t *               range;
    globus_fifo_t *                             tmp;
    globus_off_t                                offset;
    globus_size_t                               digits;
    globus_object_t *                           err;
    GlobusFuncName(globus_ftp_client_restart_marker_to_string);

    if(marker == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker"));
    }
    else if(marker_string == GLOBUS_NULL)
    {
        return globus_error_put(
            GLOBUS_I_FTP_CLIENT_ERROR_NULL_PARAMETER("marker_string"));
    }

    (*marker_string) = GLOBUS_NULL;

    if(marker->type == GLOBUS_FTP_CLIENT_RESTART_NONE)
    {
        return GLOBUS_SUCCESS;
    }
    else if(marker->type == GLOBUS_FTP_CLIENT_RESTART_STREAM)
    {
        /* the larger of the binary and ascii offsets wins */
        if(marker->stream.ascii_offset > marker->stream.offset)
        {
            offset = marker->stream.ascii_offset;
        }
        else
        {
            offset = marker->stream.offset;
        }

        digits = globus_i_ftp_client_count_digits(offset);

        (*marker_string) = globus_libc_malloc(digits+1);
        if(!(*marker_string))
        {
            err = GLOBUS_I_FTP_CLIENT_ERROR_OUT_OF_MEMORY();
            if(!err)
            {
                err = GLOBUS_ERROR_NO_INFO;
            }
            goto error_exit;
        }

        /* FIX: was "%lu" with an (unsigned long) cast, which truncates
         * 64-bit globus_off_t values on 32-bit/LLP64 platforms; use the
         * same GLOBUS_OFF_T_FORMAT as the range branch below. */
        globus_libc_sprintf((*marker_string),
            "%"GLOBUS_OFF_T_FORMAT,
            offset);
    }
    else if(marker->type == GLOBUS_FTP_CLIENT_RESTART_EXTENDED_BLOCK &&
            !globus_fifo_empty(&marker->extended_block.ranges))
    {
        /* walk a shallow copy so the marker's own queue is untouched;
         * range structs are shared with the original, so only the copy
         * itself is freed */
        tmp = globus_fifo_copy(&marker->extended_block.ranges);

        while((! globus_fifo_empty(tmp)))
        {
            range = (globus_i_ftp_client_range_t *)
                globus_fifo_dequeue(tmp);

            /* "<offset>-<end_offset>," plus room for the terminator */
            mylen = globus_i_ftp_client_count_digits(range->offset);
            mylen++;
            mylen += globus_i_ftp_client_count_digits(range->end_offset);
            mylen++;

            if(buf)
            {
                tbuf = realloc(buf, length + mylen + 1);
            }
            else
            {
                tbuf = malloc(length + mylen + 1);
            }
            if(!tbuf)
            {
                err = GLOBUS_I_FTP_CLIENT_ERROR_OUT_OF_MEMORY();
                if(!err)
                {
                    err = GLOBUS_ERROR_NO_INFO;
                }
                goto buf_err;
            }
            else
            {
                buf = tbuf;
            }

            length += globus_libc_sprintf(
                buf + length,
                "%"GLOBUS_OFF_T_FORMAT"-%"GLOBUS_OFF_T_FORMAT",",
                range->offset,
                range->end_offset);
        }
        /* drop the trailing comma */
        buf[strlen(buf)-1] = '\0';
        (*marker_string) = buf;

        globus_fifo_destroy(tmp);
        globus_libc_free(tmp);
    }

    return GLOBUS_SUCCESS;

buf_err:
    globus_fifo_destroy(tmp);
    /* FIX: the fifo struct returned by globus_fifo_copy was destroyed
     * but never freed on this path, leaking it (the success path above
     * and globus_ftp_client_restart_marker_get_total both destroy AND
     * free it). */
    globus_libc_free(tmp);
    globus_libc_free(buf);
error_exit:
    return globus_error_put(err);
}