static int aio_ref_get(struct aio_output *output, struct mref_object *mref) { loff_t total_size; if (unlikely(!output->brick->power.led_on)) return -EBADFD; if (unlikely(!output->mf)) { MARS_ERR("brick is not switched on\n"); return -EILSEQ; } if (unlikely(mref->ref_len <= 0)) { MARS_ERR("bad ref_len=%d\n", mref->ref_len); return -EILSEQ; } total_size = get_total_size(output); if (unlikely(total_size < 0)) { return total_size; } mref->ref_total_size = total_size; if (mref->ref_initialized) { _mref_get(mref); return mref->ref_len; } /* Buffered IO. */ if (!mref->ref_data) { struct aio_mref_aspect *mref_a = aio_mref_get_aspect(output->brick, mref); if (unlikely(!mref_a)) { MARS_ERR("bad mref_a\n"); return -EILSEQ; } if (unlikely(mref->ref_len <= 0)) { MARS_ERR("bad ref_len = %d\n", mref->ref_len); return -ENOMEM; } mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len)); if (unlikely(!mref->ref_data)) { MARS_ERR("ENOMEM %d bytes\n", mref->ref_len); return -ENOMEM; } #if 0 // ??? mref->ref_flags = 0; #endif mref_a->do_dealloc = true; atomic_inc(&output->total_alloc_count); atomic_inc(&output->alloc_count); } _mref_get_first(mref); return mref->ref_len; }
/* Set up the internal state of a freshly created client output:
 * allocate the request hash table and initialize all lists, the lock
 * and the wait queues used by the sender / receiver machinery.
 *
 * Returns 0 on success, -ENOMEM when the hash table cannot be
 * allocated.
 */
static int client_output_construct(struct client_output *output)
{
	int bucket;

	/* A single page holds all CLIENT_HASH_MAX hash buckets. */
	output->hash_table = brick_block_alloc(0, PAGE_SIZE);
	if (unlikely(!output->hash_table)) {
		MARS_ERR("cannot allocate hash table\n");
		return -ENOMEM;
	}
	for (bucket = 0; bucket < CLIENT_HASH_MAX; bucket++)
		INIT_LIST_HEAD(&output->hash_table[bucket]);

	spin_lock_init(&output->lock);
	INIT_LIST_HEAD(&output->mref_list);
	INIT_LIST_HEAD(&output->wait_list);

	init_waitqueue_head(&output->event);
	init_waitqueue_head(&output->sender.run_event);
	init_waitqueue_head(&output->receiver.run_event);
	init_waitqueue_head(&output->info_event);
	return 0;
}
static int client_ref_get(struct client_output *output, struct mref_object *mref) { int maxlen; if (mref->ref_initialized) { _mref_get(mref); return mref->ref_len; } #if 1 /* Limit transfers to page boundaries. * Currently, this is more restrictive than necessary. * TODO: improve performance by doing better when possible. * This needs help from the server in some efficient way. */ maxlen = PAGE_SIZE - (mref->ref_pos & (PAGE_SIZE-1)); if (mref->ref_len > maxlen) mref->ref_len = maxlen; #endif if (!mref->ref_data) { // buffered IO struct client_mref_aspect *mref_a = client_mref_get_aspect(output->brick, mref); if (!mref_a) return -EILSEQ; mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len)); if (!mref->ref_data) return -ENOMEM; mref_a->do_dealloc = true; mref->ref_flags = 0; } _mref_get_first(mref); return 0; }
static int aio_ref_get(struct aio_output *output, struct mref_object *mref) { struct file *file; struct inode *inode; loff_t total_size; if (unlikely(!output->mf)) { MARS_ERR("brick is not switched on\n"); return -EILSEQ; } if (unlikely(mref->ref_len <= 0)) { MARS_ERR("bad ref_len=%d\n", mref->ref_len); return -EILSEQ; } if (mref->ref_initialized) { _mref_get(mref); return mref->ref_len; } file = output->mf->mf_filp; if (unlikely(!file)) { MARS_ERR("file is not open\n"); return -EILSEQ; } if (unlikely(!file->f_mapping)) { MARS_ERR("file %p has no mapping\n", file); return -EILSEQ; } inode = file->f_mapping->host; if (unlikely(!inode)) { MARS_ERR("file %p has no inode\n", file); return -EILSEQ; } total_size = i_size_read(inode); mref->ref_total_size = total_size; /* Only check reads. * Writes behind EOF are always allowed (sparse files) */ if (!mref->ref_may_write) { loff_t len = total_size - mref->ref_pos; if (unlikely(len <= 0)) { /* Special case: allow reads starting _exactly_ at EOF when a timeout is specified. */ if (len < 0 || mref->ref_timeout <= 0) { MARS_DBG("ENODATA %lld\n", len); return -ENODATA; } } // Shorten below EOF, but allow special case if (mref->ref_len > len && len > 0) { mref->ref_len = len; } } /* Buffered IO. */ if (!mref->ref_data) { struct aio_mref_aspect *mref_a = aio_mref_get_aspect(output->brick, mref); if (unlikely(!mref_a)) { MARS_ERR("bad mref_a\n"); return -EILSEQ; } if (unlikely(mref->ref_len <= 0)) { MARS_ERR("bad ref_len = %d\n", mref->ref_len); return -ENOMEM; } mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len)); if (unlikely(!mref->ref_data)) { MARS_ERR("ENOMEM %d bytes\n", mref->ref_len); return -ENOMEM; } #if 0 // ??? mref->ref_flags = 0; #endif mref_a->do_dealloc = true; atomic_inc(&output->total_alloc_count); atomic_inc(&output->alloc_count); } _mref_get_first(mref); return mref->ref_len; }