Example #1
0
/*
 * Drain the block frontend: push any required ordering/flush operation,
 * then busy-wait (sleeping between polls) until every request slot on
 * the shared ring is free again.
 *
 * dev: the blkfront device whose ring is to be drained.
 *
 * NOTE(review): only drains requests already on the ring; as the comment
 * below says, it will never finish if another thread keeps enqueueing.
 */
void blkfront_sync(struct blkfront_dev *dev)
{
    unsigned long flags;
    DEFINE_WAIT(w);

    if (dev->info.mode == O_RDWR) {
        /* Backend feature flags — presumably advertised via xenstore
         * (barrier / flush-diskcache support); TODO confirm at probe site. */
        if (dev->info.barrier == 1)
            blkfront_push_operation(dev, BLKIF_OP_WRITE_BARRIER, 0);

        if (dev->info.flush == 1)
            blkfront_push_operation(dev, BLKIF_OP_FLUSH_DISKCACHE, 0);
    }

    /* Note: This won't finish if another thread enqueues requests.  */
    local_irq_save(flags);
    while (1) {
	/* Reap completed responses; may free ring slots. */
	blkfront_aio_poll(dev);
	/* All slots free ⇒ nothing in flight ⇒ drained. */
	if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
	    break;

	/* Register on the wait queue BEFORE re-enabling interrupts, so a
	 * completion interrupt arriving in between still wakes us. */
	add_waiter(w, blkfront_queue);
	local_irq_restore(flags);
	schedule();
	local_irq_save(flags);
    }
    remove_waiter(w, blkfront_queue);
    local_irq_restore(flags);
}
Example #2
0
/* Walk the dirty map and enqueue read requests onto the ring.
 *
 * The walk resumes from wl->cur (saved back before returning) so a
 * caller can retry after the ring drains.
 *
 * returns:  0 when the entire bitmap has been enqueued,
 *           1 when the ring is full (retry once responses arrive),
 *          -1 on error
 */
static int writelog_enqueue_requests(struct writelog* wl)
{
  struct disk_range* range;
  log_request_t* req;

  for (range = wl->cur; (void*)range < bmend(wl->shm); range++) {
    /* A zero-count entry terminates the list of dirty extents. */
    if (!range->count)
      break;

    /* No free request slots: stop and let the caller retry later. */
    if (RING_FULL(&wl->fring))
      break;

    /* insert range into request stream */
    BDPRINTF("enqueueing dirty extent: %"PRIu64":%u (ring space: %d/%d)",
	     range->sector, range->count, RING_FREE_REQUESTS(&wl->fring),
	     RING_SIZE(&wl->fring));

    req = RING_GET_REQUEST(&wl->fring, wl->fring.req_prod_pvt);

    req->sector = range->sector;
    req->count = range->count;
    /* ... */
    req->offset = 0;

    wl->fring.req_prod_pvt++;
    wl->inflight++;
  }

  /* Remember where to resume on the next call. */
  wl->cur = range;

  /* Fix: when the loop ran off the end of the bitmap, 'range' points one
   * past the region and must not be dereferenced (out-of-bounds read in
   * the original).  Reaching the end means everything was enqueued. */
  if ((void*)range < bmend(wl->shm) && range->count)
    return 1;

  return 0;
}
Example #3
0
/* read_loop: pump the dirty bitmap through the request ring.
 *
 * Flow: snapshot the dirty bitmap, enqueue as many extents as the ring
 * will hold, notify the other end, wait for responses to free slots,
 * and repeat until the whole bitmap has gone out.
 *
 * Returns 0 on success, -1 on any I/O or control-channel failure.
 */
int read_loop(struct writelog* wl, int fd)
{
  int status;

  if (get_writes(wl, fd, 1) < 0)
    return -1;
  writelog_dump(wl);

  for (;;) {
    status = writelog_enqueue_requests(wl);

    /* Publish any newly queued requests to the shared ring. */
    if (RING_FREE_REQUESTS(&wl->fring) < RING_SIZE(&wl->fring))
      RING_PUSH_REQUESTS(&wl->fring);

    if (ctl_kick(fd) < 0)
      return -1;

    /* Drain outstanding responses so slots free up for the next pass. */
    if (wl->inflight && await_responses(wl, fd) < 0)
      return -1;

    /* status > 0 means the ring filled before the bitmap ran out. */
    if (status <= 0)
      break;
  }

  return status;
}
Example #4
0
/**
 * Open network device
 *
 * Resets the device, builds the TX/RX descriptor rings and event
 * channel, negotiates backend features via xenstore flags, moves the
 * device to Connected, and waits for the backend to follow.  On any
 * failure, everything acquired so far is torn down in reverse order
 * via the goto ladder below.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int netfront_open ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	int rc;

	/* Ensure device is in a suitable initial state */
	if ( ( rc = netfront_reset ( netfront ) ) != 0 )
		goto err_reset;

	/* Create transmit descriptor ring */
	if ( ( rc = netfront_create_ring ( netfront, &netfront->tx ) ) != 0 )
		goto err_create_tx;
	SHARED_RING_INIT ( netfront->tx_sring );
	FRONT_RING_INIT ( &netfront->tx_fring, netfront->tx_sring, PAGE_SIZE );
	assert ( RING_SIZE ( &netfront->tx_fring ) >= netfront->tx.count );

	/* Create receive descriptor ring */
	if ( ( rc = netfront_create_ring ( netfront, &netfront->rx ) ) != 0 )
		goto err_create_rx;
	SHARED_RING_INIT ( netfront->rx_sring );
	FRONT_RING_INIT ( &netfront->rx_fring, netfront->rx_sring, PAGE_SIZE );
	assert ( RING_SIZE ( &netfront->rx_fring ) >= netfront->rx.count );

	/* Create event channel */
	if ( ( rc = netfront_create_event ( netfront ) ) != 0 )
		goto err_create_event;

	/* "Request" the rx-copy feature.  Current versions of
	 * xen_netback.ko will fail silently if this parameter is not
	 * present.
	 */
	if ( ( rc = netfront_write_flag ( netfront, "request-rx-copy" ) ) != 0 )
		goto err_request_rx_copy;

	/* Disable checksum offload, since we will always do the work anyway */
	if ( ( rc = netfront_write_flag ( netfront,
					  "feature-no-csum-offload" ) ) != 0 )
		goto err_feature_no_csum_offload;

	/* Inform backend that we will send notifications for RX requests */
	if ( ( rc = netfront_write_flag ( netfront,
					  "feature-rx-notify" ) ) != 0 )
		goto err_feature_rx_notify;

	/* Set state to Connected */
	if ( ( rc = xenbus_set_state ( xendev, XenbusStateConnected ) ) != 0 ) {
		DBGC ( netfront, "NETFRONT %s could not set state=\"%d\": %s\n",
		       xendev->key, XenbusStateConnected, strerror ( rc ) );
		goto err_set_state;
	}

	/* Wait for backend to connect */
	if ( ( rc = xenbus_backend_wait ( xendev, XenbusStateConnected ) ) !=0){
		DBGC ( netfront, "NETFRONT %s could not connect to backend: "
		       "%s\n", xendev->key, strerror ( rc ) );
		goto err_backend_wait;
	}

	/* Refill receive descriptor ring */
	netfront_refill_rx ( netdev );

	/* Set link up */
	netdev_link_up ( netdev );

	return 0;

	/* Error unwind: each label undoes exactly the steps that
	 * succeeded before the failure, in reverse acquisition order. */
 err_backend_wait:
	netfront_reset ( netfront );
 err_set_state:
	netfront_rm ( netfront, "feature-rx-notify" );
 err_feature_rx_notify:
	netfront_rm ( netfront, "feature-no-csum-offload" );
 err_feature_no_csum_offload:
	netfront_rm ( netfront, "request-rx-copy" );
 err_request_rx_copy:
	netfront_destroy_event ( netfront );
 err_create_event:
	netfront_destroy_ring ( netfront, &netfront->rx, NULL );
 err_create_rx:
	netfront_destroy_ring ( netfront, &netfront->tx, NULL );
 err_create_tx:
 err_reset:
	return rc;
}
Example #5
0
/*
 * Per-mount service thread: loops forever consuming requests from the
 * frontend ring, dispatching each to its fs_op handler, and pushing
 * responses back with an event-channel notification when required.
 *
 * data: a struct fs_mount* for the mount this thread serves.
 *
 * NOTE(review): the for(;;) loop below has no exit path, so the
 * teardown code after it (munmap/close/unbind/free/pthread_exit) is
 * unreachable as written — confirm whether a shutdown condition was
 * intended.
 */
static void *handle_mount(void *data)
{
    int more, notify;
    struct fs_mount *mount = (struct fs_mount *)data;
    
    printf("Starting a thread for mount: %d\n", mount->mount_id);
    allocate_request_array(mount);

    for(;;)
    {
        int nr_consumed=0;
        RING_IDX cons, rp;
        struct fsif_request *req;

        /* Complete any finished async I/O before pulling new requests. */
        handle_aio_events(mount);
moretodo:
        rp = mount->ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        /* Consume every request the frontend has produced so far. */
        while ((cons = mount->ring.req_cons) != rp)
        {
            int i;
            struct fs_op *op;

            printf("Got a request at %d (of %d)\n", 
                    cons, RING_SIZE(&mount->ring));
            req = RING_GET_REQUEST(&mount->ring, cons);
            printf("Request type=%d\n", req->type); 
            /* Linear scan of the NULL-terminated fsops dispatch table. */
            for(i=0;;i++)
            {
                op = fsops[i];
                if(op == NULL)
                {
                    /* We've reached the end of the array, no appropriate
                     * handler found. Warn, ignore and continue. */
                    printf("WARN: Unknown request type: %d\n", req->type);
                    mount->ring.req_cons++; 
                    break;
                }
                if(op->type == req->type)
                {
                    /* There needs to be a dispatch handler */
                    assert(op->dispatch_handler != NULL);
                    /* NOTE(review): req_cons is not advanced here, so the
                     * handler presumably advances it — verify, or this
                     * while loop never terminates. */
                    op->dispatch_handler(mount, req);
                    break;
                }
             }

            nr_consumed++;
        }
        printf("Backend consumed: %d requests\n", nr_consumed);
        /* Re-check for requests raced in after the final read of req_prod. */
        RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more);
        if(more) goto moretodo;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
        printf("Pushed responces and notify=%d\n", notify);
        if(notify)
            xc_evtchn_notify(mount->evth, mount->local_evtchn);
    }
 
    /* Unreachable: see NOTE(review) above. */
    printf("Destroying thread for mount: %d\n", mount->mount_id);
    xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1);
    xc_gnttab_close(mount->gnth);
    xc_evtchn_unbind(mount->evth, mount->local_evtchn);
    xc_evtchn_close(mount->evth);
    free(mount->frontend);
    pthread_exit(NULL);
}