Example #1
/* Headers this test program relies on; the vqueue header name is assumed. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include "vqueue.h"   /* assumed header declaring vq_t, vq_init(), vq_write(), vq_destroy() */

/* test_str, test_str_length, and example() are defined elsewhere in this test file. */
int main(void){
    vq_t * vq = vq_init("/vqtest", 20);
    if(vq == NULL){
        printf("Error creating vqueue: %s\n", strerror(errno));
        return -1;
    }

    size_t t = vq_write(vq, test_str, test_str_length);

    if(t == test_str_length)
        printf("Return value correct.\n");

    if(vq->length == test_str_length)
        printf("Length set correctly.\n");

    if(memcmp(vq->_buffer_start, test_str, test_str_length) == 0)
        printf("Reading from start succeeded.\n");

    if(memcmp(vq->_buffer_middle, test_str, test_str_length) == 0)
        printf("Reading from middle succeeded.\n");

    vq_destroy(vq);

    example();

    return 0;
}
Example #2
int
vfs_stdac_init(struct mount *mp)
{
	const char* fs_type;
	int i, fstype_ok = 0;

	/* if mounted fs is read-only, do not do anything */
	if (mp->mnt_flag & MNT_RDONLY)
		return (0);

	/* is the mounted fs type one we want to do some accounting for? */
	for (i = 0; i < ACCOUNTING_NB_FSTYPES; i++) {
		fs_type = accounting_fstypes[i];
		if (strncmp(mp->mnt_stat.f_fstypename, fs_type,
		    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
			fstype_ok = 1;
			break;
		}
	}
	if (fstype_ok == 0)
		return (0);

	vq_init(mp);
	return (0);
}
Example #3
static void example(void) {
    vq_t * vq = vq_init("/vqexample", 4096);
    assert(vq != NULL);

    int r = 0;
    (void) r;    /* silences the unused-variable warning when asserts are compiled out */

    // Write 2000 bytes
    char buffer[2000] = "Test data! Test data!";
    r = vq_write(vq, buffer, 2000);
    assert(r == 2000);
    
    // Read & verify data
    char buffer_check[2000];
    r = vq_read(vq, buffer_check, 2000);
    assert(r == 2000);
    assert(memcmp(buffer_check, buffer, 2000) == 0);

    for(int i = 0; i < 100; i++){
        // Write more data
        r = vq_write(vq, buffer, 2000);
        assert(r == 2000);
        r = vq_write(vq, buffer, 2000);
        assert(r == 2000);

        // Read & verify data, spanning the gap
        r = vq_read(vq, buffer_check, 2000);
        assert(r == 2000);
        assert(memcmp(buffer_check, buffer, 2000) == 0);

        r = vq_read(vq, buffer_check, 2000);
        assert(r == 2000);
        assert(memcmp(buffer_check, buffer, 2000) == 0);
    }

    vq_destroy(vq);

    printf("Successfully ran example!\n");
}
Example #4
static void
vtbe_intr(void *arg)
{
	struct vtbe_softc *sc;
	int pending;
	uint32_t reg;

	sc = arg;

	VTBE_LOCK(sc);

	reg = PIO_READ(sc->pio_recv);

	/* Ack */
	PIO_SET(sc->pio_recv, reg, 0);

	pending = htobe32(reg);
	if (pending & Q_SEL) {
		reg = READ4(sc, VIRTIO_MMIO_QUEUE_SEL);
		sc->vs_curq = be32toh(reg);
	}

	if (pending & Q_PFN) {
		vq_init(sc);
	}

	if (pending & Q_NOTIFY) {
		/* beri rx / arm tx notify */
		vtbe_txfinish_locked(sc);
	}

	if (pending & Q_NOTIFY1) {
		vtbe_rxfinish_locked(sc);
	}

	VTBE_UNLOCK(sc);
}
Example #5
/*
 * Quantize pt[0..n_pt-1][0..veclen-1] into cb[0..vqsize-1][0..veclen-1] (where
 * vqsize < n_pt, presumably).  Do this with the following iterative procedure:
 *     1. Choose an initial VQ codebook by selecting vqsize random points from pt.
 *     2. Map each point in pt to the "nearest" codebook entry (currently based on
 * 	  Euclidean distance).
 *     3. Re-estimate each VQ entry by taking the centroid of all pt entries mapped
 * 	  to it in step 2.
 *     4. Repeat steps 2 and 3 until the "total error stabilizes".
 * In the end, replace each point in pt with the nearest VQ value.
 * Return value: final total error.
 */
static float64 vq (float32 **pt, float32 **cb, int32 n_pt, int32 vqsize, int32 veclen)
{
    int32 p, c, i, iter, bestc, *pt2cb, *n_newcb;
    float64 d, bestdist, err, prev_err;
    float32 **newcb;
    
    E_INFO("Clustering %d points into %d\n", n_pt, vqsize);

    /* Allocate some temporary variables */
    pt2cb = (int32 *) ckd_calloc (n_pt, sizeof(int32));
    newcb = (float32 **)ckd_calloc_2d (vqsize, veclen, sizeof(float32));
    n_newcb = (int32 *) ckd_calloc (vqsize, sizeof(int32));

    /* Choose an initial codebook */
    vq_init (pt, cb, n_pt, vqsize, veclen);
    
    for (iter = 0;; iter++) {
	timing_start (tmg);

	/* Map each point to closest codebook entry (using Euclidean distance metric) */
	err = 0.0;
	for (p = 0; p < n_pt; p++) {
	    bestdist = 1e+200;
	    for (c = 0; c < vqsize; c++) {
		d = vecdist (pt[p], cb[c], veclen);
		if (d < bestdist) {
		    bestdist = d;
		    bestc = c;
		}
	    }
	    
	    pt2cb[p] = bestc;
	    err += bestdist;
	}
	
	/* Update codebook entries with centroid of mapped points */
	for (c = 0; c < vqsize; c++) {
	    for (i = 0; i < veclen; i++)
		newcb[c][i] = 0.0;
	    n_newcb[c] = 0;
	}
	for (p = 0; p < n_pt; p++) {
	    c = pt2cb[p];
	    for (i = 0; i < veclen; i++)
		newcb[c][i] += pt[p][i];
	    n_newcb[c]++;
	}
	for (c = 0; c < vqsize; c++) {
	    if (n_newcb[c] == 0)
		E_ERROR("Nothing mapped to codebook entry %d; entry not updated\n", c);
	    else {
		float64 t;
		
		t = 1.0 / n_newcb[c];
		for (i = 0; i < veclen; i++)
		    cb[c][i] = newcb[c][i] * t;
	    }
	}

	timing_stop (tmg);
	
	E_INFO("%4d: Error = %e, %.2f sec CPU, %.2f sec elapsed\n",
	       iter, err, tmg->t_cpu, tmg->t_elapsed);

	timing_reset (tmg);

	/* Check if VQ codebook converged */
	if (iter > 10) {
	    if ((err == 0.0) || ((prev_err - err)/prev_err < 0.002))
		break;
	}
	prev_err = err;
    }

    /* Replace points with nearest VQ entries created */
    for (p = 0; p < n_pt; p++) {
	c = pt2cb[p];
	for (i = 0; i < veclen; i++)
	    pt[p][i] = cb[c][i];
    }
    
    ckd_free (pt2cb);
    ckd_free_2d ((void **) newcb);
    ckd_free (n_newcb);

    return err;
}
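The two helpers called above, vq_init() and vecdist(), are not included in this snippet. The sketches below are hypothetical reconstructions based only on the comment block at the top of the example: vq_init() seeds the codebook with vqsize distinct random points from pt (step 1), and vecdist() returns a squared Euclidean distance. float32/float64/int32 and ckd_calloc/ckd_free are the SphinxTrain utility types and allocators already used above; the real implementations may differ.

/* Hypothetical: seed cb[0..vqsize-1] with vqsize distinct random points from
 * pt[0..n_pt-1], using a partial Fisher-Yates shuffle of the point indices.
 * rand() comes from <stdlib.h>. */
static void vq_init (float32 **pt, float32 **cb, int32 n_pt, int32 vqsize, int32 veclen)
{
    int32 *idx, i, j, k, tmp;

    idx = (int32 *) ckd_calloc (n_pt, sizeof(int32));
    for (i = 0; i < n_pt; i++)
	idx[i] = i;

    for (i = 0; i < vqsize; i++) {
	/* Pick a random not-yet-used index and swap it into position i */
	j = i + rand() % (n_pt - i);
	tmp = idx[i]; idx[i] = idx[j]; idx[j] = tmp;

	for (k = 0; k < veclen; k++)
	    cb[i][k] = pt[idx[i]][k];
    }

    ckd_free (idx);
}

/* Hypothetical: squared Euclidean distance between two veclen-dimensional vectors */
static float64 vecdist (float32 *v1, float32 *v2, int32 veclen)
{
    float64 d, diff;
    int32 i;

    d = 0.0;
    for (i = 0; i < veclen; i++) {
	diff = (float64) v1[i] - (float64) v2[i];
	d += diff * diff;
    }
    return d;
}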
Example #6
/*
 * Called by c2_probe to initialize the RNIC. This principally
 * involves initializing the various limits and resource pools that
 * comprise the RNIC instance.
 */
int __devinit c2_rnic_init(struct c2_dev *c2dev)
{
	int err;
	u32 qsize, msgsize;
	void *q1_pages;
	void *q2_pages;
	void __iomem *mmio_regs;

	/* Device capabilities */
	c2dev->device_cap_flags =
	    (IB_DEVICE_RESIZE_MAX_WR |
	     IB_DEVICE_CURR_QP_STATE_MOD |
	     IB_DEVICE_SYS_IMAGE_GUID |
	     IB_DEVICE_ZERO_STAG |
	     IB_DEVICE_MEM_WINDOW);

	/* Allocate the qptr_array */
	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
	if (!c2dev->qptr_array) {
		return -ENOMEM;
	}

	/* Initialize the qptr_array */
	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
	c2dev->qptr_array[2] = (void *) &c2dev->aeq;

	/* Initialize data structures */
	init_waitqueue_head(&c2dev->req_vq_wo);
	spin_lock_init(&c2dev->vqlock);
	spin_lock_init(&c2dev->lock);

	/* Allocate MQ shared pointer pool for kernel clients. User
	 * mode client pools are hung off the user context
	 */
	err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
	if (err) {
		goto bail0;
	}

	/* Allocate shared pointers for Q0, Q1, and Q2 from
	 * the shared pointer pool.
	 */

	c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->hint_count_dma,
					     GFP_KERNEL);
	c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->req_vq.shared_dma,
					     GFP_KERNEL);
	c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->rep_vq.shared_dma,
					     GFP_KERNEL);
	c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					  &c2dev->aeq.shared_dma, GFP_KERNEL);
	if (!c2dev->hint_count || !c2dev->req_vq.shared ||
	    !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	mmio_regs = c2dev->kva;
	/* Initialize the Verbs Request Queue */
	c2_mq_req_init(&c2dev->req_vq, 0,
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the Verbs Reply Queue */
	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
	if (!q1_pages) {
		err = -ENOMEM;
		goto bail1;
	}
	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
		 (unsigned long long) c2dev->rep_vq.host_dma);
	c2_mq_rep_init(&c2dev->rep_vq,
		   1,
		   qsize,
		   msgsize,
		   q1_pages,
		   mmio_regs +
		   be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
		   C2_MQ_HOST_TARGET);

	/* Initialize the Asynchronous Event Queue */
	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
				      &c2dev->aeq.host_dma, GFP_KERNEL);
	if (!q2_pages) {
		err = -ENOMEM;
		goto bail2;
	}
	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
		 (unsigned long long) c2dev->aeq.host_dma);
	c2_mq_rep_init(&c2dev->aeq,
		       2,
		       qsize,
		       msgsize,
		       q2_pages,
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
		       C2_MQ_HOST_TARGET);

	/* Initialize the verbs request allocator */
	err = vq_init(c2dev);
	if (err)
		goto bail3;

	/* Enable interrupts on the adapter */
	writel(0, c2dev->regs + C2_IDIS);

	/* create the WR init message */
	err = c2_adapter_init(c2dev);
	if (err)
		goto bail4;
	c2dev->init++;

	/* open an adapter instance */
	err = c2_rnic_open(c2dev);
	if (err)
		goto bail4;

	/* Initialize the cached adapter limits */
	err = c2_rnic_query(c2dev, &c2dev->props);
	if (err)
		goto bail5;

	/* Initialize the PD pool */
	err = c2_init_pd_table(c2dev);
	if (err)
		goto bail5;

	/* Initialize the QP pool */
	c2_init_qp_table(c2dev);
	return 0;

      bail5:
	c2_rnic_close(c2dev);
      bail4:
	vq_term(c2dev);
      bail3:
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
      bail2:
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
      bail1:
	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
      bail0:
	vfree(c2dev->qptr_array);

	return err;
}