Example #1
int CFileParser::GetInt()
{
	GetToken();

	if( GetTokenType() == HEX )
	{
		m_wszBuff += 2;		// Skip the "0x" prefix.
		wchar_t* pPos = m_wszBuff;

		// Scan to the end of the current token.
		while( !IsWhiteSpace(*pPos) && *pPos )
			pPos++;

		m_wszToken = wstring( m_wszBuff, pPos - m_wszBuff );
		m_wszBuff = pPos;
		return (int)GetHex();
	}
	else
	{
		wchar_t* pPos = m_wszBuff;

		// Scan to the end of the current token.
		while( !IsWhiteSpace(*pPos) && *pPos )
			pPos++;

		m_wszToken = wstring( m_wszBuff, pPos - m_wszBuff );
		m_wszBuff = pPos;

		if( m_wszToken[0] )
			READ_TABLE( _wtoi );
	}

	return 0;
}
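
For context, a minimal usage sketch. Only GetInt appears in the example above; the Open call and the file name are assumptions about the surrounding CFileParser class, not part of it.

// Hypothetical driver; Open() is assumed, not shown in the example.
CFileParser parser;
if( parser.Open( L"settings.txt" ) )
{
	int width  = parser.GetInt();	// consumes the next token, e.g. "800"
	int height = parser.GetInt();	// a hex token also works, e.g. "0x320"
}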
Example #2
double CFileParser::GetDouble()
{
	if( GetTokenType() != NUMBER )
		return 0.0;		// Not a numeric token.

	if( m_wszToken[0] )
	{
		READ_TABLE( _wtof );
	}

	return 0.0;
}
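
Unlike GetInt, GetDouble never advances the buffer itself, so it apparently expects the current token to have been read already. A hypothetical continuation of the sketch above, under that assumption:

parser.GetToken();			// assumed to read the next token, e.g. "9.81"
double gravity = parser.GetDouble();	// returns 0.0 if the token is not a NUMBER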
Example #3
/* Store data in the compact image. The argument 'soft_write' means
 * the store was caused by copy-on-read or prefetching, which need not
 * update metadata immediately. */
static BlockDriverAIOCB *store_data_in_compact_image (FvdAIOCB * acb,
                                                      int soft_write,
                                                      FvdAIOCB * parent_acb,
                                                      BlockDriverState * bs,
                                                      int64_t sector_num,
                                                      QEMUIOVector * orig_qiov,
                                                      const int nb_sectors,
                                                      BlockDriverCompletionFunc
                                                      * cb, void *opaque)
{
    BDRVFvdState *s = bs->opaque;

    const uint32_t first_chunk = sector_num / s->chunk_size;
    const uint32_t last_chunk = (sector_num + nb_sectors - 1) / s->chunk_size;
    int table_dirty = FALSE;
    uint32_t chunk;
    int64_t start_sec;

    /* Check if storage space is allocated. */
    for (chunk = first_chunk; chunk <= last_chunk; chunk++) {
        if (IS_EMPTY (s->table[chunk])) {
            uint32_t id = allocate_chunk (bs);
            if (IS_EMPTY (id)) {
                return NULL;
            }
            id |= DIRTY_TABLE;
            WRITE_TABLE (s->table[chunk], id);

            table_dirty = TRUE;
        } else if (IS_DIRTY (s->table[chunk])) {
            /* This is possible if a previous soft-write allocated the storage
             * space but did not flush the table entry change to the journal
             * and hence did not clean the dirty bit. This is also possible
             * with two concurrent hard-writes. The first hard-write allocated
             * the storage space but has not flushed the table entry change to
             * the journal yet and hence the table entry remains dirty. In
             * this case, the second hard-write will also try to flush this
             * dirty table entry to the journal. The outcome is correct since
             * they store the same metadata change in the journal (although
             * twice). For this race condition, we prefer to have two writes
             * to the journal rather than introducing a locking mechanism,
             * because this happens rarely and those two writes to the journal
             * are likely to be merged by the kernel into a single write since
             * they are likely to update back-to-back sectors in the journal.
             * A locking mechanism would be less efficient, because the large
             * size of chunks would cause unnecessary locking due to ``false
             * sharing'' of a chunk by two writes. */
            table_dirty = TRUE;
        }
    }

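    /* The table must be flushed to disk only for a hard write that touched
     * a dirty entry; a soft write defers this metadata update (see above). */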
    const int update_table = (!soft_write && table_dirty);
    size_t iov_left;
    uint8_t *iov_buf;
    int nb, iov_index, nqiov, niov;
    uint32_t prev;

    if (first_chunk == last_chunk) {
        goto handle_one_continuous_region;
    }

    /* Count the number of qiov and iov needed to cover the continuous regions
     * of the compact image. */
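    /* For example, if three logical chunks map to physical chunks 7, 8 and
     * 12, then 7 and 8 merge into one continuous region (current == prev + 1
     * below) while 12 starts a second region, so two qiov are needed. */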
    iov_left = orig_qiov->iov[0].iov_len;
    iov_buf = orig_qiov->iov[0].iov_base;
    iov_index = 0;
    nqiov = 0;
    niov = 0;
    prev = READ_TABLE (s->table[first_chunk]);

    /* Data in the first chunk. */
    nb = s->chunk_size - (sector_num % s->chunk_size);

    for (chunk = first_chunk + 1; chunk <= last_chunk; chunk++) {
        uint32_t current = READ_TABLE (s->table[chunk]);
        int64_t data_size;
        if (chunk < last_chunk) {
            data_size = s->chunk_size;
        } else {
            data_size = (sector_num + nb_sectors) % s->chunk_size;
            if (data_size == 0) {
                data_size = s->chunk_size;
            }
        }

        if (current == prev + 1) {
            nb += data_size;        /* Continue the previous region. */
        } else {
            /* Terminate the previous region. */
            niov +=
                count_iov (orig_qiov->iov, &iov_index, &iov_buf, &iov_left,
                           nb * 512);
            nqiov++;
            nb = data_size;        /* Data in the new region. */
        }
        prev = current;
    }

    if (nqiov == 0) {
      handle_one_continuous_region:
        /* A simple case. All data can be written out in one qiov and no new
         * chunks are allocated. */
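        /* This covers both a write that fits in a single chunk and the case
         * where all chunks written map to physically consecutive chunks. */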
        start_sec = READ_TABLE (s->table[first_chunk]) * s->chunk_size +
                                        (sector_num % s->chunk_size);

        if (!update_table && !acb) {
            if (parent_acb) {
                QDEBUG ("STORE: acb%llu-%p  "
                        "store_directly_without_table_update\n",
                        parent_acb->uuid, parent_acb);
            }
            return bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec,
                                    orig_qiov, nb_sectors, cb, opaque);
        }

        if (!acb && !(acb = init_store_acb (soft_write, orig_qiov, bs,
                            sector_num, nb_sectors, parent_acb, cb, opaque))) {
            return NULL;
        }

        QDEBUG ("STORE: acb%llu-%p  store_directly  sector_num=%" PRId64
                " nb_sectors=%d\n", acb->uuid, acb, acb->sector_num,
                acb->nb_sectors);

        acb->store.update_table = update_table;
        acb->store.num_children = 1;
        acb->store.one_child.hd_acb =
            bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec, orig_qiov,
                             nb_sectors, finish_store_data_in_compact_image,
                             &acb->store.one_child);
        if (acb->store.one_child.hd_acb) {
            acb->store.one_child.acb = acb;
            return &acb->common;
        } else {
            my_qemu_aio_unref (acb);
            return NULL;
        }
    }

    /* qiov for the last continuous region. */
    niov += count_iov (orig_qiov->iov, &iov_index, &iov_buf,
                       &iov_left, nb * 512);
    nqiov++;
    ASSERT (iov_index == orig_qiov->niov - 1 && iov_left == 0);

    /* Need to submit multiple requests to the lower layer. */
    if (!acb && !(acb = init_store_acb (soft_write, orig_qiov, bs, sector_num,
                                        nb_sectors, parent_acb, cb, opaque))) {
        return NULL;
    }
    acb->store.update_table = update_table;
    acb->store.num_children = nqiov;

    if (!parent_acb) {
        QDEBUG ("STORE: acb%llu-%p  start  sector_num=%" PRId64
                " nb_sectors=%d\n", acb->uuid, acb, acb->sector_num,
                acb->nb_sectors);
    }

    /* Allocate memory and create multiple requests. */
    const size_t metadata_size = nqiov * (sizeof (CompactChildCB) +
                                          sizeof (QEMUIOVector))
                                    + niov * sizeof (struct iovec);
    acb->store.children = (CompactChildCB *) my_qemu_malloc (metadata_size);
    QEMUIOVector *q = (QEMUIOVector *) (acb->store.children + nqiov);
    struct iovec *v = (struct iovec *) (q + nqiov);

    start_sec = READ_TABLE (s->table[first_chunk]) * s->chunk_size +
                                        (sector_num % s->chunk_size);
    nqiov = 0;
    iov_index = 0;
    iov_left = orig_qiov->iov[0].iov_len;
    iov_buf = orig_qiov->iov[0].iov_base;
    prev = READ_TABLE (s->table[first_chunk]);

    /* Data in the first chunk. */
    if (first_chunk == last_chunk) {
        nb = nb_sectors;
    }
    else {
        nb = s->chunk_size - (sector_num % s->chunk_size);
    }

    for (chunk = first_chunk + 1; chunk <= last_chunk; chunk++) {
        uint32_t current = READ_TABLE (s->table[chunk]);
        int64_t data_size;
        if (chunk < last_chunk) {
            data_size = s->chunk_size;
        } else {
            data_size = (sector_num + nb_sectors) % s->chunk_size;
            if (data_size == 0) {
                data_size = s->chunk_size;
            }
        }

        if (current == prev + 1) {
            nb += data_size;        /* Continue the previous region. */
        } else {
            /* Terminate the previous continuous region. */
            niov = setup_iov (orig_qiov->iov, v, &iov_index,
                              &iov_buf, &iov_left, nb * 512);
            qemu_iovec_init_external (q, v, niov);
            QDEBUG ("STORE: acb%llu-%p  create_child %d sector_num=%" PRId64
                    " nb_sectors=%d niov=%d\n", acb->uuid, acb, nqiov,
                    start_sec, q->size / 512, q->niov);
            acb->store.children[nqiov].hd_acb =
                bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec, q,
                                 q->size / 512,
                                 finish_store_data_in_compact_image,
                                 &acb->store.children[nqiov]);
            if (!acb->store.children[nqiov].hd_acb) {
                goto fail;
            }
            acb->store.children[nqiov].acb = acb;
            v += niov;
            q++;
            nqiov++;
            start_sec = current * s->chunk_size; /* Start of the new region. */
            nb = data_size;        /* Data in the new region. */
        }
        prev = current;
    }

    /* Request for the last continuous region. */
    niov = setup_iov (orig_qiov->iov, v, &iov_index, &iov_buf,
                      &iov_left, nb * 512);
    ASSERT (iov_index == orig_qiov->niov - 1 && iov_left == 0);
    qemu_iovec_init_external (q, v, niov);

    QDEBUG ("STORE: acb%llu-%p  create_child_last %d sector_num=%" PRId64
            " nb_sectors=%d niov=%d\n", acb->uuid, acb, nqiov, start_sec,
            q->size / 512, q->niov);
    acb->store.children[nqiov].hd_acb =
        bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec, q,
                         q->size / 512, finish_store_data_in_compact_image,
                         &acb->store.children[nqiov]);
    if (acb->store.children[nqiov].hd_acb) {
        acb->store.children[nqiov].acb = acb;
        return &acb->common;
    }

    int i;
  fail:
    QDEBUG ("STORE: acb%llu-%p  failed\n", acb->uuid, acb);
    for (i = 0; i < nqiov; i++) {
        bdrv_aio_cancel (acb->store.children[i].hd_acb);
    }
    my_qemu_free (acb->store.children);
    my_qemu_aio_unref (acb);
    return NULL;
}
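
To make the start_sec arithmetic above concrete, here is a small self-contained sketch of the logical-to-physical mapping a compact image like this uses. The table contents, chunk size, and function name are illustrative only, not FVD's actual layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: map a logical sector to a physical sector through a
 * chunk table, mirroring the start_sec computation in the function above. */
static int64_t map_sector (const uint32_t * table, int64_t chunk_size,
                           int64_t sector_num)
{
    int64_t chunk = sector_num / chunk_size;    /* logical chunk index */
    int64_t offset = sector_num % chunk_size;   /* offset inside the chunk */
    return (int64_t) table[chunk] * chunk_size + offset;
}

int main (void)
{
    /* Logical chunks 0..2 stored at physical chunks 7, 8 and 12. */
    const uint32_t table[] = { 7, 8, 12 };
    const int64_t chunk_size = 2048;            /* sectors per chunk */

    /* Sector 2100 is in logical chunk 1 at offset 52, so it maps to
     * physical sector 8 * 2048 + 52 = 16436. */
    printf ("%lld\n", (long long) map_sector (table, chunk_size, 2100));
    return 0;
}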