Beispiel #1
0
/*
 * Bulk-write an array of key/data requests into the store with a single
 * DB->put(DB_MULTIPLE_KEY) call.
 *
 * vec: array of requests; each entry supplies a NUL-terminated key,
 *      a data pointer and its length.
 * nr:  number of entries in vec.
 *
 * Returns ACRD_SUCCESS on success, ACRD_ERR_UNKNOWN on allocation or
 * database failure.
 */
int store_writev(struct store_req_vec *vec, int nr)
{
	DBT db_key;
	int ret;
	size_t buf_size = 0;
	char *buf;
	void *opaque;
	int i;

	/*
	 * Size the bulk buffer: key (including NUL), data, plus slack for
	 * the per-record bookkeeping the DB_MULTIPLE_KEY_WRITE_NEXT macro
	 * adds (the 256 bytes are presumably headroom for that — the exact
	 * overhead is an implementation detail of the macros).
	 */
	for (i = 0; i < nr; i++)
		buf_size += strlen(vec[i].key) + vec[i].data_len + 256;

	/*
	 * Previously a static pointer that was reallocated on every call
	 * and leaked on the error path; a plain local with a checked
	 * allocation fixes both the leak and the gratuitous thread-unsafety.
	 */
	buf = malloc(buf_size);
	if (buf == NULL)
		return ACRD_ERR_UNKNOWN;

	memset(&db_key, 0, sizeof(db_key));

	/* Bulk operations require caller-owned memory (DB_DBT_USERMEM). */
	db_key.ulen = buf_size;
	db_key.data = buf;
	db_key.flags = DB_DBT_USERMEM;

	DB_MULTIPLE_WRITE_INIT(opaque, &db_key);

	/* Pack every key/data pair into the single bulk DBT. */
	for (i = 0; i < nr; i++)
		DB_MULTIPLE_KEY_WRITE_NEXT(opaque, &db_key, vec[i].key,
					   strlen(vec[i].key) + 1, vec[i].data,
					   vec[i].data_len);

	ret = dbp->put(dbp, NULL, &db_key, NULL, DB_MULTIPLE_KEY);

	/* Free before the error check: the old code leaked buf on failure. */
	free(buf);

	if (ret != 0) {
		dbp->err(dbp, ret, "DB->put");
		return ACRD_ERR_UNKNOWN;
	}

	return ACRD_SUCCESS;
}
Beispiel #2
0
/*
 * Use Bulk Get/Put to copy the given number of pages worth of
 * records from the source database to the destination database,
 * this function should be called until all tables are copied, at
 * which point it will return SQLITE_DONE.  Both Btrees need to
 * have transactions before calling this function.
 * p->pSrc - Source Btree
 * p->tables - Contains a list of iTables to copy, gotten using
 *          btreeGetTables().
 * p->currentTable - Index in tables of the current table being copied.
 * p->srcCur -  Cursor on the current source table being copied.
 * p->pDest - Destiniation Btree.
 * p->destCur - BtCursor on the destination table being copied into.
 * pages - Number of pages worth of data to copy.
 */
/*
 * Use Bulk Get/Put to copy the given number of pages worth of
 * records from the source database to the destination database,
 * this function should be called until all tables are copied, at
 * which point it will return SQLITE_DONE.  Both Btrees need to
 * have transactions before calling this function.
 * p->pSrc - Source Btree
 * p->tables - Contains a list of iTables to copy, gotten using
 *          btreeGetTables().
 * p->currentTable - Index in tables of the current table being copied.
 * p->srcCur -  Cursor on the current source table being copied.
 * p->pDest - Destiniation Btree.
 * p->destCur - BtCursor on the destination table being copied into.
 * pages - Number of pages worth of data to copy.
 */
static int btreeCopyPages(sqlite3_backup *p, int *pages)
{
	DB *dbp;
	DBT dataOut, dataIn;
	char bufOut[MULTI_BUFSIZE], bufIn[MULTI_BUFSIZE];
	int ret, rc, copied, srcIsDupIndex;
	void *in, *out, *app;

	ret = 0;
	rc = SQLITE_OK;
	dbp = NULL;
	copied = 0;
	memset(&dataOut, 0, sizeof(dataOut));
	memset(&dataIn, 0, sizeof(dataIn));
	/* Bulk get/put require caller-owned buffers (DB_DBT_USERMEM). */
	dataOut.flags = DB_DBT_USERMEM;
	dataIn.flags = DB_DBT_USERMEM;
	dataOut.data = bufOut;
	dataOut.ulen = sizeof(bufOut);
	dataIn.data = bufIn;
	dataIn.ulen = sizeof(bufIn);

	/* A negative *pages means "no page budget": copy until done. */
	while (*pages < 0 || *pages > copied) {
		/* No tables left to copy */
		if (p->tables[p->currentTable] == -1) {
			u32 val;
			/*
			 * Update the schema file format and largest rootpage
			 * in the meta data.  Other meta data values should
			 * not be changed.
			 */
			sqlite3BtreeGetMeta(p->pSrc, 1, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 1, val);
			if (rc != SQLITE_OK)
				goto err;
			sqlite3BtreeGetMeta(p->pSrc, 3, &val);
		       if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 3, val);
			if (rc != SQLITE_OK)
				goto err;
			/* All tables copied: SQLITE_DONE is the success
			 * sentinel, passed through the err label untouched. */
			ret = SQLITE_DONE;
			goto err;
		}
		/* If not currently copying a table, get the next table. */
		if (!p->srcCur) {
			rc = btreeGetUserTable(p->pSrc, p->srcTxn, &dbp,
			    p->tables[p->currentTable]);
			if (rc != SQLITE_OK)
				goto err;
			assert(dbp);
			memset(&p->destCur, 0, sizeof(p->destCur));
			/*
			 * Open a cursor on the destination table, this will
			 * create the table and allow the Btree to manage the
			 * DB object.
			 */
			sqlite3BtreeCursor(p->pDest, p->tables[p->currentTable],
			    1, dbp->app_private, &p->destCur);
			if ((rc = p->destCur.error) != SQLITE_OK) {
				/* Cursor open failed: we still own dbp and
				 * its app_private, so release both here. */
				app = dbp->app_private;
				dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
				goto err;
			}
			/* Open a cursor on the source table. */
			if ((ret = dbp->cursor(dbp,
			    p->srcTxn, &p->srcCur, 0)) != 0)
				goto err;
			/* Ownership of dbp now rests with p->srcCur; clear
			 * the local so error paths don't double-close it. */
			dbp = 0;
		}
		srcIsDupIndex = isDupIndex((p->tables[p->currentTable] & 1) ?
		    BTREE_INTKEY : 0, p->pSrc->pBt->dbStorage,
		    p->srcCur->dbp->app_private, p->srcCur->dbp);
		/*
		 * Copy the current table until the given number of
		 * pages is copied, or the entire table has been copied.
		 */
		while (*pages < 0 || *pages > copied) {
			DBT key, data;
			memset(&key, 0, sizeof(key));
			memset(&data, 0, sizeof(data));
			/* Do a Bulk Get from the source table. */
			ret = p->srcCur->get(p->srcCur, &key, &dataOut,
			    DB_NEXT | DB_MULTIPLE_KEY);
			if (ret == DB_NOTFOUND)
				break;
			if (ret != 0)
				goto err;
			/* Copy the records into the Bulk buffer. */
			DB_MULTIPLE_INIT(out, &dataOut);
			DB_MULTIPLE_WRITE_INIT(in, &dataIn);
			DB_MULTIPLE_KEY_NEXT(out, &dataOut, key.data,
			    key.size, data.data, data.size);
			/* out becomes NULL when the bulk buffer is drained. */
			while (out) {
				/*
				 * Have to translate the index formats if they
				 * are not the same.
				 */
				if (p->destCur.isDupIndex != srcIsDupIndex) {
					if (srcIsDupIndex) {
						p->destCur.key = key;
						p->destCur.data = data;
						if (!btreeCreateIndexKey(
						    &p->destCur)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						DB_MULTIPLE_KEY_WRITE_NEXT(in,
						    &dataIn,
						    p->destCur.index.data,
						    p->destCur.index.size,
						    p->destCur.data.data, 0);
					} else {
						/* Copy the key into the cursor
						 * index since spliting the key
						 * requires changing the
						 * internal memory.
						 */
						if (!allocateCursorIndex(
						    &p->destCur, key.size)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						memcpy(p->destCur.index.data,
						    key.data, key.size);
						p->destCur.index.size =
						    key.size;
						p->destCur.key.data =
						    p->destCur.index.data;
						p->destCur.key.size =
						    p->destCur.index.size;
						splitIndexKey(&p->destCur);
						DB_MULTIPLE_KEY_WRITE_NEXT(
						    in, &dataIn,
						    p->destCur.key.data,
						    p->destCur.key.size,
						    p->destCur.data.data,
						    p->destCur.data.size);
					}
				} else
					DB_MULTIPLE_KEY_WRITE_NEXT(in, &dataIn,
					    key.data, key.size,
					    data.data, data.size);
				DB_MULTIPLE_KEY_NEXT(out, &dataOut,
				    key.data, key.size,
				    data.data, data.size);
			}
			/* Insert into the destination table. */
			dbp = p->destCur.cached_db->dbp;
			if ((ret = dbp->put(dbp, p->pDest->savepoint_txn,
			    &dataIn, 0, DB_MULTIPLE_KEY)) != 0)
				goto err;
			/* dbp was only borrowed from the cached_db; the
			 * Btree owns it, so drop the reference. */
			dbp = NULL;
			/* Rough progress estimate: one full bulk buffer
			 * approximates this many default-sized pages. */
			copied += MULTI_BUFSIZE/SQLITE_DEFAULT_PAGE_SIZE;
		}
		/*
		 * Done copying the current table, time to look for a new
		 * table to copy.
		 */
		if (ret == DB_NOTFOUND) {
			ret = 0;
			rc = sqlite3BtreeCloseCursor(&p->destCur);
			if (p->srcCur) {
				/* Closing the source cursor returns dbp
				 * ownership to us; close it and free its
				 * app_private allocation. */
				app = p->srcCur->dbp->app_private;
				dbp = p->srcCur->dbp;
				p->srcCur->close(p->srcCur);
				ret = dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
			}
			p->srcCur = NULL;
			if (ret != 0 || rc != SQLITE_OK)
				goto err;
			p->currentTable += 1;
		}
	}
	goto done;
err:	if (ret == SQLITE_DONE)
		return ret;
done:	return MAP_ERR(rc, ret);
}
Beispiel #3
0
// Begin building a bulk buffer inside the caller-supplied Dbt.
// Stores a reference to the Dbt and primes the internal write cursor
// (p_) with DB_MULTIPLE_WRITE_INIT so subsequent append operations can
// pack records into the Dbt's user-owned memory.
// NOTE(review): the Dbt is presumably expected to have DB_DBT_USERMEM
// set with data/ulen filled in, as Berkeley DB requires for bulk
// buffers — confirm against the class's callers.
DbMultipleBuilder::DbMultipleBuilder(Dbt &dbt) : dbt_(dbt)
{
	DB_MULTIPLE_WRITE_INIT(p_, dbt_.get_DBT());
}
Beispiel #4
0
/* 批量插入示例函数。*/
/*
 * Bulk-insert example: inserts insert_count key/data pairs in batches
 * of bulk_size, each batch via a single transactional DB->put with
 * DB_MULTIPLE.  Uses the file-scope envp/dbp/tid handles.  Returns
 * NULL; exits the process on any unrecoverable database error.
 */
void *
run_bulk_insert()
{
    int raw_key[NUM_KEY_INT];
    char raw_data[DATA_SIZE];
    int bulk_size = 100;
    DBT key, data;
    int insert_count, i, ret, op_flag;
    insert_count = 10000;

    char *key_buf, *data_buf;
    void *p;
    int j;

    /* Initialize structs and arrays */
    memset(raw_key, '1', KEY_SIZE);
    memset(raw_data, '1', DATA_SIZE);
    memset(&key, 0, sizeof(DBT));
    memset(&data, 0, sizeof(DBT));
    tid = NULL;

    /*
     * Allocate the bulk buffers.  The factor of two leaves headroom for
     * the per-record bookkeeping the DB_MULTIPLE_WRITE macros add on
     * top of the raw key/data bytes.
     */
    key_buf = (char *)malloc(KEY_SIZE * bulk_size * 2);
    data_buf = (char *)malloc(DATA_SIZE * bulk_size * 2);
    if (key_buf == NULL || data_buf == NULL) {
        /* The original wrote through these unchecked. */
        free(key_buf);
        free(data_buf);
        exit(EXIT_FAILURE);
    }
    memset(key_buf, 0, KEY_SIZE * bulk_size * 2);
    memset(data_buf, 0, DATA_SIZE * bulk_size * 2);

    /*
     * Initialize the bulk buffer DBTs.  Bulk operations (bulk
     * insert/delete/update/read) must use user-supplied memory, so set
     * the DBT flags to DB_DBT_USERMEM and fill in the ulen member
     * rather than size.
     */
    key.data = key_buf;
    key.ulen = KEY_SIZE * bulk_size * 2;
    key.flags = DB_DBT_USERMEM;
    data.data = data_buf;
    data.ulen = DATA_SIZE * bulk_size * 2;
    data.flags = DB_DBT_USERMEM;

    op_flag = DB_MULTIPLE; /* Tells put/get/del to operate in bulk. */

    /*
     * Fill the data bulk buffer once up front.  DB_MULTIPLE_WRITE_INIT
     * primes the working pointer p for this DBT and must precede any
     * DB_MULTIPLE_WRITE_NEXT calls.
     */
    DB_MULTIPLE_WRITE_INIT(p, &data);
    for (i = 0; i < bulk_size; i++) {
        /*
         * DB_MULTIPLE_WRITE_NEXT appends one item to the bulk buffer.
         * The buffer must be large enough; otherwise the macro writes
         * out of bounds.
         *
         * Arguments:
         *   p        - working pointer initialized above; must be
         *              reused across every call for this buffer.
         *   data     - the bulk data DBT.
         *   raw_data - address of the item to append.  Items may have
         *              arbitrary (even varying) lengths up to the DBT
         *              limit of 2^32 bytes; fixed-size items here are
         *              just a simplification.
         *   DATA_SIZE- length of this item.
         *
         * After the loop the buffer holds bulk_size data items.
         */
        raw_data[20] = i;
        DB_MULTIPLE_WRITE_NEXT(p, &data, raw_data, DATA_SIZE);
    }

    /*
     * Insert insert_count key/data pairs, bulk_size per batch.  Only
     * one batch of data items was prepared, so the inserted data
     * repeats across batches; that does not matter for this example.
     */
    for (i = 0; i < insert_count / bulk_size; ) {
        /*
         * Fill the key buffer.  Afterwards key item i pairs with data
         * item i of the data buffer (i = 0, 1, ... bulk_size - 1).
         */
        DB_MULTIPLE_WRITE_INIT(p, &key);
        for (j = i * bulk_size; j < (i + 1) * bulk_size; j++) {
            raw_key[0] = j;
            DB_MULTIPLE_WRITE_NEXT(p, &key, raw_key, KEY_SIZE);
        }

        /* Begin a transaction for this batch. */
        if ((ret = envp->txn_begin(envp, NULL, &tid, 0)) != 0) {
            envp->err(envp, ret, "[insert] DB_ENV->txn_begin");
            exit(EXIT_FAILURE);
        }

        /*
         * Perform the bulk insert.  The key and data DBTs hold the
         * same number of items; item i of each forms one key/data
         * pair in the database (i = 0, 1, ... bulk_size - 1).
         */
        switch (ret = dbp->put(dbp, tid, &key, &data, op_flag)) {
        case 0: /* Success: commit the transaction. */
            if ((ret = tid->commit(tid, 0)) != 0) {
                envp->err(envp, ret, "[insert] DB_TXN->commit");
                exit(EXIT_FAILURE);
            }
            break;
        case DB_LOCK_DEADLOCK:
            /* A deadlocked transaction must be aborted; the batch is
             * then retried from the top of the loop. */
            if ((ret = tid->abort(tid)) != 0) {
                envp->err(envp, ret, "[insert] DB_TXN->abort");
                exit(EXIT_FAILURE);
            }
            continue;
        default:
            /*
             * The original message dereferenced insert_load, a pointer
             * that was declared but never initialized (undefined
             * behavior); report the batch index instead.
             */
            envp->err(envp, ret, "[insert] DB->put (batch %d)", i);
            exit(EXIT_FAILURE);
        }

        i++;
    }

    free(key_buf);
    free(data_buf);

    return (NULL);
}
Beispiel #5
0
/* 批量插入示例函数。*/
void *
run_bulk_delete()
{
    int raw_key[NUM_KEY_INT];
    DBT key;
    DB_ENV *envp;
    int bulk_size = 100;
    DB *dbp;
    DB_TXN *tid;
    int *delete_load;
    int delete_count, i, ret, op_flag;

    char *key_buf;
    void *p;
    int j;

    /* Initialize structs and arrays */
    memset(raw_key, 0, KEY_SIZE);
    memset(&key, 0, sizeof(DBT));
    tid = NULL;

    /*
     * 初始化批量删除使用的key buffer。由于批量删除不需要data,
     * 所以只需要初始化和填充key buffer。我们同样需要使用自己分配的内存。
     */
    key_buf = (char*) malloc(KEY_SIZE * bulk_size * 2);
    memset(key_buf, 0, KEY_SIZE * bulk_size * 2);

    /* 初始化key buffer DBT 对象,设置正确的flags和ulen成员。 */
    key.data = key_buf;
    key.ulen = KEY_SIZE * bulk_size * 2;
    key.flags = DB_DBT_USERMEM;
    op_flag = DB_MULTIPLE; /* 批量删除同样需要这个flag。*/

    /*
     * 批量删除所有的数据。每一批删除由key buffer DBT 当中的key
     * 指定的bulk_size条key/data pair. 这两个宏的详细用法见上文。
     */
    for (i = 0; i < delete_count / bulk_size; ) {
        /* 为批量删除初始化并填充一个key buffer DBT 对象。 */
        DB_MULTIPLE_WRITE_INIT(p, &key);
        for (j = i * bulk_size; j < (i + 1) * bulk_size; j++) {
            raw_key[0] = delete_load[j];
            DB_MULTIPLE_WRITE_NEXT(p, &key, raw_key, KEY_SIZE);
        }
        /* 启动事务。*/
        if ((ret = envp->txn_begin(envp, NULL, &tid, 0)) != 0) {
            envp->err(envp, ret, "[delete] DB_ENV->txn_begin");
            exit(EXIT_FAILURE);
        }

        /*
         * 执行批量删除。key buffer DBT
         * 当中的bulk_size条key指定的key/data pairs会被从数据库当中删除。
         */
        switch(ret = dbp->del(dbp, tid, &key, op_flag)) {
        case 0: /* 批量删除操作成功,提交事务。*/
            if ((ret = tid->commit(tid, 0)) != 0) {
                envp->err(envp, ret, "[delete] DB_TXN->commit");
                exit(EXIT_FAILURE);
            }
            break;
        case DB_LOCK_DEADLOCK:
            /* 如果数据库操作发生死锁,那么必须abort事务。然后,可以选择重新执行该操作。*/
            if ((ret = tid->abort(tid)) != 0) {
                envp->err(envp, ret, "[delete] DB_TXN->abort");
                exit(EXIT_FAILURE);
            }
            continue;
        default:
            envp->err(envp, ret, "[delete] DB->del ([%d]%d)", i, delete_load[i]);
            exit(EXIT_FAILURE);
        }
        i++;
    }

    (void)free(key_buf);

    return (NULL);
}