author     Lorry Tar Creator <lorry-tar-importer@baserock.org>   2015-02-17 17:25:57 +0000
committer  <>                                                    2015-03-17 16:26:24 +0000
commit     780b92ada9afcf1d58085a83a0b9e6bc982203d1 (patch)
tree       598f8b9fa431b228d29897e798de4ac0c1d3d970 /lang/sql/adapter
parent     7a2660ba9cc2dc03a69ddfcfd95369395cc87444 (diff)
download   berkeleydb-master.tar.gz
Imported from /home/lorry/working-area/delta_berkeleydb/db-6.1.23.tar.gz. (refs: HEAD, db-6.1.23, master)
Diffstat (limited to 'lang/sql/adapter')
-rw-r--r--  lang/sql/adapter/backup.c | 59
-rw-r--r--  lang/sql/adapter/backup.h | 2
-rw-r--r--  lang/sql/adapter/btmutex.c | 2
-rw-r--r--  lang/sql/adapter/btree.c | 1406
-rw-r--r--  lang/sql/adapter/btreeInt.h | 31
-rw-r--r--  lang/sql/adapter/db_encrypt.c | 30
-rw-r--r--  lang/sql/adapter/db_pragma.c | 386
-rw-r--r--  lang/sql/adapter/db_sequence.c | 103
-rw-r--r--  lang/sql/adapter/db_shell.c | 2
-rw-r--r--  lang/sql/adapter/pager.c | 56
-rw-r--r--  lang/sql/adapter/pager.h | 27
-rw-r--r--  lang/sql/adapter/pcache.c | 2
-rw-r--r--  lang/sql/adapter/pcache.h | 6
-rw-r--r--  lang/sql/adapter/pcache1.c | 10
-rw-r--r--  lang/sql/adapter/sqlite-patches/01_sqlite_excl_test.patch | 87
-rw-r--r--  lang/sql/adapter/sqlite-patches/02_sqlite_test.patch | 332
-rw-r--r--  lang/sql/adapter/sqlite-patches/03_editline.patch | 126
-rw-r--r--  lang/sql/adapter/sqlite-patches/04_build_config.patch | 8
-rw-r--r--  lang/sql/adapter/sqlite-patches/05_shell_config.patch | 4
-rw-r--r--  lang/sql/adapter/sqlite-patches/07_shell_prompt.patch | 10
-rw-r--r--  lang/sql/adapter/sqlite-patches/08_errno_header.patch | 16
-rw-r--r--  lang/sql/adapter/sqlite-patches/09_comment_tests.patch | 70
-rw-r--r--  lang/sql/adapter/sqlite-patches/10_compile_options.patch | 2
-rw-r--r--  lang/sql/adapter/sqlite-patches/11_android_shell.patch | 6
-rw-r--r--  lang/sql/adapter/sqlite-patches/12_e_fts3_test.patch | 2
-rw-r--r--  lang/sql/adapter/sqlite-patches/13_malloc_test.patch | 12
-rw-r--r--  lang/sql/adapter/sqlite-patches/14_custom_pragma.patch | 41
-rw-r--r--  lang/sql/adapter/sqlite-patches/15_bdb_stat.patch | 18
-rw-r--r--  lang/sql/adapter/sqlite-patches/16_bdb_deadlock.patch | 28
-rw-r--r--  lang/sql/adapter/sqlite-patches/17_encryption.patch | 26
-rw-r--r--  lang/sql/adapter/sqlite-patches/18_vacuum_test.patch | 6
-rw-r--r--  lang/sql/adapter/sqlite-patches/19_backup_test.patch | 147
-rw-r--r--  lang/sql/adapter/sqlite-patches/22_unique_key.patch | 79
-rw-r--r--  lang/sql/adapter/sqlite-patches/23_sequence_functions.patch | 76
-rw-r--r--  lang/sql/adapter/sqlite-patches/24_exclusive_error_handling.patch | 52
-rw-r--r--  lang/sql/adapter/sqlite-patches/25_tester.patch | 269
-rw-r--r--  lang/sql/adapter/sqlite-patches/26_solaris_build.patch | 4
-rw-r--r--  lang/sql/adapter/sqlite-patches/27_sqlthread.patch | 97
-rw-r--r--  lang/sql/adapter/sqlite-patches/28_wal_pragma.patch | 21
-rw-r--r--  lang/sql/adapter/sqlite-patches/29_manydb_test.patch | 2
-rw-r--r--  lang/sql/adapter/sqlite-patches/30_handle_cache.patch | 35
-rw-r--r--  lang/sql/adapter/sqlite-patches/31_eqp_test.patch | 18
-rw-r--r--  lang/sql/adapter/sqlite-patches/32_permutations_test.patch | 63
-rw-r--r--  lang/sql/adapter/sqlite-patches/33_vdbe_assert.patch | 27
-rw-r--r--  lang/sql/adapter/vacuum.c | 4
-rw-r--r--  lang/sql/adapter/wal.c | 2
-rw-r--r--  lang/sql/adapter/wal.h | 2
47 files changed, 2015 insertions, 1799 deletions
diff --git a/lang/sql/adapter/backup.c b/lang/sql/adapter/backup.c
index 0ebe42b7..29b9e203 100644
--- a/lang/sql/adapter/backup.c
+++ b/lang/sql/adapter/backup.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
-* Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
** This file contains the implementation of the sqlite3_backup_XXX()
@@ -323,7 +323,7 @@ int btreeDeleteEnvironment(Btree *p, const char *home, int rename)
int rc, ret, iDb, storage;
sqlite3 *db;
DB_ENV *tmp_env;
- char path[512];
+ char path[BT_MAX_PATH];
#ifdef BDBSQL_FILE_PER_TABLE
int numFiles;
char **files;
@@ -355,6 +355,7 @@ int btreeDeleteEnvironment(Btree *p, const char *home, int rename)
if (home == NULL)
goto done;
+ sqlite3_snprintf(sizeof(path), path, "%s-journal", home);
ret = btreeCleanupEnv(path);
/* EFAULT can be returned on Windows when the file does not exist.*/
if (ret == ENOENT || ret == EFAULT)
@@ -471,7 +472,7 @@ static int backupCleanup(sqlite3_backup *p)
if (p->rc == SQLITE_DONE)
rc2 = sqlite3BtreeCommit(p->pDest);
else
- rc2 = sqlite3BtreeRollback(p->pDest);
+ rc2 = sqlite3BtreeRollback(p->pDest, SQLITE_OK);
if (rc2 != SQLITE_OK)
rc = rc2;
}
@@ -493,36 +494,30 @@ static int backupCleanup(sqlite3_backup *p)
if (!__os_exists(NULL, path, 0))
__os_rename(NULL, path, p->fullName, 0);
}
+ if (rc2 != SQLITE_BUSY) {
+ p->pDest = p->pDestDb->aDb[p->iDb].pBt = NULL;
+ p->pDestDb->aDb[p->iDb].pSchema = NULL;
+ }
if (rc == SQLITE_OK)
rc = rc2;
if (rc == SQLITE_OK) {
p->pDest = NULL;
p->pDestDb->aDb[p->iDb].pBt = NULL;
p->openDest = 0;
- rc = sqlite3BtreeOpen(p->fullName, p->pDestDb,
+ rc = sqlite3BtreeOpen(NULL, p->fullName, p->pDestDb,
&p->pDest,
SQLITE_DEFAULT_CACHE_SIZE | SQLITE_OPEN_MAIN_DB,
p->pDestDb->openFlags);
p->pDestDb->aDb[p->iDb].pBt = p->pDest;
- if (rc == SQLITE_OK) {
- p->pDestDb->aDb[p->iDb].pSchema =
+ if (p->pDest) {
+ p->pDestDb->aDb[p->iDb].pSchema =
sqlite3SchemaGet(p->pDestDb, p->pDest);
- if (!p->pDestDb->aDb[p->iDb].pSchema)
- p->rc = SQLITE_NOMEM;
- } else
- p->pDestDb->aDb[p->iDb].pSchema = NULL;
+ if (p->pDestDb->aDb[p->iDb].pSchema == NULL)
+ rc = SQLITE_NOMEM;
+ }
if (rc == SQLITE_OK)
p->pDest->pBt->db_oflags |= DB_CREATE;
- /*
- * Have to delete the schema here on error to avoid
- * assert failure.
- */
- if (p->pDest == NULL &&
- p->pDestDb->aDb[p->iDb].pSchema != NULL) {
- sqlite3SchemaClear(
- p->pDestDb->aDb[p->iDb].pSchema);
- p->pDestDb->aDb[p->iDb].pSchema = NULL;
- }
+
#ifdef SQLITE_HAS_CODEC
if (rc == SQLITE_OK) {
if (p->iDb == 0)
@@ -612,13 +607,19 @@ int sqlite3_backup_step(sqlite3_backup *p, int nPage) {
storage = p->pDest->pBt->dbStorage;
if (storage == DB_STORE_NAMED)
p->openDest = 1;
+ if (strcmp(p->destName, "temp") == 0)
+ p->pDest->schema = NULL;
+ else
+ p->pDestDb->aDb[p->iDb].pSchema = NULL;
+
p->rc = btreeDeleteEnvironment(p->pDest, p->fullName, 1);
if (storage == DB_STORE_INMEM && strcmp(p->destName, "temp")
!= 0)
home = inmem;
else
home = p->fullName;
- p->pDest = p->pDestDb->aDb[p->iDb].pBt;
+ if (p->rc != SQLITE_BUSY)
+ p->pDest = p->pDestDb->aDb[p->iDb].pBt = NULL;
if (p->rc != SQLITE_OK)
goto err;
/*
@@ -632,18 +633,20 @@ int sqlite3_backup_step(sqlite3_backup *p, int nPage) {
parse.db = p->pDestDb;
p->rc = sqlite3OpenTempDatabase(&parse);
p->pDest = p->pDestDb->aDb[p->iDb].pBt;
+ if (p->pDest && p->iDb != 1)
+ p->pDest->schema =
+ p->pDestDb->aDb[p->iDb].pSchema;
} else {
- p->rc = sqlite3BtreeOpen(home, p->pDestDb,
+ p->rc = sqlite3BtreeOpen(NULL, home, p->pDestDb,
&p->pDest, SQLITE_DEFAULT_CACHE_SIZE |
SQLITE_OPEN_MAIN_DB, p->pDestDb->openFlags);
p->pDestDb->aDb[p->iDb].pBt = p->pDest;
- if (p->rc == SQLITE_OK) {
- p->pDestDb->aDb[p->iDb].pSchema =
+ if (p->pDest) {
+ p->pDestDb->aDb[p->iDb].pSchema =
sqlite3SchemaGet(p->pDestDb, p->pDest);
- if (!p->pDestDb->aDb[p->iDb].pSchema)
+ if (p->pDestDb->aDb[p->iDb].pSchema == NULL)
p->rc = SQLITE_NOMEM;
- } else
- p->pDestDb->aDb[p->iDb].pSchema = NULL;
+ }
}
if (p->pDest)
@@ -726,7 +729,7 @@ int sqlite3_backup_step(sqlite3_backup *p, int nPage) {
p->rc = btreeCopyPages(p, &pages);
if (p->rc == SQLITE_DONE) {
p->nRemaining = 0;
- sqlite3ResetInternalSchema(p->pDestDb, p->iDb);
+ sqlite3ResetOneSchema(p->pDestDb, p->iDb);
memset(&parse, 0, sizeof(parse));
parse.db = p->pDestDb;
p->rc = sqlite3ReadSchema(&parse);
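
Note: backup.c implements the sqlite3_backup_XXX() interface named in its header comment on top of Berkeley DB; the hunks above mainly adapt it to newer SQLite internals (sqlite3BtreeOpen() now takes a VFS argument, sqlite3ResetOneSchema() replaces sqlite3ResetInternalSchema(), sqlite3BtreeRollback() takes a trip code) and tighten how the destination Btree and its schema are reopened after a copy. For orientation, here is a minimal sketch of how an application typically drives the public backup API; this is standard SQLite usage, not part of this commit, and the page count and retry policy are illustrative only.

#include <sqlite3.h>

/* Copy the "main" database of pSrc into pDst via the online backup API. */
static int copy_db(sqlite3 *pSrc, sqlite3 *pDst)
{
	sqlite3_backup *pBackup;
	int rc;

	pBackup = sqlite3_backup_init(pDst, "main", pSrc, "main");
	if (pBackup == NULL)
		return sqlite3_errcode(pDst);

	do {
		/* Copy up to 64 pages per step; -1 would copy everything at once. */
		rc = sqlite3_backup_step(pBackup, 64);
		if (rc == SQLITE_BUSY || rc == SQLITE_LOCKED)
			sqlite3_sleep(100);	/* let a writer finish, then retry */
	} while (rc == SQLITE_OK || rc == SQLITE_BUSY || rc == SQLITE_LOCKED);

	/* finish() releases resources and reports the first error, if any. */
	return sqlite3_backup_finish(pBackup);
}
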
diff --git a/lang/sql/adapter/backup.h b/lang/sql/adapter/backup.h
index b5d0db5e..c1a96732 100644
--- a/lang/sql/adapter/backup.h
+++ b/lang/sql/adapter/backup.h
@@ -1,6 +1,6 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
typedef struct sqlite3_backup sqlite3_backup;
diff --git a/lang/sql/adapter/btmutex.c b/lang/sql/adapter/btmutex.c
index ad8d5d17..9e55e373 100644
--- a/lang/sql/adapter/btmutex.c
+++ b/lang/sql/adapter/btmutex.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/lang/sql/adapter/btree.c b/lang/sql/adapter/btree.c
index 6878f005..6ee85237 100644
--- a/lang/sql/adapter/btree.c
+++ b/lang/sql/adapter/btree.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -89,10 +89,8 @@ typedef struct {
#define ROWIDMAXSIZE 10
/* Forward declarations for internal functions. */
-static int btreeCleanupCachedHandles(Btree *p, cleanup_mode_t cleanup);
static int btreeCloseCursor(BtCursor *pCur, int removeList);
-static int btreeCompressInt(u_int8_t *buf, u_int64_t i);
-static int btreeConfigureDbHandle(Btree *p, int iTable, DB **dbpp);
+static int btreeConfigureDbHandle(Btree *p, int iTable, DB **dbpp, int);
static int btreeCreateDataTable(Btree *, int, CACHED_DB **);
static int btreeCreateSharedBtree(
Btree *, const char *, u_int8_t *, sqlite3 *, int, storage_mode_t);
@@ -102,11 +100,12 @@ static void btreeHandleDbError(
static int btreeDbHandleIsLocked(CACHED_DB *cached_db);
static int btreeDbHandleLock(Btree *p, CACHED_DB *cached_db);
static int btreeDbHandleUnlock(Btree *p, CACHED_DB *cached_db);
-static int btreeDecompressInt(const u_int8_t *buf, u_int64_t *i);
static void btreeFreeSharedBtree(BtShared *p, int clear_cache);
static int btreeGetSharedBtree(
BtShared **, u_int8_t *, sqlite3 *, storage_mode_t, int);
-static int btreeInvalidateHandleCache(Btree *p);
+static int btreeHandleCacheCleanup(Btree *p, cleanup_mode_t cleanup);
+static int btreeHandleCacheClear(Btree *p);
+static int btreeHandleCacheLockUpdate(Btree *p, cleanup_mode_t cleanup);
static int btreeLoadBufferIntoTable(BtCursor *pCur);
static int btreeMoveto(BtCursor *pCur,
const void *pKey, i64 nKey, int bias, int *pRes);
@@ -194,10 +193,11 @@ u_int32_t g_uid_next = 0;
#define GET_BTREE_ISOLATION(p) 0
#else
#define GET_BTREE_ISOLATION(p) (!p->pBt->transactional ? 0 : \
- ((p->db->flags & SQLITE_ReadUncommitted) ? \
+ ((p->pBt->blobs_enabled) ? DB_READ_COMMITTED : \
+ (((p->db->flags & SQLITE_ReadUncommitted) ? \
DB_READ_UNCOMMITTED : DB_READ_COMMITTED) | \
((p->pBt->read_txn_flags & DB_TXN_SNAPSHOT) ? \
- DB_TXN_SNAPSHOT : 0))
+ DB_TXN_SNAPSHOT : 0))))
#endif
/* The transaction for incrblobs is held in the cursor, so when deadlock
@@ -419,9 +419,9 @@ void log_msg(loglevel_t level, const char *fmt, ...)
if (level >= CURRENT_LOG_LEVEL) {
va_list ap;
va_start(ap, fmt);
- vfprintf(stdout, fmt, ap);
- fputc('\n', stdout);
- fflush(stdout);
+ vfprintf(stderr, fmt, ap);
+ fputc('\n', stderr);
+ fflush(stderr);
va_end(ap);
}
}
@@ -470,12 +470,93 @@ static char *btreeStrdup(const char *sq)
}
#endif
-static int btreeCompareIntKey(DB *dbp, const DBT *dbt1, const DBT *dbt2)
+
+/*
+ * Handles various events in BDB.
+ */
+static void btreeEventNotification(
+ DB_ENV *dbenv,
+ u_int32_t event,
+ void *event_info
+) {
+
+ BtShared *pBt;
+ DB_SITE *site;
+ char *address, addr_buf[BT_MAX_PATH], *old_addr;
+ const char *host;
+ int *eid;
+ u_int port;
+
+ pBt = (BtShared *)dbenv->app_private;
+ eid = NULL;
+ old_addr = NULL;
+
+ if (!pBt)
+ return;
+
+ switch (event) {
+ case DB_EVENT_REP_CLIENT:
+ sqlite3_mutex_enter(pBt->mutex);
+ pBt->repRole = BDBSQL_REP_CLIENT;
+ sqlite3_mutex_leave(pBt->mutex);
+ break;
+ case DB_EVENT_REP_INIT_DONE:
+ break;
+ case DB_EVENT_REP_JOIN_FAILURE:
+ break;
+ case DB_EVENT_REP_MASTER:
+ pDbEnv->repmgr_local_site(pDbEnv, &site);
+ site->get_address(site, &host, &port);
+ sqlite3_snprintf(sizeof(addr_buf), addr_buf,
+ "%s:%i", host, port);
+ if ((address = sqlite3_strdup(addr_buf)) == NULL)
+ return;
+ sqlite3_mutex_enter(pBt->mutex);
+ if (pBt->master_address)
+ old_addr = pBt->master_address;
+ pBt->repRole = BDBSQL_REP_MASTER;
+ pBt->master_address = address;
+ sqlite3_mutex_leave(pBt->mutex);
+ if (old_addr)
+ sqlite3_free(old_addr);
+ break;
+ case DB_EVENT_REP_NEWMASTER:
+ eid = (int *)event_info;
+ pDbEnv->repmgr_site_by_eid(pDbEnv, *eid, &site);
+ site->get_address(site, &host, &port);
+ sqlite3_snprintf(sizeof(addr_buf), addr_buf,
+ "%s:%i", host, port);
+ if ((address = sqlite3_strdup(addr_buf)) == NULL)
+ return;
+ sqlite3_mutex_enter(pBt->mutex);
+ if (pBt->master_address)
+ old_addr = pBt->master_address;
+ pBt->master_address = address;
+ sqlite3_mutex_leave(pBt->mutex);
+ if (old_addr)
+ sqlite3_free(old_addr);
+ break;
+ case DB_EVENT_REP_PERM_FAILED:
+ sqlite3_mutex_enter(pBt->mutex);
+ pBt->permFailures++;
+ sqlite3_mutex_leave(pBt->mutex);
+ break;
+ case DB_EVENT_REP_STARTUPDONE:
+ break;
+ default:
+ break;
+ }
+}
+
+static int btreeCompareIntKey(DB *dbp,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp)
{
i64 v1,v2;
assert(dbt1->size == sizeof(i64));
assert(dbt2->size == sizeof(i64));
+ locp = NULL;
+
memcpy(&v1, dbt1->data, sizeof(i64));
memcpy(&v2, dbt2->data, sizeof(i64));
if (v1 < v2)
@@ -483,6 +564,36 @@ static int btreeCompareIntKey(DB *dbp, const DBT *dbt1, const DBT *dbt2)
return v1 > v2;
}
+static int btreeCompareDup(DB *dbp,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp)
+{
+ Mem mem1, mem2;
+ const unsigned char *data1, *data2;
+ u32 serial_type1, serial_type2;
+
+ /* This path can happen when loading an in memory index onto disk. */
+ if (dbt1->size == 0) {
+ assert(dbt2->size == 0);
+ return 0;
+ }
+
+ memset(&mem1, 0, sizeof(Mem));
+ memset(&mem2, 0, sizeof(Mem));
+
+ data1 = (const unsigned char *) dbt1->data;
+ data2 = (const unsigned char *) dbt2->data;
+
+ /* Get the value types. */
+ getVarint32(data1, serial_type1);
+ getVarint32(data2, serial_type2);
+
+ /* Set the values in the Mems. */
+ sqlite3VdbeSerialGet(&data1[1], serial_type1, &mem1);
+ sqlite3VdbeSerialGet(&data2[1], serial_type2, &mem2);
+
+ return sqlite3MemCompare(&mem1, &mem2, NULL);
+}
+
#ifdef BDBSQL_CONVERT_SQLITE
static int btreeConvertSqlite(BtShared *pBt, DB_ENV *tmp_env)
{
@@ -558,6 +669,7 @@ int btreeOpenMetaTables(Btree *p, int *pCreating)
DB_ENV *tmp_env;
char *fileName;
int i, idx, rc, ret, t_ret;
+ u_int32_t blob_threshold, oflags;
u32 val;
#ifdef BDBSQL_FILE_PER_TABLE
char **dirnames;
@@ -628,6 +740,10 @@ int btreeOpenMetaTables(Btree *p, int *pCreating)
* returning the error.
*/
i = 0;
+ /* DB_READ_UNCOMMITTED is incompatible with blobs. */
+ if (pBt->blobs_enabled)
+ pBt->db_oflags &= ~DB_READ_UNCOMMITTED;
+ oflags = pBt->db_oflags;
do {
if ((ret = db_create(&pMetaDb, pDbEnv, 0)) != 0)
goto err;
@@ -657,6 +773,20 @@ int btreeOpenMetaTables(Btree *p, int *pCreating)
pBt->pageSizeFixed = 1;
+ /*
+ * Set blob threshold. The blob threshold has to be set
+ * when the database file is created, or else no other
+ * database in the file can support blobs. That is why
+ * it is set on the metadata table, even though it will
+ * never have any items large enough to become a blob.
+ */
+ if (pBt->dbStorage == DB_STORE_NAMED
+ && pBt->blob_threshold > 0) {
+ if ((ret = pMetaDb->set_blob_threshold(
+ pMetaDb, UINT32_MAX, 0)) != 0)
+ goto err;
+ }
+
#ifdef BDBSQL_FILE_PER_TABLE
fileName = BDBSQL_META_DATA_TABLE;
#else
@@ -664,14 +794,29 @@ int btreeOpenMetaTables(Btree *p, int *pCreating)
#endif
ret = pMetaDb->open(pMetaDb, NULL, fileName,
pBt->dbStorage == DB_STORE_NAMED ? "metadb" : NULL,
- DB_BTREE,
- pBt->db_oflags | GET_AUTO_COMMIT(pBt, NULL) |
+ DB_BTREE, oflags | GET_AUTO_COMMIT(pBt, NULL) |
GET_ENV_READONLY(pBt), 0);
if (ret == DB_LOCK_DEADLOCK || ret == DB_LOCK_NOTGRANTED) {
(void)pMetaDb->close(pMetaDb, DB_NOSYNC);
pMetaDb = NULL;
}
+ /*
+ * It is possible that the meta database already exists, and
+ * that it supports blobs even though the current blob
+ * threshold is 0. If that is the case, opening it
+ * with DB_READ_UNCOMMITTED will return EINVAL. If EINVAL
+ * is returned, strip out DB_READ_UNCOMMITTED and try again.
+ */
+ if (ret == EINVAL &&
+ (pBt->db_oflags & DB_READ_UNCOMMITTED)
+ && pBt->blob_threshold == 0) {
+ oflags &= ~DB_READ_UNCOMMITTED;
+ pBt->db_oflags = oflags;
+ (void)pMetaDb->close(pMetaDb, DB_NOSYNC);
+ pMetaDb = NULL;
+ continue;
+ }
} while ((ret == DB_LOCK_DEADLOCK || ret == DB_LOCK_NOTGRANTED) &&
++i < BUSY_RETRY_COUNT);
@@ -682,6 +827,22 @@ int btreeOpenMetaTables(Btree *p, int *pCreating)
rc = SQLITE_NOTADB;
goto err;
}
+ /* Cache the page size for later queries. */
+ pMetaDb->get_pagesize(pMetaDb, &pBt->pageSize);
+
+ /*
+ * Check if this database supports blobs. If the meta database
+ * has a blob threshold of 0, then no other database in the file
+ * can support blobs.
+ */
+ if ((ret =
+ pMetaDb->get_blob_threshold(pMetaDb, &blob_threshold)) != 0)
+ goto err;
+ if (blob_threshold > 0) {
+ pBt->blobs_enabled = 1;
+ pBt->db_oflags &= ~DB_READ_UNCOMMITTED;
+ } else
+ pBt->blobs_enabled = 0;
/* Set the default max_page_count */
sqlite3BtreeMaxPageCount(p, pBt->pageCount);
@@ -753,6 +914,11 @@ addmeta:/*
for (idx = 0; idx < NUMMETA; idx++) {
if (pBt->meta[idx].cached)
val = pBt->meta[idx].value;
+ /*
+ * TODO: Are the following two special cases still required?
+ * I think they have been replaced by a sqlite3BtreeUpdateMeta
+ * call in sqlite3BtreeSetAutoVacuum.
+ */
else if (idx == BTREE_LARGEST_ROOT_PAGE && *pCreating)
val = pBt->autoVacuum;
else if (idx == BTREE_INCR_VACUUM && *pCreating)
@@ -896,6 +1062,8 @@ static void btreeFreeSharedBtree(BtShared *p, int clear_cache)
sqlite3_free(p->err_file);
if (p->err_msg != NULL)
sqlite3_free(p->err_msg);
+ if (p->master_address != NULL)
+ sqlite3_free(p->master_address);
sqlite3_free(p);
}
@@ -904,6 +1072,7 @@ static int btreeCheckEnvPrepare(Btree *p)
{
BtShared *pBt;
int f_exists, f_isdir, rc;
+ char fullPathBuf[BT_MAX_PATH + 2];
#ifndef BDBSQL_FILE_PER_TABLE
int attrs;
sqlite3_file *fp;
@@ -912,6 +1081,7 @@ static int btreeCheckEnvPrepare(Btree *p)
pBt = p->pBt;
rc = SQLITE_OK;
f_exists = f_isdir = 0;
+ memset(fullPathBuf, 0, sizeof(fullPathBuf));
assert(pBt->dbStorage == DB_STORE_NAMED);
assert(pBt->dir_name != NULL);
@@ -943,7 +1113,9 @@ static int btreeCheckEnvPrepare(Btree *p)
goto err;
}
memset(fp, 0, p->db->pVfs->szOsFile);
- rc = sqlite3OsOpen(p->db->pVfs, pBt->full_name, fp,
+ sqlite3_snprintf(sizeof(fullPathBuf), fullPathBuf,
+ "%s\0\0", pBt->full_name);
+ rc = sqlite3OsOpen(p->db->pVfs, fullPathBuf, fp,
SQLITE_OPEN_MAIN_DB | SQLITE_OPEN_READWRITE,
&attrs);
if (attrs & SQLITE_OPEN_READONLY)
@@ -952,13 +1124,14 @@ static int btreeCheckEnvPrepare(Btree *p)
(void)sqlite3OsClose(fp);
sqlite3_free(fp);
#endif
- /*
- * Always open existing tables, even if the matching
- * env does not exist (yet).
- */
- pBt->env_oflags |= DB_CREATE;
- pBt->need_open = 1;
}
+ /*
+ * Always open an environment, even if it doesn't exist yet. Versions
+ * of DB SQL prior to 5.3 delayed this decision and relied on the
+ * SQLite virtual machine to trigger a create.
+ */
+ pBt->env_oflags |= DB_CREATE;
+ pBt->need_open = 1;
err: return rc;
}
@@ -1011,16 +1184,13 @@ static int btreeCheckEnvOpen(Btree *p, int createdDir, u8 replicate)
#endif
if (pBt->single_process) {
pBt->env_oflags |= DB_PRIVATE | DB_CREATE;
- } else if (!replicate && !pBt->repForceRecover) {
+ } else if (pBt->repForceRecover == 1) {
/*
- * FAILCHK_ISALIVE doesn't currently work with
- * replication. Also, replication can't use DB_REGISTER
- * because it assumes actual recoveries between
- * sessions. Avoid adding these flags if we are running
- * with replication or if this is the first time we are
- * opening the env after turning off replication
- * (repForceRecover).
+ * Force recovery when turning on replication
+ * for the first time, or when turning it off.
*/
+ pBt->env_oflags |= DB_FAILCHK_ISALIVE;
+ } else {
pBt->env_oflags |= DB_FAILCHK_ISALIVE | DB_REGISTER;
}
}
@@ -1201,25 +1371,7 @@ err:
/* See if environment is currently configured as a replication client. */
static int btreeRepIsClient(Btree *p)
{
- DB_REP_STAT *rep_stat;
- BtShared *pBt;
- int is_client;
-
- pBt = p->pBt;
- is_client = 0;
-
- if (!pBt->repStarted)
- return (0);
-
- if (pDbEnv->rep_stat(pDbEnv, &rep_stat, 0) != 0) {
- sqlite3Error(p->db, SQLITE_ERROR,
- "Unable to determine if site is a replication client");
- return (0);
- }
- if (rep_stat->st_status == DB_REP_CLIENT)
- is_client = 1;
- sqlite3_free(rep_stat);
- return (is_client);
+ return (p->pBt->repRole == BDBSQL_REP_CLIENT);
}
/*
@@ -1284,8 +1436,8 @@ static int btreeRepStartupFinished(Btree *p)
* Wait indefinitely because this can take a very long time if a full
* internal initialization is needed.
*/
- if (!startupComplete && repStat->st_status == DB_REP_CLIENT &&
- repStat->st_master != DB_EID_INVALID)
+ if (!startupComplete && pBt->repRole == BDBSQL_REP_CLIENT &&
+ pBt->master_address != NULL)
do {
__os_yield(pDbEnv->env, 2, 0);
if (pDbEnv->rep_stat(pDbEnv, &repStat, 0) != 0) {
@@ -1341,6 +1493,7 @@ static int btreePrepareEnvironment(Btree *p)
pDbEnv->set_errpfx(pDbEnv, pBt->full_name);
pDbEnv->app_private = pBt;
pDbEnv->set_errcall(pDbEnv, btreeHandleDbError);
+ pDbEnv->set_event_notify(pDbEnv, btreeEventNotification);
#ifndef BDBSQL_SINGLE_THREAD
#ifndef BDBSQL_CONCURRENT_CONNECTIONS
pDbEnv->set_flags(pDbEnv, DB_DATABASE_LOCKING, 1);
@@ -1367,7 +1520,7 @@ static int btreePrepareEnvironment(Btree *p)
#endif
if ((ret = pDbEnv->set_lg_max(pDbEnv, pBt->logFileSize)) != 0)
goto err;
-#ifndef BDBSQL_OMIT_LOG_REMOVE
+ #ifndef BDBSQL_OMIT_LOG_REMOVE
if ((ret = pDbEnv->log_set_config(pDbEnv,
DB_LOG_AUTO_REMOVE, 1)) != 0)
goto err;
@@ -1380,7 +1533,7 @@ static int btreePrepareEnvironment(Btree *p)
/* Reuse envDirNameBuf. */
dirPathName = dirPathBuf;
memset(dirPathName, 0, BT_MAX_PATH);
- sqlite3_snprintf(sizeof(dirPathName), dirPathName,
+ sqlite3_snprintf(BT_MAX_PATH, dirPathName,
"../%s", pBt->short_name);
pDbEnv->set_data_dir(pDbEnv, dirPathName);
pDbEnv->set_create_dir(pDbEnv, dirPathName);
@@ -1409,6 +1562,7 @@ static int btreePrepareEnvironment(Btree *p)
pDbEnv->set_errpfx(pDbEnv, "<temp>");
pDbEnv->app_private = pBt;
pDbEnv->set_errcall(pDbEnv, btreeHandleDbError);
+ pDbEnv->set_event_notify(pDbEnv, btreeEventNotification);
pBt->env_oflags |= DB_CREATE | DB_INIT_TXN | DB_PRIVATE;
/*
@@ -1424,6 +1578,12 @@ static int btreePrepareEnvironment(Btree *p)
sqlite3_free);
#endif
pDbEnv->log_set_config(pDbEnv, DB_LOG_IN_MEMORY, 1);
+ /*
+ * SQLite allows users to open in memory databases read only.
+ * Play along by attempting a create.
+ */
+ if (!IS_BTREE_READONLY(p))
+ pBt->need_open = 1;
} else
rc = btreeOpenEnvironment(p, 0);
@@ -1542,7 +1702,7 @@ int btreeReopenEnvironment(Btree *p, int removingRep)
mutexOpen = sqlite3MutexAlloc(OPEN_MUTEX(pBt->dbStorage));
sqlite3_mutex_enter(mutexOpen);
/* Close open DB handles and clear related hash table */
- if ((rc = btreeCleanupCachedHandles(p, CLEANUP_CLOSE)) != SQLITE_OK)
+ if ((rc = btreeHandleCacheCleanup(p, CLEANUP_CLOSE)) != SQLITE_OK)
goto err;
sqlite3HashClear(&pBt->db_cache);
/* close tables and meta databases */
@@ -1665,6 +1825,7 @@ int btreeOpenEnvironment(Btree *p, int needLock)
goto err;
pDbEnv->set_mp_mmapsize(pDbEnv, 0);
pDbEnv->set_errcall(pDbEnv, btreeHandleDbError);
+ pDbEnv->set_event_notify(pDbEnv, btreeEventNotification);
if (pBt->dir_name != NULL) {
createdDir =
(__os_mkdir(NULL, pBt->dir_name, 0777) == 0);
@@ -1751,9 +1912,9 @@ int btreeOpenEnvironment(Btree *p, int needLock)
* shortly during replication client synchronization.
*/
if (replicate) {
- if ((ret = pDbEnv->repmgr_start(pDbEnv, 1,
- pBt->repStartMaster ?
- DB_REP_MASTER : DB_REP_ELECTION)) != 0) {
+ ret = pDbEnv->repmgr_start(pDbEnv, 1,
+ pBt->repStartMaster ? DB_REP_MASTER : DB_REP_ELECTION);
+ if (ret != 0 && ret != DB_REP_IGNORE) {
sqlite3Error(db, SQLITE_CANTOPEN, "Error in "
"replication call repmgr_start");
rc = SQLITE_CANTOPEN;
@@ -1761,7 +1922,7 @@ int btreeOpenEnvironment(Btree *p, int needLock)
}
pBt->repStarted = 1;
- if (!pBt->repStartMaster) {
+ if (!pBt->repStartMaster && ret != DB_REP_IGNORE) {
/*
* Allow time for replication client to hold an
* election and synchronize with the master.
@@ -1794,16 +1955,18 @@ int btreeOpenEnvironment(Btree *p, int needLock)
}
pBt->repStartMaster = 0;
- if (!IS_ENV_READONLY(pBt) && p->vfsFlags & SQLITE_OPEN_CREATE)
+ if ((!IS_ENV_READONLY(pBt) && p->vfsFlags & SQLITE_OPEN_CREATE) ||
+ pBt->dbStorage == DB_STORE_INMEM)
pBt->db_oflags |= DB_CREATE;
creating = 1;
if (pBt->dbStorage == DB_STORE_NAMED &&
(rc = btreeOpenMetaTables(p, &creating)) != SQLITE_OK)
goto err;
+
if (creating) {
/*
- * Update the fileid now that the file has been created.
+ * Update the fileid now that the file has been created.
* Ignore error returns - the fileid isn't critical.
*/
if (pBt->dbStorage == DB_STORE_NAMED) {
@@ -1854,6 +2017,7 @@ aftercreatemeta:
#ifdef BDBSQL_SHARE_PRIVATE
pBt->lockfile.in_env_open = 1;
#endif
+ /* Call the public begin transaction. */
if ((writeLock || txn_mode == TRANS_WRITE) &&
!btreeRepIsClient(p) &&
(rc = sqlite3BtreeBeginTrans(p,
@@ -1994,8 +2158,9 @@ static int btreeCreateSharedBtree(
if (store == DB_STORE_NAMED) {
/* Store full path of zfilename */
dirPathName = dirPathBuf;
- sqlite3OsFullPathname(
- db->pVfs, zFilename, sizeof(dirPathBuf), dirPathName);
+ if (sqlite3OsFullPathname(db->pVfs,
+ zFilename, sizeof(dirPathBuf), dirPathName) != SQLITE_OK)
+ goto err_nomem;
if ((new_bt->full_name = sqlite3_strdup(dirPathName)) == NULL)
goto err_nomem;
if ((new_bt->orig_name = sqlite3_strdup(zFilename)) == NULL)
@@ -2023,6 +2188,12 @@ static int btreeCreateSharedBtree(
new_bt->nRef = 1;
new_bt->uid = g_uid_next++;
new_bt->logFileSize = SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT;
+ new_bt->repRole = BDBSQL_REP_UNKNOWN;
+ /* Only on disk databases can support blobs. */
+ if (store != DB_STORE_NAMED)
+ new_bt->blob_threshold = 0;
+ else
+ new_bt->blob_threshold = BDBSQL_LARGE_RECORD_OPTIMIZATION;
#ifdef SQLITE_SECURE_DELETE
new_bt->secureDelete = 1;
#endif
@@ -2042,8 +2213,12 @@ err_nomem:
** zFilename is the name of the database file. If zFilename is NULL a new
** database with a random name is created. This randomly named database file
** will be deleted when sqlite3BtreeClose() is called.
+**
+** Note: SQLite uses the parameter VFS to lookup a database in the shared cache
+** we ignore that parameter.
*/
int sqlite3BtreeOpen(
+ sqlite3_vfs *pVfs, /* VFS to use for this b-tree */
const char *zFilename, /* Name of the file containing the database */
sqlite3 *db, /* Associated database connection */
Btree **ppBtree, /* Pointer to new Btree object written here */
@@ -2271,119 +2446,6 @@ static int btreeCloseAllCursors(Btree *p, DB_TXN *txn)
return rc;
}
-static int btreeCleanupCachedHandles(Btree *p, cleanup_mode_t cleanup)
-{
- DB *dbp;
- DB_SEQUENCE *seq;
- DBT key;
- CACHED_DB *cached_db;
- BtShared *pBt;
- HashElem *e, *e_next;
- SEQ_COOKIE *sc;
- int remove, ret, rc;
-
- log_msg(LOG_VERBOSE, "btreeCleanupCachedHandles(%p, %d)",
- p, (int)cleanup);
-
- pBt = p->pBt;
- e = NULL;
- rc = SQLITE_OK;
- remove = 0;
-
- /* If a backup is in progress, we can't drop handle locks. */
- if ((cleanup == CLEANUP_GET_LOCKS || cleanup == CLEANUP_DROP_LOCKS) &&
- p->nBackup > 0)
- return (SQLITE_OK);
-
- if ((cleanup == CLEANUP_GET_LOCKS || cleanup == CLEANUP_DROP_LOCKS))
- sqlite3_mutex_enter(pBt->mutex);
-
- for (e = sqliteHashFirst(&pBt->db_cache); e != NULL;
- e = e_next) {
- /*
- * Grab the next value now rather than in the for loop so that
- * it's possible to remove elements from the list inline.
- */
- e_next = sqliteHashNext(e);
- cached_db = sqliteHashData(e);
-
- if (cached_db == NULL)
- continue;
-
- if (cleanup == CLEANUP_DROP_LOCKS ||
- cleanup == CLEANUP_GET_LOCKS) {
- if (cached_db->is_sequence || cached_db->dbp == NULL ||
- strcmp(cached_db->key, "1") == 0)
- continue;
- if (cleanup == CLEANUP_GET_LOCKS)
- btreeDbHandleLock(p, cached_db);
- else if (cleanup == CLEANUP_DROP_LOCKS) {
- btreeDbHandleUnlock(p, cached_db);
- }
- continue;
- }
-
- if (cached_db->is_sequence) {
- sc = (SEQ_COOKIE *)cached_db->cookie;
- if (cleanup == CLEANUP_ABORT && sc != NULL) {
- memset(&key, 0, sizeof(key));
- key.data = sc->name;
- key.size = key.ulen = sc->name_len;
- key.flags = DB_DBT_USERMEM;
- if (pMetaDb->exists(pMetaDb,
- pFamilyTxn, &key, 0) == DB_NOTFOUND) {
- /*
- * This abort removed a sequence -
- * remove the matching cache entry.
- */
- remove = 1;
- }
- }
- seq = (DB_SEQUENCE *)cached_db->dbp;
- if (seq != NULL && (ret = seq->close(seq, 0)) != 0 &&
- rc == SQLITE_OK)
- rc = dberr2sqlite(ret, p);
- } else if ((dbp = cached_db->dbp) != NULL) {
- /*
- * We have to clear the cache of any stale DB handles.
- * If a transaction has been aborted, the handle will
- * no longer be open. We peek inside the handle at
- * the flags to find out: otherwise, we would need to
- * track all parent / child relationships when
- * rolling back transactions.
- */
- if (cleanup == CLEANUP_ABORT &&
- (dbp->flags & DB_AM_OPEN_CALLED) != 0)
- continue;
-
-#ifndef BDBSQL_SINGLE_THREAD
- if (dbp->app_private != NULL)
- sqlite3_free(dbp->app_private);
-#endif
- if ((ret = closeDB(p, dbp, DB_NOSYNC)) == 0 &&
- rc == SQLITE_OK)
- rc = dberr2sqlite(ret, p);
- remove = 1;
- }
- if (cleanup == CLEANUP_CLOSE || remove) {
- if (remove)
- sqlite3HashInsert(&pBt->db_cache,
- cached_db->key,
- (int)strlen(cached_db->key), NULL);
- if (cached_db->cookie != NULL)
- sqlite3_free(cached_db->cookie);
- sqlite3_free(cached_db);
- remove = 0;
- } else
- cached_db->dbp = NULL;
- }
-
- if ((cleanup == CLEANUP_GET_LOCKS || cleanup == CLEANUP_DROP_LOCKS))
- sqlite3_mutex_leave(pBt->mutex);
-
- return rc;
-}
-
/*
** Close an open database and invalidate all cursors.
*/
@@ -2432,7 +2494,7 @@ int sqlite3BtreeClose(Btree *p)
#endif
if (pMainTxn != NULL &&
- (t_rc = sqlite3BtreeRollback(p)) != SQLITE_OK && rc == SQLITE_OK)
+ (t_rc = sqlite3BtreeRollback(p, rc)) != SQLITE_OK && rc == SQLITE_OK)
rc = t_rc;
assert(pMainTxn == NULL);
@@ -2504,7 +2566,7 @@ int sqlite3BtreeClose(Btree *p)
* list, so it cannot be reused and it is safe to close any
* handles.
*/
- t_rc = btreeCleanupCachedHandles(p, CLEANUP_CLOSE);
+ t_rc = btreeHandleCacheCleanup(p, CLEANUP_CLOSE);
if (t_rc != SQLITE_OK && rc == SQLITE_OK)
rc = t_rc;
sqlite3HashClear(&pBt->db_cache);
@@ -2541,6 +2603,7 @@ int sqlite3BtreeClose(Btree *p)
#endif
if ((t_ret = pDbEnv->close(pDbEnv, 0)) != 0 && ret == 0)
ret = t_ret;
+ pDbEnv = NULL;
pBt->repStarted = 0;
}
#ifdef BDBSQL_SHARE_PRIVATE
@@ -2548,7 +2611,10 @@ int sqlite3BtreeClose(Btree *p)
if (needsunlock)
btreeScopedFileUnlock(p, 1);
#endif
+ if (ret != 0)
+ dberr2sqlite(ret, p);
btreeFreeSharedBtree(pBt, 0);
+ p->pBt = NULL;
}
sqlite3_mutex_leave(mutexOpen);
@@ -2585,6 +2651,22 @@ int sqlite3BtreeSetCacheSize(Btree *p, int mxPage)
}
/*
+** Change the limit on the amount of the database file that may be
+** memory mapped.
+*/
+int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap)
+{
+ BtShared *pBt;
+ int ret;
+
+ pBt = p->pBt;
+ log_msg(LOG_VERBOSE, "sqlite3BtreeSetMmapLimit(%p, %u)", p, szMmap);
+ if ((ret = pDbEnv->set_mp_mmapsize(pDbEnv, szMmap)) != 0)
+ ret = dberr2sqlite(ret, p);
+ return ret;
+}
+
+/*
** Change the way data is synced to disk in order to increase or decrease how
** well the database resists damage due to OS crashes and power failures.
** Level 1 is the same as asynchronous (no syncs() occur and there is a high
@@ -2592,54 +2674,56 @@ int sqlite3BtreeSetCacheSize(Btree *p, int mxPage)
** non-zero probability of damage. Level 3 reduces the probability of damage
** to near zero but with a write performance reduction.
**
-** Berkeley DB always does the equivalent of "fullSync".
+** The levels mentioned above have the names of
+** PAGER_SYNCHRONOUS_{OFF,NORMAL,FULL}. Special treatment of checkpoints
+** is in PAGER{_CKPT,}_FULLFSYNC.
+** Berkeley DB always does the equivalent of PAGER_FULLSYNC.
*/
-int sqlite3BtreeSetSafetyLevel(
+int sqlite3BtreeSetPagerFlags(
Btree *p,
- int level,
- int fullSync,
- int ckptFullSync)
+ unsigned pgFlags)
{
BtShared *pBt;
- log_msg(LOG_VERBOSE,
- "sqlite3BtreeSetSafetyLevel(%p, %u, %u, %u)",
- p, level, fullSync, ckptFullSync);
+ int level;
+
+ level = pgFlags & PAGER_SYNCHRONOUS_MASK;
+ log_msg(LOG_VERBOSE, "sqlite3BtreeSetPagerFlags(%p, %d:%x)",
+ p, level, pgFlags & PAGER_FLAGS_MASK);
pBt = p->pBt;
- /* TODO: Ignore ckptFullSync for now - it corresponds to:
+ /* TODO: Ignore pgFlags for now - it corresponds to:
* PRAGMA checkpoint_fullfsync
* Berkeley DB doesn't allow you to disable that, so ignore the pragma.
*/
if (GET_DURABLE(p->pBt)) {
- pDbEnv->set_flags(pDbEnv, DB_TXN_NOSYNC, (level == 1));
- pDbEnv->set_flags(pDbEnv, DB_TXN_WRITE_NOSYNC, (level == 2));
+ pDbEnv->set_flags(pDbEnv,
+ DB_TXN_NOSYNC, level == PAGER_SYNCHRONOUS_OFF);
+ pDbEnv->set_flags(pDbEnv,
+ DB_TXN_WRITE_NOSYNC, level == PAGER_SYNCHRONOUS_NORMAL);
}
return SQLITE_OK;
}
-int sqlite3BtreeHandleCacheUpdate(Btree *p, int schema_changed)
-{
- int rc;
-
- if (schema_changed != 0 && (rc = btreeInvalidateHandleCache(p)) != 0)
- return rc;
- return btreeCleanupCachedHandles(p, CLEANUP_GET_LOCKS);
+/*
+ * Called from OP_VerifyCookie. Call specific to Berkeley DB.
+ * Closes cached handles if the schema changed, grabs handle locks always.
+ */
+int sqlite3BtreeHandleCacheFixup(Btree *p, int schema_changed) {
+ if (schema_changed != 0)
+ btreeHandleCacheClear(p);
+ return (btreeHandleCacheLockUpdate(p, CLEANUP_GET_LOCKS));
}
/*
- * If the schema version has changed since the last transaction we need to
- * close all handles in the handle cache that aren't holding a handle lock.
- * Ideally we could do this via the sqlite3ResetInternalSchema method
- * but there is no obvious hook there, and.. since we do the GET_LOCKS
- * call here, we need to close handles now or we can't tell if they need to be
- * closed.
- * TODO: We'll probably be best altering the sqlite code to make this work
- * more efficiently.
+ * The schema changed - unconditionally close all cached handles except
+ * metadata and sequences. We need to do this since we drop all handle locks
+ * when there are no active transactions, and it's possible that a database
+ * has been deleted in another process if the schema changed.
*/
-static int btreeInvalidateHandleCache(Btree *p) {
+static int btreeHandleCacheClear(Btree *p) {
BtShared *pBt;
- int cookie, i, rc, ret;
+ int i, rc, ret;
CACHED_DB *cached_db, **tables_to_close;
DB *dbp;
HashElem *e, *e_next;
@@ -2648,89 +2732,214 @@ static int btreeInvalidateHandleCache(Btree *p) {
rc = ret = 0;
pBt = p->pBt;
- if (p->inTrans == TRANS_NONE && p->db != NULL && p->db->aDb != NULL) {
- sqlite3BtreeGetMeta(p, BTREE_SCHEMA_VERSION, (u32 *)&cookie);
- if (p->db->aDb[0].pSchema != NULL &&
- p->db->aDb[0].pSchema->schema_cookie != cookie) {
- /*
- * TODO: Is it possible that this function is called
- * while already holding the mutex? Maybe from the
- * sequence code.
- */
- sqlite3_mutex_enter(pBt->mutex);
- /*
- * We can't call DB->close while holding the mutex, so
- * record which handles we want to close and do the
- * actual close after the mutex is released.
- */
- for (e = sqliteHashFirst(&pBt->db_cache), i = 0;
- e != NULL; e = sqliteHashNext(e), i++) {}
+ log_msg(LOG_VERBOSE, "sqlite3BtreeClearHandleCache(%p)", p);
- if (i == 0) {
- sqlite3_mutex_leave(pBt->mutex);
- return (0);
- }
+ if (p->inTrans == TRANS_NONE || p->db == NULL || p->db->aDb == NULL)
+ return (SQLITE_OK);
- tables_to_close =
- sqlite3_malloc(i * sizeof(CACHED_DB *));
- if (tables_to_close == NULL) {
- sqlite3_mutex_leave(pBt->mutex);
- return SQLITE_NOMEM;
+ sqlite3_mutex_enter(pBt->mutex);
+
+ for (e = sqliteHashFirst(&pBt->db_cache), i = 0;
+ e != NULL; e = sqliteHashNext(e), i++) {}
+
+ if (i == 0) {
+ sqlite3_mutex_leave(pBt->mutex);
+ return (0);
+ }
+
+ tables_to_close =
+ sqlite3_malloc(i * sizeof(CACHED_DB *));
+ if (tables_to_close == NULL) {
+ sqlite3_mutex_leave(pBt->mutex);
+ return SQLITE_NOMEM;
+ }
+ memset(tables_to_close, 0, i * sizeof(CACHED_DB *));
+ /*
+ * Ideally we'd be able to find out if the Berkeley DB
+ * fileid is still valid, but that's not currently
+ * simple, so close all handles.
+ */
+ for (e = sqliteHashFirst(&pBt->db_cache), i = 0;
+ e != NULL; e = e_next) {
+ e_next = sqliteHashNext(e);
+ cached_db = sqliteHashData(e);
+
+ /* Skip table name db and in memory tables. */
+ if (cached_db == NULL || cached_db->dbp == NULL ||
+ strcmp(cached_db->key, "1") == 0)
+ continue;
+ dbp = cached_db->dbp;
+ dbp->dbenv->get_open_flags(dbp->dbenv, &flags);
+ if (flags & DB_PRIVATE)
+ continue;
+ if (btreeDbHandleIsLocked(cached_db))
+ continue;
+ tables_to_close[i++] = cached_db;
+ sqlite3HashInsert(&pBt->db_cache,
+ cached_db->key,
+ (int)strlen(cached_db->key), NULL);
+ }
+ sqlite3_mutex_leave(pBt->mutex);
+ for (i = 0; tables_to_close[i] != NULL; i++) {
+ cached_db = tables_to_close[i];
+ dbp = cached_db->dbp;
+#ifndef BDBSQL_SINGLE_THREAD
+ if (dbp->app_private != NULL)
+ sqlite3_free(dbp->app_private);
+#endif
+ if ((ret = closeDB(p, dbp, DB_NOSYNC)) == 0 &&
+ rc == SQLITE_OK)
+ rc = dberr2sqlite(ret, p);
+ if (cached_db->cookie != NULL)
+ sqlite3_free(cached_db->cookie);
+ sqlite3_free(cached_db);
+ }
+ sqlite3_free(tables_to_close);
+ if (rc != 0)
+ return (rc);
+ return (SQLITE_OK);
+}
+
+static int btreeHandleCacheLockUpdate(Btree *p, cleanup_mode_t cleanup)
+{
+
+ CACHED_DB *cached_db;
+ BtShared *pBt;
+ HashElem *e;
+
+ log_msg(LOG_VERBOSE, "btreeHandleCacheLockUpdate(%p, %d)",
+ p, (int)cleanup);
+
+ pBt = p->pBt;
+ e = NULL;
+
+ assert(cleanup == CLEANUP_DROP_LOCKS || cleanup == CLEANUP_GET_LOCKS);
+
+ /* If a backup is in progress we can't drop handle locks. */
+ if (p->nBackup > 0)
+ return (SQLITE_OK);
+
+ sqlite3_mutex_enter(pBt->mutex);
+ for (e = sqliteHashFirst(&pBt->db_cache); e != NULL;
+ e = sqliteHashNext(e)) {
+
+ cached_db = sqliteHashData(e);
+
+ if (cached_db == NULL || cached_db->is_sequence ||
+ cached_db->dbp == NULL || strcmp(cached_db->key, "1") == 0)
+ continue;
+
+ if (cleanup == CLEANUP_GET_LOCKS)
+ btreeDbHandleLock(p, cached_db);
+ else if (cleanup == CLEANUP_DROP_LOCKS)
+ btreeDbHandleUnlock(p, cached_db);
+ }
+ sqlite3_mutex_leave(pBt->mutex);
+ return (SQLITE_OK);
+}
+
+
+static int btreeHandleCacheCleanup(Btree *p, cleanup_mode_t cleanup)
+{
+ DB *dbp;
+ DB_SEQUENCE *seq;
+ DBT key;
+ CACHED_DB *cached_db;
+ BtShared *pBt;
+ HashElem *e, *e_next;
+ SEQ_COOKIE *sc;
+ int remove, ret, rc;
+
+ log_msg(LOG_VERBOSE, "btreeHandleCacheCleanup(%p, %d)",
+ p, (int)cleanup);
+
+ pBt = p->pBt;
+ e = NULL;
+ rc = SQLITE_OK;
+ remove = 0;
+
+ for (e = sqliteHashFirst(&pBt->db_cache); e != NULL;
+ e = e_next) {
+ /*
+ * Grab the next value now rather than in the for loop so that
+ * it's possible to remove elements from the list inline.
+ */
+ e_next = sqliteHashNext(e);
+ cached_db = sqliteHashData(e);
+
+ if (cached_db == NULL)
+ continue;
+
+ if (cached_db->is_sequence) {
+ sc = (SEQ_COOKIE *)cached_db->cookie;
+ if (cleanup == CLEANUP_ABORT && sc != NULL) {
+ memset(&key, 0, sizeof(key));
+ key.data = sc->name;
+ key.size = key.ulen = sc->name_len;
+ key.flags = DB_DBT_USERMEM;
+ if (pMetaDb->exists(pMetaDb,
+ pFamilyTxn, &key, 0) == DB_NOTFOUND) {
+ /*
+ * This abort removed a sequence -
+ * remove the matching cache entry.
+ */
+ remove = 1;
+ }
}
- memset(tables_to_close, 0, i * sizeof(CACHED_DB *));
+ seq = (DB_SEQUENCE *)cached_db->dbp;
+ if (seq != NULL && (ret = seq->close(seq, 0)) != 0 &&
+ rc == SQLITE_OK)
+ rc = dberr2sqlite(ret, p);
+ } else if ((dbp = cached_db->dbp) != NULL) {
/*
- * Ideally we'd be able to find out if the Berkeley DB
- * fileid is still valid, but that's not currently
- * simple, so close all handles.
+ * We have to clear the cache of any stale DB handles.
+ * If a transaction has been aborted, the handle will
+ * no longer be open. We peek inside the handle at
+ * the flags to find out: otherwise, we would need to
+ * track all parent / child relationships when
+ * rolling back transactions.
*/
- for (e = sqliteHashFirst(&pBt->db_cache), i = 0;
- e != NULL; e = e_next) {
- e_next = sqliteHashNext(e);
- cached_db = sqliteHashData(e);
-
- /* Skip table name db and in memory tables. */
- if (cached_db == NULL ||
- strcmp(cached_db->key, "1") == 0 ||
- cached_db->dbp == NULL)
- continue;
- dbp = cached_db->dbp;
- dbp->dbenv->get_open_flags(dbp->dbenv, &flags);
- if (flags & DB_PRIVATE)
- continue;
- if (btreeDbHandleIsLocked(cached_db))
- continue;
- tables_to_close[i++] = cached_db;
- sqlite3HashInsert(&pBt->db_cache,
- cached_db->key,
- (int)strlen(cached_db->key), NULL);
- }
- sqlite3_mutex_leave(pBt->mutex);
- for (i = 0; tables_to_close[i] != NULL; i++) {
- cached_db = tables_to_close[i];
- dbp = cached_db->dbp;
+ if (cleanup == CLEANUP_ABORT &&
+ (dbp->flags & DB_AM_OPEN_CALLED) != 0)
+ continue;
+
#ifndef BDBSQL_SINGLE_THREAD
- if (dbp->app_private != NULL)
- sqlite3_free(dbp->app_private);
+ if (dbp->app_private != NULL)
+ sqlite3_free(dbp->app_private);
#endif
- if ((ret = closeDB(p, dbp, DB_NOSYNC)) == 0 &&
- rc == SQLITE_OK)
- rc = dberr2sqlite(ret, p);
- if (cached_db->cookie != NULL)
- sqlite3_free(cached_db->cookie);
- sqlite3_free(cached_db);
- }
- sqlite3_free(tables_to_close);
- if (rc != 0)
- return (rc);
+ if ((ret = closeDB(p, dbp, DB_NOSYNC)) == 0 &&
+ rc == SQLITE_OK)
+ rc = dberr2sqlite(ret, p);
+ remove = 1;
}
+ if (cleanup == CLEANUP_CLOSE || remove) {
+ if (remove)
+ sqlite3HashInsert(&pBt->db_cache,
+ cached_db->key,
+ (int)strlen(cached_db->key), NULL);
+ if (cached_db->cookie != NULL)
+ sqlite3_free(cached_db->cookie);
+ sqlite3_free(cached_db);
+ remove = 0;
+ } else
+ cached_db->dbp = NULL;
}
- return (0);
+
+ return rc;
}
+/*
+ * A version of BeginTrans for use by internal Berkeley DB functions. Calling
+ * the sqlite3BtreeBeginTrans function directly skips handle lock validation.
+ */
int btreeBeginTransInternal(Btree *p, int wrflag)
{
- btreeCleanupCachedHandles(p, CLEANUP_GET_LOCKS);
- return sqlite3BtreeBeginTrans(p, wrflag);
+ int ret;
+
+ if ((ret = sqlite3BtreeBeginTrans(p, wrflag)) != 0)
+ return ret;
+ ret = sqlite3BtreeHandleCacheFixup(p, 0);
+ return ret;
}
/*
@@ -2879,7 +3088,7 @@ int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster)
**
** NOTE: It's OK for Berkeley DB to ignore the bCleanup flag - it is only used
** by SQLite when it is safe for it to ignore stray journal files. That's not
-** a relevant consideration for Berkele DB.
+** a relevant consideration for Berkeley DB.
*/
int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup)
{
@@ -2898,8 +3107,7 @@ int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup)
#ifdef BDBSQL_FILE_PER_TABLE
DBT key;
#endif
- log_msg(LOG_VERBOSE,
- "sqlite3BtreeCommitPhaseTwo(%p) -- writer %s",
+ log_msg(LOG_VERBOSE, "sqlite3BtreeCommitPhaseTwo(%p) -- writer %s",
p, pReadTxn ? "active" : "inactive");
pBt = p->pBt;
@@ -2909,7 +3117,7 @@ int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup)
removeFlags = DB_AUTO_COMMIT | DB_LOG_NO_DATA | DB_NOSYNC | \
(GET_DURABLE(pBt) ? 0 : DB_TXN_NOT_DURABLE);
- if (pMainTxn && p->db->activeVdbeCnt <= 1) {
+ if (pMainTxn && p->db->nVdbeRead <= 1) {
#ifdef BDBSQL_SHARE_PRIVATE
needsunlock = 1;
#endif
@@ -3010,7 +3218,7 @@ next: if (ret != 0 && rc == SQLITE_OK)
if (pFamilyTxn)
pFamilyTxn->set_priority(pFamilyTxn, defaultTxnPriority);
- if (p->db->activeVdbeCnt > 1)
+ if (p->db->nVdbeRead > 1)
p->inTrans = TRANS_READ;
else {
p->inTrans = TRANS_NONE;
@@ -3034,11 +3242,11 @@ next: if (ret != 0 && rc == SQLITE_OK)
/* Drop any handle locks if this was the only active txn. */
if (in_trans == 0)
- btreeCleanupCachedHandles(p, CLEANUP_DROP_LOCKS);
+ btreeHandleCacheLockUpdate(p, CLEANUP_DROP_LOCKS);
}
if (needVacuum && rc == SQLITE_OK)
- rc = btreeVacuum(p, &p->db->zErrMsg);
+ rc = btreeVacuum(p, &p->db->pVdbe->zErrMsg);
return rc;
}
@@ -3069,7 +3277,7 @@ int sqlite3BtreeCommit(Btree *p)
** This will release the write lock on the database file. If there are no
** active cursors, it also releases the read lock.
*/
-int sqlite3BtreeRollback(Btree *p)
+int sqlite3BtreeRollback(Btree *p, int tripCode)
{
BtShared *pBt;
int rc, t_rc;
@@ -3182,8 +3390,8 @@ static int btreeCompare(
* DB, such as deferred delete of an item in a Btree.
*/
BtShared *pBt = NULL;
- UnpackedRecord *p;
- char aSpace[40 * sizeof(void *)];
+ UnpackedRecord *pIdxKey;
+ char aSpace[42 * sizeof(void *)], *pFree;
int locked = 0;
/* This case can happen when searching temporary tables. */
@@ -3222,8 +3430,14 @@ static int btreeCompare(
}
#endif
- p = sqlite3VdbeRecordUnpack(keyInfo, dbt2->size, dbt2->data,
- aSpace, sizeof(aSpace));
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(
+ keyInfo, aSpace, sizeof(aSpace), &pFree);
+ if (pIdxKey == NULL) {
+ res = SQLITE_NOMEM;
+ goto locked;
+ }
+ sqlite3VdbeRecordUnpack(
+ keyInfo, dbt2->size, dbt2->data, pIdxKey);
/*
* XXX If we are out of memory, the call to unpack the record
@@ -3235,26 +3449,29 @@ static int btreeCompare(
* We choose zero because it makes loops terminate (e.g., if
* we're called as part of a sort).
*/
- res = (p == NULL) ? 0 :
- sqlite3VdbeRecordCompare(dbt1->size, dbt1->data, p);
- if (p != NULL)
- sqlite3VdbeDeleteUnpackedRecord(p);
+ res = (pIdxKey == NULL) ? 0 :
+ sqlite3VdbeRecordCompare(dbt1->size, dbt1->data, pIdxKey);
+ if (pIdxKey != NULL)
+ sqlite3DbFree(keyInfo->db, pFree);
- if (locked)
+locked: if (locked)
sqlite3_mutex_leave(pBt->mutex);
}
return res;
}
-static int btreeCompareKeyInfo(DB *dbp, const DBT *dbt1, const DBT *dbt2)
+static int btreeCompareKeyInfo(DB *dbp,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp)
{
assert(dbp->app_private != NULL);
+ locp = NULL;
return btreeCompare(dbp, dbt1, dbt2,
(struct KeyInfo *)dbp->app_private);
}
#ifndef BDBSQL_SINGLE_THREAD
-static int btreeCompareShared(DB *dbp, const DBT *dbt1, const DBT *dbt2)
+static int btreeCompareShared(DB *dbp,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp)
{
/*
* In some cases (e.g., vacuum), a KeyInfo may have been stashed
@@ -3262,6 +3479,7 @@ static int btreeCompareShared(DB *dbp, const DBT *dbt1, const DBT *dbt2)
* to btreeCompareKeyInfo on an open DB handle. If so, use that in
* preference to searching for one.
*/
+ locp = NULL;
return btreeCompare(dbp, dbt1, dbt2,
((TableInfo *)dbp->app_private)->pKeyInfo);
}
@@ -3270,7 +3488,7 @@ static int btreeCompareShared(DB *dbp, const DBT *dbt1, const DBT *dbt2)
/*
* Configures a Berkeley DB database handle prior to calling open.
*/
-static int btreeConfigureDbHandle(Btree *p, int iTable, DB **dbpp)
+static int btreeConfigureDbHandle(Btree *p, int iTable, DB **dbpp, int skipdup)
{
BtShared *pBt;
DB *dbp;
@@ -3290,6 +3508,8 @@ static int btreeConfigureDbHandle(Btree *p, int iTable, DB **dbpp)
if ((ret = db_create(&dbp, pDbEnv, 0)) != 0)
goto err;
if ((flags & BTREE_INTKEY) == 0) {
+ if (pBt->dbStorage == DB_STORE_NAMED && !skipdup)
+ dbp->set_dup_compare(dbp, btreeCompareDup);
#ifdef BDBSQL_SINGLE_THREAD
dbp->set_bt_compare(dbp, btreeCompareKeyInfo);
#else
@@ -3462,11 +3682,13 @@ static int btreeCreateDataTable(
DBT d, k;
#endif
char *fileName, *tableName, tableNameBuf[DBNAME_SIZE];
- int ret, t_ret;
+ int ret, skipdup, t_ret;
+ u_int32_t flags;
log_msg(LOG_VERBOSE, "sqlite3BtreeCreateDataTable(%p, %u, %p)",
p, iTable, ppCachedDb);
+ skipdup = 0;
pBt = p->pBt;
assert(!pBt->resultsBuffer);
@@ -3494,16 +3716,17 @@ static int btreeCreateDataTable(
* creating the table, we should be holding the schema lock,
* which will protect the handle in cache until we are done.
*/
- if ((ret = btreeConfigureDbHandle(p, iTable, &dbp)) != 0)
+ if ((ret = btreeConfigureDbHandle(p, iTable, &dbp, 0)) != 0)
goto err;
ret = ENOENT;
- if (pBt->dbStorage == DB_STORE_NAMED &&
+redo: if (pBt->dbStorage == DB_STORE_NAMED &&
(pBt->db_oflags & DB_CREATE) != 0) {
ret = dbp->open(dbp, pFamilyTxn, fileName, tableName, DB_BTREE,
(pBt->db_oflags & ~DB_CREATE) | GET_ENV_READONLY(pBt) |
GET_AUTO_COMMIT(pBt, pFamilyTxn), 0);
/* Close and re-configure handle. */
- if (ret == ENOENT) {
+ if (ret == ENOENT ||
+ (ret == EINVAL && skipdup == 0 && !(iTable & 1))) {
#ifndef BDBSQL_SINGLE_THREAD
if (dbp->app_private != NULL)
sqlite3_free(dbp->app_private);
@@ -3512,11 +3735,16 @@ static int btreeCreateDataTable(
ret = t_ret;
goto err;
}
- if ((t_ret =
- btreeConfigureDbHandle(p, iTable, &dbp)) != 0) {
+ if (ret == EINVAL)
+ skipdup = 1;
+ if ((t_ret = btreeConfigureDbHandle(
+ p, iTable, &dbp, skipdup)) != 0) {
ret = t_ret;
goto err;
}
+
+ if (skipdup == 1)
+ goto redo;
}
}
if (ret == ENOENT) {
@@ -3528,9 +3756,19 @@ static int btreeCreateDataTable(
if (pBt->dbStorage == DB_STORE_NAMED && (iTable & 1) == 0)
dbp->set_flags(dbp, DB_DUPSORT);
- ret = dbp->open(dbp, pSavepointTxn, fileName, tableName,
- DB_BTREE, pBt->db_oflags | GET_ENV_READONLY(pBt) |
- GET_AUTO_COMMIT(pBt, pSavepointTxn), 0);
+ /* Set blob threshold. */
+ if (pBt->dbStorage == DB_STORE_NAMED
+ && (iTable & 1) != 0 && pBt->blob_threshold > 0 &&
+ iTable > 2) {
+ if ((ret = dbp->set_blob_threshold(
+ dbp, pBt->blob_threshold, 0)) != 0)
+ goto err;
+ }
+ flags = pBt->db_oflags | GET_ENV_READONLY(pBt) |
+ GET_AUTO_COMMIT(pBt, pSavepointTxn);
+ flags |= (GET_ENV_READONLY(pBt) ? 0 : DB_CREATE);
+ ret = dbp->open(dbp, pSavepointTxn,
+ fileName, tableName, DB_BTREE, flags, 0);
#ifdef BDBSQL_FILE_PER_TABLE
if (ret == 0 && pBt->dbStorage == DB_STORE_NAMED) {
memset(&k, 0, sizeof(k));
@@ -3643,6 +3881,10 @@ int isDupIndex(int flags, int storage, KeyInfo *keyInfo, DB *db)
** incorrect operations. If the comparison function is NULL, a default
** comparison function is used. The comparison function is always ignored
** for INTKEY tables.
+**
+** The original version of this function always returns SQLITE_OK. However, the
+** Berkeley DB version read from disk and allocates memory, and can return
+** a sqlite3 error code.
*/
int sqlite3BtreeCursor(
Btree *p, /* The btree */
@@ -3678,7 +3920,7 @@ int sqlite3BtreeCursor(
* sqlite3BtreeOpen is called, it would not be possible to
* honor cache size setting pragmas.
*/
- if (pBt->need_open &&
+ if ((pBt->need_open || !pBt->resultsBuffer) &&
(rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK)
goto err;
else if (pBt->dbStorage == DB_STORE_NAMED && !pBt->env_opened &&
@@ -3693,11 +3935,9 @@ int sqlite3BtreeCursor(
if ((rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK)
goto err;
} else if (pBt->dbStorage != DB_STORE_TMP &&
- !wrFlag && !pBt->env_opened)
+ !wrFlag && !pBt->env_opened) {
return SQLITE_EMPTY;
- else if (!pBt->resultsBuffer &&
- (rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK)
- goto err;
+ }
}
if (wrFlag && IS_BTREE_READONLY(p))
@@ -3790,7 +4030,7 @@ err: if (pDbc != NULL) {
}
pCur->eState = CURSOR_FAULT;
pCur->error = rc;
- return SQLITE_OK;
+ return rc;
}
/*
@@ -3840,7 +4080,7 @@ static int btreeCloseCursor(BtCursor *pCur, int listRemove)
*/
sqlite3_mutex_enter(pBt->mutex);
pCur->eState = CURSOR_FAULT;
- pCur->error = SQLITE_ABORT;
+ pCur->error = SQLITE_ABORT_ROLLBACK;
sqlite3_mutex_leave(pBt->mutex);
/*
@@ -3919,7 +4159,8 @@ int indexIsCollated(KeyInfo *keyInfo)
for (i = 0; i < keyInfo->nField; i++) {
if (keyInfo->aColl[i] != NULL &&
- (keyInfo->aColl[i]->type != SQLITE_COLL_BINARY))
+ (strncmp(keyInfo->aColl[i]->zName, "BINARY",
+ strlen("BINARY")) != 0))
break;
}
return ((i != keyInfo->nField) ? 1 : 0);
@@ -4031,19 +4272,24 @@ int sqlite3BtreeMovetoUnpacked(
* rowid part of the key needs to be put in the data DBT.
*/
if (pCur->isDupIndex &&
- (pUnKey->nField > pCur->keyInfo->nField)) {
+ (pUnKey->nField >= pCur->keyInfo->nField)) {
u8 serial_type;
+ Mem indexData;
Mem *rowid = &pUnKey->aMem[pUnKey->nField - 1];
int file_format =
pCur->pBtree->db->pVdbe->minWriteFileFormat;
- serial_type = sqlite3VdbeSerialType(rowid, file_format);
+ memset(&indexData, 0, sizeof(Mem));
+ indexData.u.i = sqlite3VdbeIntValue(rowid);
+ indexData.flags = MEM_Int;
+ serial_type =
+ sqlite3VdbeSerialType(&indexData, file_format);
+ pUnKey->flags = UNPACKED_PREFIX_MATCH;
pCur->data.size =
sqlite3VdbeSerialTypeLen(serial_type) + 1;
assert(pCur->data.size < ROWIDMAXSIZE);
pCur->data.data = &buf;
putVarint32(buf, serial_type);
- sqlite3VdbeSerialPut(&buf[1], ROWIDMAXSIZE - 1,
- rowid, file_format);
+ sqlite3VdbeSerialPut(&buf[1], &indexData, serial_type);
ret = pDbc->get(pDbc, &pCur->key, &pCur->data,
DB_GET_BOTH_RANGE | RMW(pCur));
/*
@@ -4092,9 +4338,10 @@ int sqlite3BtreeMovetoUnpacked(
if (index.data) {
#ifdef BDBSQL_SINGLE_THREAD
res = btreeCompareKeyInfo(
- pBDb, &index, &target);
+ pBDb, &index, &target, NULL);
#else
- res = btreeCompareShared(pBDb, &index, &target);
+ res = btreeCompareShared(
+ pBDb, &index, &target, NULL);
#endif
} else {
ret = ENOMEM;
@@ -4121,20 +4368,23 @@ done: if (pRes != NULL)
int btreeMoveto(BtCursor *pCur, const void *pKey, i64 nKey, int bias, int *pRes)
{
- UnpackedRecord *p;
- char aSpace[150];
+ UnpackedRecord *pIdxKey;
+ char aSpace[150], *pFree;
int res;
/*
* Cache an unpacked key in the DBT so we don't have to unpack
* it on every comparison.
*/
- p = sqlite3VdbeRecordUnpack(pCur->keyInfo, (int)nKey, pKey, aSpace,
- sizeof(aSpace));
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(
+ pCur->keyInfo, aSpace, sizeof(aSpace), &pFree);
+ if (pIdxKey == NULL)
+ return (SQLITE_NOMEM);
+ sqlite3VdbeRecordUnpack(pCur->keyInfo, (int)nKey, pKey, pIdxKey);
- res = sqlite3BtreeMovetoUnpacked(pCur, p, nKey, bias, pRes);
+ res = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes);
- sqlite3VdbeDeleteUnpackedRecord(p);
+ sqlite3DbFree(pCur->keyInfo->db, pFree);
pCur->key.app_data = NULL;
return res;
@@ -4153,9 +4403,6 @@ static int btreeTripCursor(BtCursor *pCur, int incrBlobUpdate)
*/
assert(sqlite3_mutex_held(pCur->pBtree->pBt->mutex));
- dbc = pDbc;
- pDbc = NULL;
-
/*
* Need to close here to so that the update happens unambiguously in
* the primary cursor. That means the memory holding our copy of the
@@ -4171,6 +4418,9 @@ static int btreeTripCursor(BtCursor *pCur, int incrBlobUpdate)
}
}
+ dbc = pDbc;
+ pDbc = NULL;
+
if (pCur->eState == CURSOR_VALID)
pCur->eState = (pCur->isIncrblobHandle && !incrBlobUpdate) ?
CURSOR_INVALID : CURSOR_REQUIRESEEK;
@@ -4259,12 +4509,17 @@ static int btreeRestoreCursorPosition(BtCursor *pCur, int skipMoveto)
/*
* SQLite should guarantee that an appropriate transaction is
* active.
+ * There is a real case in fts3 segment merging where this can
+ * happen.
*/
+#ifndef SQLITE_ENABLE_FTS3
assert(!pBt->transactional || pReadTxn != NULL);
assert(!pBt->transactional || !pCur->wrFlag ||
pSavepointTxn != NULL);
-
- pCur->txn = pCur->wrFlag ? pSavepointTxn : pReadTxn;
+#endif
+ /** A write blob cursor has its own dedicated transaction **/
+ if(!(pCur->isIncrblobHandle && pCur->wrFlag))
+ pCur->txn = pCur->wrFlag ? pSavepointTxn : pReadTxn;
if ((ret = pBDb->cursor(pBDb, pCur->txn, &pDbc,
GET_BTREE_ISOLATION(p) & ~DB_READ_COMMITTED)) != 0)
@@ -4380,6 +4635,9 @@ err: /*
**
** For a table with the INTKEY flag set, this routine returns the key itself,
** not the number of bytes in the key.
+**
+** The main sqlite3 version always returns SQLITE_OK; so, this version does too,
+** after printing a message.
*/
int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize)
{
@@ -4388,10 +4646,12 @@ int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize)
log_msg(LOG_VERBOSE, "sqlite3BtreeKeySize(%p, %p)", pCur, pSize);
if (pCur->eState != CURSOR_VALID &&
- (rc = btreeRestoreCursorPosition(pCur, 0)) != SQLITE_OK)
- return rc;
-
- if (pIntKey)
+ (rc = btreeRestoreCursorPosition(pCur, 0)) != SQLITE_OK) {
+ log_msg(LOG_RELEASE,
+ "sqlite3BtreeKeySize(%p, %p) restore position error %d",
+ pCur, pSize, rc);
+ *pSize = 0;
+ } else if (pIntKey)
*pSize = pCur->savedIntKey;
else {
if (pCur->isDupIndex)
@@ -4418,10 +4678,12 @@ int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize)
log_msg(LOG_VERBOSE, "sqlite3BtreeDataSize(%p, %p)", pCur, pSize);
if (pCur->eState != CURSOR_VALID &&
- (rc = btreeRestoreCursorPosition(pCur, 0)) != SQLITE_OK)
- return rc;
-
- if (pCur->isDupIndex)
+ (rc = btreeRestoreCursorPosition(pCur, 0)) != SQLITE_OK) {
+ log_msg(LOG_RELEASE,
+ "sqlite3BtreeDataSize(%p, %p) restore position error %d",
+ pCur, pSize, rc);
+ *pSize = 0;
+ } else if (pCur->isDupIndex)
*pSize = 0;
else
*pSize = (pCur->eState == CURSOR_VALID) ? pCur->data.size : 0;
@@ -4541,9 +4803,11 @@ void *btreeCreateIndexKey(BtCursor *pCur)
** on the next call to any Btree routine.
**
** These routines is used to get quick access to key and data in the common
-** case where no overflow pages are used.
+** case where no sqlite3 overflow pages are used. However, since the BDB API
+** does not expose that to sqlite3, we treat sizes >= 64K as if there were
+** no sqlite3 overflow pages involved.
*/
-const void *sqlite3BtreeKeyFetch(BtCursor *pCur, int *pAmt)
+const void *sqlite3BtreeKeyFetch(BtCursor *pCur, u32 *pAmt)
{
log_msg(LOG_VERBOSE, "sqlite3BtreeKeyFetch(%p, %p)", pCur, pAmt);
@@ -4556,7 +4820,7 @@ const void *sqlite3BtreeKeyFetch(BtCursor *pCur, int *pAmt)
return pCur->key.data;
}
-const void *sqlite3BtreeDataFetch(BtCursor *pCur, int *pAmt)
+const void *sqlite3BtreeDataFetch(BtCursor *pCur, u32 *pAmt)
{
log_msg(LOG_VERBOSE, "sqlite3BtreeDataFetch(%p, %p)", pCur, pAmt);
@@ -4877,9 +5141,31 @@ int sqlite3BtreeNext(BtCursor *pCur, int *pRes)
*/
int sqlite3BtreePrevious(BtCursor *pCur, int *pRes)
{
- int rc;
+ void *keyCopy;
+ int rc, size;
log_msg(LOG_VERBOSE, "sqlite3BtreePrevious(%p, %p)", pCur, pRes);
+ /*
+ * It is not possible to retrieve the previous entry in a buffer, so
+ * copy the key, dump the buffer into a database, and move the new
+ * cursor to the current position.
+ */
+ if (pIsBuffer) {
+ size = pCur->key.size;
+ keyCopy = sqlite3_malloc(size);
+ if (keyCopy == NULL)
+ return SQLITE_NOMEM;
+ memcpy(keyCopy, pCur->key.data, size);
+ if ((rc = btreeLoadBufferIntoTable(pCur)) != SQLITE_OK) {
+ sqlite3_free(keyCopy);
+ return rc;
+ }
+ rc = btreeMoveto(pCur, keyCopy, size, 0, &pCur->lastRes);
+ sqlite3_free(keyCopy);
+ if (rc != SQLITE_OK)
+ return rc;
+ }
+
if (pCur->eState != CURSOR_VALID &&
(rc = btreeRestoreCursorPosition(pCur, 0)) != SQLITE_OK)
return rc;
@@ -4901,10 +5187,19 @@ int sqlite3BtreePrevious(BtCursor *pCur, int *pRes)
static int insertData(BtCursor *pCur, int nZero, int nData)
{
int ret;
+ u_int32_t blob_threshold;
UPDATE_DURING_BACKUP(pCur->pBtree);
+
+ if (nZero > 0) {
+ (void)pBDb->get_blob_threshold(pBDb, &blob_threshold);
+ /* Blobs are <= 1GB in SQL, so this will not overflow. */
+ if (blob_threshold <= ((u_int32_t)nZero * nData))
+ pCur->data.flags |= DB_DBT_BLOB;
+ }
ret = pDbc->put(pDbc, &pCur->key, &pCur->data,
(pCur->isDupIndex) ? DB_NODUPDATA : DB_KEYLAST);
+ pCur->data.flags &= ~DB_DBT_BLOB;
if (ret == 0 && nZero > 0) {
DBT zeroData;
@@ -4941,8 +5236,8 @@ int sqlite3BtreeInsert(
{
int rc, ret;
i64 encKey;
- UnpackedRecord *p;
- char aSpace[150];
+ UnpackedRecord *pIdxKey;
+ char aSpace[150], *pFree;
log_msg(LOG_VERBOSE,
"sqlite3BtreeInsert(%p, %p, %u, %p, %u, %u, %u, %u)",
@@ -4951,7 +5246,7 @@ int sqlite3BtreeInsert(
if (!pCur->wrFlag)
return SQLITE_READONLY;
- p = NULL;
+ pIdxKey = NULL;
rc = SQLITE_OK;
/* Invalidate current cursor state. */
@@ -5016,8 +5311,15 @@ int sqlite3BtreeInsert(
* Cache an unpacked key in the DBT so we don't have to unpack
* it on every comparison.
*/
- pCur->key.app_data = p = sqlite3VdbeRecordUnpack(pCur->keyInfo,
- (int)nKey, pKey, aSpace, sizeof(aSpace));
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(
+ pCur->keyInfo, aSpace, sizeof(aSpace), &pFree);
+ if (pIdxKey == NULL) {
+ rc = SQLITE_NOMEM;
+ goto err;
+ }
+ sqlite3VdbeRecordUnpack(
+ pCur->keyInfo, (int)nKey, pKey, pIdxKey);
+ pCur->key.app_data = pIdxKey;
}
ret = insertData(pCur, nZero, nData);
@@ -5032,8 +5334,9 @@ int sqlite3BtreeInsert(
pCur->skipMulti = 0;
} else
pCur->eState = CURSOR_INVALID;
-err: if (p != NULL)
- sqlite3VdbeDeleteUnpackedRecord(p);
+ /* Free the unpacked record. */
+err: if (pIdxKey != NULL)
+ sqlite3DbFree(pCur->keyInfo->db, pFree);
pCur->key.app_data = NULL;
return MAP_ERR_LOCKED(rc, ret, pCur->pBtree);
}
@@ -5055,9 +5358,14 @@ int sqlite3BtreeDelete(BtCursor *pCur)
if (pIsBuffer) {
int res;
- rc = btreeMoveto(pCur, pCur->key.data, pCur->key.size, 0, &res);
- if (rc != SQLITE_OK)
+ if ((rc = btreeLoadBufferIntoTable(pCur)) != SQLITE_OK)
return rc;
+ if (pCur->keyInfo != NULL) {
+ if ((rc = btreeMoveto(
+ pCur, pCur->key.data,
+ pCur->key.size, 0, &res)) != SQLITE_OK)
+ return rc;
+ }
}
assert(!pIsBuffer);
@@ -5563,8 +5871,8 @@ void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta)
p->db->errCode = SQLITE_BUSY;
ret = 0;
*pMeta = 0;
- sqlite3BtreeRollback(p);
- }
+ sqlite3BtreeRollback(p, SQLITE_BUSY);
+ }
assert(ret == 0);
}
@@ -5917,6 +6225,9 @@ void sqlite3BtreeTripAllCursors(Btree* p, int errCode)
BtShared *pBt;
BtCursor *pCur;
+ if (!p)
+ return;
+
log_msg(LOG_VERBOSE, "sqlite3BtreeTripAllCursors(%p, %u)", p, errCode);
pBt = p->pBt;
@@ -6111,6 +6422,14 @@ int sqlite3BtreeGetPageSize(Btree *p)
log_msg(LOG_VERBOSE, "sqlite3BtreeGetPageSize(%p)", p);
pBt = p->pBt;
+ if (!p->connected && !pBt->database_existed) {
+ if (pBt->pageSize == 0)
+ return SQLITE_DEFAULT_PAGE_SIZE;
+ else
+ return pBt->pageSize;
+ }
+
if (!p->connected && pBt->need_open)
btreeOpenEnvironment(p, 1);
@@ -6120,6 +6439,7 @@ int sqlite3BtreeGetPageSize(Btree *p)
if (pBt->pageSize == 0)
return SQLITE_DEFAULT_PAGE_SIZE;
return p->pBt->pageSize;
}
/***************************************************************************
@@ -6265,7 +6585,7 @@ const char *sqlite3BtreeGetJournalname(Btree *p)
log_msg(LOG_VERBOSE, "sqlite3BtreeGetJournalname(%p)", p);
pBt = p->pBt;
- return (pBt->dir_name != 0 ? pBt->dir_name : "");
+ return (pBt->dir_name);
}
/*****************************************************************
@@ -6329,6 +6649,8 @@ int btreeGetKeyInfo(Btree *p, int iTable, KeyInfo **pKeyInfo)
{
Index *pIdx;
Parse parse;
+ KeyInfo *newKeyInfo;
+ int nBytes;
*pKeyInfo = 0;
/* Only indexes have a KeyInfo */
@@ -6347,10 +6669,18 @@ int btreeGetKeyInfo(Btree *p, int iTable, KeyInfo **pKeyInfo)
parse.db = p->db;
parse.nErr = 0;
- *pKeyInfo = sqlite3IndexKeyinfo(&parse, pIdx);
- if (!*pKeyInfo)
+ newKeyInfo = sqlite3KeyInfoOfIndex(&parse, pIdx);
+ if (newKeyInfo == NULL)
return SQLITE_NOMEM;
+ nBytes = sqlite3DbMallocSize(p->db, newKeyInfo);
+ *pKeyInfo = sqlite3DbMallocRaw(p->db, nBytes);
+ if (*pKeyInfo == NULL) {
+ sqlite3KeyInfoUnref(newKeyInfo);
+ return SQLITE_NOMEM;
+ }
+ memcpy(*pKeyInfo, newKeyInfo, nBytes);
(*pKeyInfo)->enc = ENC(p->db);
+ sqlite3KeyInfoUnref(newKeyInfo);
}
return SQLITE_OK;
}
@@ -6425,6 +6755,13 @@ int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum)
if (rc == SQLITE_OK && !p->connected && !pBt->resultsBuffer)
rc = btreeOpenEnvironment(p, 1);
+ /*
+ * The auto_vacuum and incr_vacuum pragmas use this persistent metadata
+ * field to control vacuum behavior.
+ */
+ if (autoVacuum != 0)
+ sqlite3BtreeUpdateMeta(p, BTREE_LARGEST_ROOT_PAGE, 1);
+
return rc;
#endif
}
@@ -6549,7 +6886,7 @@ int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint)
goto err;
if (op == SAVEPOINT_ROLLBACK &&
- (rc = btreeCleanupCachedHandles(p, CLEANUP_ABORT)) != SQLITE_OK)
+ (rc = btreeHandleCacheCleanup(p, CLEANUP_ABORT)) != SQLITE_OK)
return rc;
if (op == SAVEPOINT_ROLLBACK && p->txn_bulk && iSavepoint >= 0)
@@ -6586,7 +6923,7 @@ int sqlite3_enable_shared_cache(int enable)
int btreeGetUserTable(Btree *p, DB_TXN *pTxn, DB **pDb, int iTable)
{
char *fileName, *tableName, tableNameBuf[DBNAME_SIZE];
- int ret, rc;
+ int ret, rc, skipdup;
BtShared *pBt;
DB *dbp;
KeyInfo *keyInfo;
@@ -6596,6 +6933,7 @@ int btreeGetUserTable(Btree *p, DB_TXN *pTxn, DB **pDb, int iTable)
pBt = p->pBt;
dbp = *pDb;
keyInfo = NULL;
+ skipdup = 0;
/* Is the metadata table. */
if (iTable < 1) {
*pDb = NULL;
@@ -6608,7 +6946,7 @@ int btreeGetUserTable(Btree *p, DB_TXN *pTxn, DB **pDb, int iTable)
FIX_TABLENAME(pBt, fileName, tableName);
/* Open a DB handle on that table. */
- if ((ret = db_create(&dbp, pDbEnv, 0)) != 0)
+redo: if ((ret = db_create(&dbp, pDbEnv, 0)) != 0)
return dberr2sqlite(ret, p);
if (!GET_DURABLE(pBt) &&
@@ -6619,13 +6957,16 @@ int btreeGetUserTable(Btree *p, DB_TXN *pTxn, DB **pDb, int iTable)
if (!(iTable & 1)) {
/* Get the KeyInfo for the index */
- if ((rc = btreeGetKeyInfo(p, iTable, &keyInfo)) != SQLITE_OK)
+ if (keyInfo == NULL &&
+ (rc = btreeGetKeyInfo(p, iTable, &keyInfo)) != SQLITE_OK)
goto err;
if (keyInfo) {
dbp->app_private = keyInfo;
dbp->set_bt_compare(dbp, btreeCompareKeyInfo);
}
+ if (pBt->dbStorage == DB_STORE_NAMED && !skipdup)
+ dbp->set_dup_compare(dbp, btreeCompareDup);
} else
dbp->set_bt_compare(dbp, btreeCompareIntKey);
@@ -6633,8 +6974,21 @@ int btreeGetUserTable(Btree *p, DB_TXN *pTxn, DB **pDb, int iTable)
FIX_TABLENAME(pBt, fileName, tableName);
if ((ret = dbp->open(dbp, pTxn, fileName, tableName, DB_BTREE,
(pBt->db_oflags & ~DB_CREATE) | GET_ENV_READONLY(pBt), 0) |
- GET_AUTO_COMMIT(pBt, pTxn)) != 0)
- goto err;
+ GET_AUTO_COMMIT(pBt, pTxn)) != 0) {
+ /*
+ * Indexes created in BDB 5.0 do not support duplicates, so
+ * attempt to open the index again without btreeCompareDup
+ * set.
+ */
+ if (ret == EINVAL && skipdup == 0 &&
+ !(iTable & 1) && pBt->dbStorage == DB_STORE_NAMED) {
+ skipdup = 1;
+ dbp->close(dbp, DB_NOSYNC);
+ dbp = NULL;
+ goto redo;
+ }
+ goto err;
+ }
*pDb = dbp;
return rc;
@@ -7005,317 +7359,12 @@ static const u_int8_t __dbsql_marshaled_int_size[] = {
};
/*
- * btreeCompressInt --
- * Compresses the integer into the buffer, returning the number of
- * bytes occupied.
- *
- * An exact copy of __db_compress_int
- */
-static int btreeCompressInt(u_int8_t *buf, u_int64_t i)
-{
- if (i <= CMP_INT_1BYTE_MAX) {
- /* no swapping for one byte value */
- buf[0] = (u_int8_t)i;
- return 1;
- } else {
- u_int8_t *p = (u_int8_t*)&i;
- if (i <= CMP_INT_2BYTE_MAX) {
- i -= CMP_INT_1BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = p[6] | CMP_INT_2BYTE_VAL;
- buf[1] = p[7];
- } else {
- buf[0] = p[1] | CMP_INT_2BYTE_VAL;
- buf[1] = p[0];
- }
- return 2;
- } else if (i <= CMP_INT_3BYTE_MAX) {
- i -= CMP_INT_2BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = p[5] | CMP_INT_3BYTE_VAL;
- buf[1] = p[6];
- buf[2] = p[7];
- } else {
- buf[0] = p[2] | CMP_INT_3BYTE_VAL;
- buf[1] = p[1];
- buf[2] = p[0];
- }
- return 3;
- } else if (i <= CMP_INT_4BYTE_MAX) {
- i -= CMP_INT_3BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = p[4] | CMP_INT_4BYTE_VAL;
- buf[1] = p[5];
- buf[2] = p[6];
- buf[3] = p[7];
- } else {
- buf[0] = p[3] | CMP_INT_4BYTE_VAL;
- buf[1] = p[2];
- buf[2] = p[1];
- buf[3] = p[0];
- }
- return 4;
- } else if (i <= CMP_INT_5BYTE_MAX) {
- i -= CMP_INT_4BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = p[3] | CMP_INT_5BYTE_VAL;
- buf[1] = p[4];
- buf[2] = p[5];
- buf[3] = p[6];
- buf[4] = p[7];
- } else {
- buf[0] = p[4] | CMP_INT_5BYTE_VAL;
- buf[1] = p[3];
- buf[2] = p[2];
- buf[3] = p[1];
- buf[4] = p[0];
- }
- return 5;
- } else if (i <= CMP_INT_6BYTE_MAX) {
- i -= CMP_INT_5BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = CMP_INT_6BYTE_VAL;
- buf[1] = p[3];
- buf[2] = p[4];
- buf[3] = p[5];
- buf[4] = p[6];
- buf[5] = p[7];
- } else {
- buf[0] = CMP_INT_6BYTE_VAL;
- buf[1] = p[4];
- buf[2] = p[3];
- buf[3] = p[2];
- buf[4] = p[1];
- buf[5] = p[0];
- }
- return 6;
- } else if (i <= CMP_INT_7BYTE_MAX) {
- i -= CMP_INT_6BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = CMP_INT_7BYTE_VAL;
- buf[1] = p[2];
- buf[2] = p[3];
- buf[3] = p[4];
- buf[4] = p[5];
- buf[5] = p[6];
- buf[6] = p[7];
- } else {
- buf[0] = CMP_INT_7BYTE_VAL;
- buf[1] = p[5];
- buf[2] = p[4];
- buf[3] = p[3];
- buf[4] = p[2];
- buf[5] = p[1];
- buf[6] = p[0];
- }
- return 7;
- } else if (i <= CMP_INT_8BYTE_MAX) {
- i -= CMP_INT_7BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = CMP_INT_8BYTE_VAL;
- buf[1] = p[1];
- buf[2] = p[2];
- buf[3] = p[3];
- buf[4] = p[4];
- buf[5] = p[5];
- buf[6] = p[6];
- buf[7] = p[7];
- } else {
- buf[0] = CMP_INT_8BYTE_VAL;
- buf[1] = p[6];
- buf[2] = p[5];
- buf[3] = p[4];
- buf[4] = p[3];
- buf[5] = p[2];
- buf[6] = p[1];
- buf[7] = p[0];
- }
- return 8;
- } else {
- i -= CMP_INT_8BYTE_MAX + 1;
- if (__db_isbigendian() != 0) {
- buf[0] = CMP_INT_9BYTE_VAL;
- buf[1] = p[0];
- buf[2] = p[1];
- buf[3] = p[2];
- buf[4] = p[3];
- buf[5] = p[4];
- buf[6] = p[5];
- buf[7] = p[6];
- buf[8] = p[7];
- } else {
- buf[0] = CMP_INT_9BYTE_VAL;
- buf[1] = p[7];
- buf[2] = p[6];
- buf[3] = p[5];
- buf[4] = p[4];
- buf[5] = p[3];
- buf[6] = p[2];
- buf[7] = p[1];
- buf[8] = p[0];
- }
- return 9;
- }
- }
-}
-
-/*
- * btreeDecompressInt --
- * Decompresses the compressed integer pointer to by buf into i,
- * returning the number of bytes read.
- *
- * An exact copy of __db_decompress_int
- */
-static int btreeDecompressInt(const u_int8_t *buf, u_int64_t *i)
-{
- int len;
- u_int64_t tmp;
- u_int8_t *p;
- u_int8_t c;
-
- tmp = 0;
- p = (u_int8_t*)&tmp;
- c = buf[0];
- len = __dbsql_marshaled_int_size[c];
-
- switch (len) {
- case 1:
- *i = c;
- return 1;
- case 2:
- if (__db_isbigendian() != 0) {
- p[6] = (c & CMP_INT_2BYTE_MASK);
- p[7] = buf[1];
- } else {
- p[1] = (c & CMP_INT_2BYTE_MASK);
- p[0] = buf[1];
- }
- tmp += CMP_INT_1BYTE_MAX + 1;
- break;
- case 3:
- if (__db_isbigendian() != 0) {
- p[5] = (c & CMP_INT_3BYTE_MASK);
- p[6] = buf[1];
- p[7] = buf[2];
- } else {
- p[2] = (c & CMP_INT_3BYTE_MASK);
- p[1] = buf[1];
- p[0] = buf[2];
- }
- tmp += CMP_INT_2BYTE_MAX + 1;
- break;
- case 4:
- if (__db_isbigendian() != 0) {
- p[4] = (c & CMP_INT_4BYTE_MASK);
- p[5] = buf[1];
- p[6] = buf[2];
- p[7] = buf[3];
- } else {
- p[3] = (c & CMP_INT_4BYTE_MASK);
- p[2] = buf[1];
- p[1] = buf[2];
- p[0] = buf[3];
- }
- tmp += CMP_INT_3BYTE_MAX + 1;
- break;
- case 5:
- if (__db_isbigendian() != 0) {
- p[3] = (c & CMP_INT_5BYTE_MASK);
- p[4] = buf[1];
- p[5] = buf[2];
- p[6] = buf[3];
- p[7] = buf[4];
- } else {
- p[4] = (c & CMP_INT_5BYTE_MASK);
- p[3] = buf[1];
- p[2] = buf[2];
- p[1] = buf[3];
- p[0] = buf[4];
- }
- tmp += CMP_INT_4BYTE_MAX + 1;
- break;
- case 6:
- if (__db_isbigendian() != 0) {
- p[3] = buf[1];
- p[4] = buf[2];
- p[5] = buf[3];
- p[6] = buf[4];
- p[7] = buf[5];
- } else {
- p[4] = buf[1];
- p[3] = buf[2];
- p[2] = buf[3];
- p[1] = buf[4];
- p[0] = buf[5];
- }
- tmp += CMP_INT_5BYTE_MAX + 1;
- break;
- case 7:
- if (__db_isbigendian() != 0) {
- p[2] = buf[1];
- p[3] = buf[2];
- p[4] = buf[3];
- p[5] = buf[4];
- p[6] = buf[5];
- p[7] = buf[6];
- } else {
- p[5] = buf[1];
- p[4] = buf[2];
- p[3] = buf[3];
- p[2] = buf[4];
- p[1] = buf[5];
- p[0] = buf[6];
- }
- tmp += CMP_INT_6BYTE_MAX + 1;
- break;
- case 8:
- if (__db_isbigendian() != 0) {
- p[1] = buf[1];
- p[2] = buf[2];
- p[3] = buf[3];
- p[4] = buf[4];
- p[5] = buf[5];
- p[6] = buf[6];
- p[7] = buf[7];
- } else {
- p[6] = buf[1];
- p[5] = buf[2];
- p[4] = buf[3];
- p[3] = buf[4];
- p[2] = buf[5];
- p[1] = buf[6];
- p[0] = buf[7];
- }
- tmp += CMP_INT_7BYTE_MAX + 1;
- break;
- case 9:
- if (__db_isbigendian() != 0) {
- p[0] = buf[1];
- p[1] = buf[2];
- p[2] = buf[3];
- p[3] = buf[4];
- p[4] = buf[5];
- p[5] = buf[6];
- p[6] = buf[7];
- p[7] = buf[8];
- } else {
- p[7] = buf[1];
- p[6] = buf[2];
- p[5] = buf[3];
- p[4] = buf[4];
- p[3] = buf[5];
- p[2] = buf[6];
- p[1] = buf[7];
- p[0] = buf[8];
- }
- tmp += CMP_INT_8BYTE_MAX + 1;
- break;
- default:
- break;
- }
-
- *i = tmp;
- return len;
+** set the mask of hint flags for cursor pCsr. Currently the only valid
+** values are 0 and BTREE_BULKLOAD.
+*/
+void sqlite3BtreeCursorHints(BtCursor *pCsr, unsigned int mask){
+ assert( mask==BTREE_BULKLOAD || mask==0 );
+ pCsr->hints = mask;
}
#ifdef BDBSQL_OMIT_LEAKCHECK
@@ -7370,7 +7419,12 @@ static int openPrivateEnvironment(Btree *p, int startFamily)
pDbEnv->set_mp_mtxcount(pDbEnv, pBt->mp_mutex_count);
pDbEnv->app_private = pBt;
pDbEnv->set_errcall(pDbEnv, btreeHandleDbError);
-
+ pDbEnv->set_event_notify(pDbEnv, btreeEventNotification);
+#ifndef BDBSQL_OMIT_LOG_REMOVE
+ if ((ret = pDbEnv->log_set_config(pDbEnv,
+ DB_LOG_AUTO_REMOVE, 1)) != 0)
+ goto err;
+#endif
ret = pDbEnv->open(pDbEnv, pBt->dir_name, pBt->env_oflags, 0);
/* There is no acceptable failure for this reopen. */
if (ret != 0)
@@ -7463,7 +7517,7 @@ static int btreeReopenPrivateEnvironment(Btree *p)
if (!pBt->lockfile.in_env_open)
sqlite3_mutex_enter(mutexOpen);
/* close open DB handles and clear related hash table */
- t_rc = btreeCleanupCachedHandles(p, CLEANUP_CLOSE);
+ t_rc = btreeHandleCacheCleanup(p, CLEANUP_CLOSE);
if (t_rc != SQLITE_OK && rc == SQLITE_OK)
rc = t_rc;
sqlite3HashClear(&pBt->db_cache);
@@ -7491,6 +7545,7 @@ static int btreeReopenPrivateEnvironment(Btree *p)
if (t_ret != DB_RUNRECOVERY) /* ignore runrecovery */
ret = t_ret;
}
+ pDbEnv = NULL;
/* hold onto openMutex until done with open */
if (ret != 0)
@@ -7513,11 +7568,6 @@ static int btreeReopenPrivateEnvironment(Btree *p)
#endif
if ((ret = pDbEnv->set_lg_max(pDbEnv, pBt->logFileSize)) != 0)
goto err;
-#ifndef BDBSQL_OMIT_LOG_REMOVE
- if ((ret = pDbEnv->log_set_config(pDbEnv,
- DB_LOG_AUTO_REMOVE, 1)) != 0)
- goto err;
-#endif
#ifdef BDBSQL_FILE_PER_TABLE
/* Reuse dirPathBuf. */
dirPathName = dirPathBuf;
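The two hunks above move the DB_LOG_AUTO_REMOVE configuration so that it happens before DB_ENV->open() in openPrivateEnvironment() rather than in the reopen path. A minimal standalone sketch of that ordering constraint (not the adapter's actual setup; the environment home and flag set are illustrative assumptions):

#include <db.h>

static int open_env_with_auto_log_removal(const char *home, DB_ENV **out)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    /* Must be configured before DB_ENV->open(), hence the move above. */
    if ((ret = dbenv->log_set_config(dbenv, DB_LOG_AUTO_REMOVE, 1)) != 0)
        goto err;
    if ((ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK |
        DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE, 0)) != 0)
        goto err;
    *out = dbenv;
    return (0);
err:    (void)dbenv->close(dbenv, 0);
    return (ret);
}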
diff --git a/lang/sql/adapter/btreeInt.h b/lang/sql/adapter/btreeInt.h
index cc439d39..d5c3b0b4 100644
--- a/lang/sql/adapter/btreeInt.h
+++ b/lang/sql/adapter/btreeInt.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <errno.h>
@@ -15,7 +15,8 @@
#endif
#define INTKEY_BUFSIZE (sizeof(i64) + 2) /* We add 2 bytes to negatives. */
-#define MULTI_BUFSIZE 8 * SQLITE_DEFAULT_PAGE_SIZE
+/* MULTI_BUFSIZE needs to be at least as large as the maximum page size. */
+#define MULTI_BUFSIZE SQLITE_MAX_PAGE_SIZE
#define DBNAME_SIZE 20
#define NUMMETA 16
#define NUM_DB_PRAGMA 30
@@ -26,6 +27,15 @@
#define BT_MAX_SEQ_NAME 128
/*
+ * If BDBSQL_LARGE_RECORD_OPTIMIZATION is greater than 0, records of at least
+ * that many bytes are stored in an alternate format that improves the speed
+ * of reading and updating large records.
+ */
+#ifndef BDBSQL_LARGE_RECORD_OPTIMIZATION
+# define BDBSQL_LARGE_RECORD_OPTIMIZATION 0
+#endif
+
+/*
* The default size of the Berkeley DB environment's logging area, in
* bytes.
*/
@@ -94,7 +104,7 @@ typedef struct {
} CACHED_DB;
typedef struct {
- int32_t cache;
+ u32 cache;
int64_t min_val;
int64_t max_val;
int64_t start_val;
@@ -158,6 +168,7 @@ typedef enum { DB_STORE_NAMED, DB_STORE_TMP, DB_STORE_INMEM } storage_mode_t;
typedef enum { TRANS_NONE, TRANS_READ, TRANS_WRITE } txn_mode_t;
typedef enum { LOCKMODE_NONE, LOCKMODE_READ, LOCKMODE_WRITE } lock_mode_t;
typedef enum { NO_LSN_RESET, LSN_RESET_FILE } lsn_reset_t;
+typedef enum { BDBSQL_REP_CLIENT, BDBSQL_REP_MASTER, BDBSQL_REP_UNKNOWN } rep_site_type_t;
/* Declarations for functions that are shared by adapter source files. */
int btreeBeginTransInternal(Btree *p, int wrflag);
@@ -239,6 +250,7 @@ struct BtShared {
char *orig_name;
char *err_file;
char *err_msg;
+ char *master_address; /* Address of the replication master. */
u_int8_t fileid[DB_FILE_ID_LEN];
char *encrypt_pwd;
lsn_reset_t lsn_reset;
@@ -281,6 +293,9 @@ struct BtShared {
u_int32_t logFileSize; /* In bytes */
u_int32_t database_existed; /* Did the database file exist on open. */
u_int32_t read_txn_flags; /* Flags passed to the read transaction. */
+ /* Records >= blob_threshold stored as blob files.*/
+ u_int32_t blob_threshold;
+ u8 blobs_enabled; /* Whether this database can support blobs. */
u8 autoVacuum; /* Is auto-vacuum enabled? */
u8 incrVacuum; /* Is incremental vacuum enabled? */
u8 resultsBuffer; /* Query results are stored in a in-memory buffer */
@@ -300,6 +315,8 @@ struct BtShared {
int repStarted; /* Replication is configured and started. */
int repForceRecover; /* Force recovery on next open environment. */
int single_process; /* If non-zero, keep all environment on the heap. */
+ rep_site_type_t repRole; /* Whether this site is a master, client, or unknown. */
+ u_int32_t permFailures; /* Number of perm failures. */
};
struct BtCursor {
@@ -319,6 +336,7 @@ struct BtCursor {
DBT key, data, index;
i64 nKey;
u8 indexKeyBuf[CURSOR_BUFSIZE];
+ u8 hints;
DBT multiData;
void *multiGetPtr, *multiPutPtr;
void *threadID;
@@ -378,7 +396,12 @@ typedef enum {
LOG_VERBOSE, LOG_DEBUG, LOG_NORMAL, LOG_RELEASE, LOG_NONE
} loglevel_t;
-#define CURRENT_LOG_LEVEL LOG_RELEASE
+/*
+ * The Makefile can override this default; e.g., -DCURRENT_LOG_LEVEL=LOG_VERBOSE
+ */
+#ifndef CURRENT_LOG_LEVEL
+#define CURRENT_LOG_LEVEL LOG_RELEASE
+#endif
#ifdef NDEBUG
#define log_msg(...)
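The CURRENT_LOG_LEVEL guard above exists so a build can pass, for example, -DCURRENT_LOG_LEVEL=LOG_VERBOSE without editing the header. A minimal sketch of the same pattern; the log_msg_example() wrapper is illustrative only and is not part of btreeInt.h:

#include <stdio.h>

typedef enum { LOG_VERBOSE, LOG_DEBUG, LOG_NORMAL, LOG_RELEASE, LOG_NONE } loglevel_t;

#ifndef CURRENT_LOG_LEVEL
#define CURRENT_LOG_LEVEL LOG_RELEASE
#endif

static void log_msg_example(loglevel_t level, const char *msg)
{
    /* Messages below the configured level are dropped. */
    if (level >= CURRENT_LOG_LEVEL)
        fprintf(stderr, "%s\n", msg);
}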
diff --git a/lang/sql/adapter/db_encrypt.c b/lang/sql/adapter/db_encrypt.c
index a28dad21..ebc640bd 100644
--- a/lang/sql/adapter/db_encrypt.c
+++ b/lang/sql/adapter/db_encrypt.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -26,10 +26,38 @@ int sqlite3_key(sqlite3 *db, const void *key, int nkey) {
return sqlite3CodecAttach(db, 0, key, nkey);
}
+int sqlite3_key_v2(
+ sqlite3 *db, const char *zDbName, const void *key, int nkey) {
+ int backend;
+ const char *dbname;
+
+ /* NULL is an alias for the "main" database. */
+ if (zDbName == NULL)
+ dbname = "main";
+ else
+ dbname = zDbName;
+
+ for(backend = 0; backend < db->nDb; backend++) {
+ if (db->aDb[backend].zName == NULL)
+ continue;
+
+ if (sqlite3StrICmp(db->aDb[backend].zName, dbname) == 0)
+ break;
+ }
+ if (backend == db->nDb)
+ return SQLITE_NOTFOUND;
+ return sqlite3CodecAttach(db, backend, key, nkey);
+}
+
int sqlite3_rekey(sqlite3 *db, const void *key, int nkey) {
return 0;
}
+int sqlite3_rekey_v2(
+ sqlite3 *db, const char *zDbName, const void *key, int nkey) {
+ return 0;
+}
+
void sqlite3_activate_see(const char *zPassPhrase) {
return;
}
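A usage sketch for the new sqlite3_key_v2() entry point, assuming a codec-enabled build that declares it in sqlite3.h; the attached file name and passphrase are made-up examples:

#include <string.h>
#include "sqlite3.h"

static int key_attached_db(sqlite3 *db)
{
    int rc;

    rc = sqlite3_exec(db, "ATTACH DATABASE 'aux.db' AS aux;",
        NULL, NULL, NULL);
    if (rc != SQLITE_OK)
        return (rc);
    /* Passing NULL for the name would mean "main"; here "aux" is keyed. */
    return (sqlite3_key_v2(db, "aux", "passphrase",
        (int)strlen("passphrase")));
}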
diff --git a/lang/sql/adapter/db_pragma.c b/lang/sql/adapter/db_pragma.c
index 4c70afec..e7f8fa0e 100644
--- a/lang/sql/adapter/db_pragma.c
+++ b/lang/sql/adapter/db_pragma.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -11,7 +11,6 @@
#include "btreeInt.h"
extern void returnSingleInt(Parse *, const char *, i64);
-extern u8 getBoolean(const char *);
extern int __os_exists (ENV *, const char *, int *);
extern int __os_unlink (ENV *, const char *, int);
extern int __os_mkdir (ENV *, const char *, int);
@@ -23,6 +22,16 @@ extern int __env_ref_get (DB_ENV *, u_int32_t *);
static const char *PRAGMA_FILE = "pragma";
static const char *PRAGMA_VERSION = "1.0";
+static const char *ACK_POLICY_ALL = "all_sites";
+static const char *ACK_POLICY_ALL_AVAILABLE = "all_available";
+static const char *ACK_POLICY_NONE = "none";
+static const char *ACK_POLICY_ONE = "one";
+static const char *ACK_POLICY_QUORUM = "quorum";
+
+static const char *REP_SITE_MASTER = "MASTER";
+static const char *REP_SITE_CLIENT = "CLIENT";
+static const char *REP_SITE_UNKNOWN = "UNKNOWN";
+
static const u32 HDR_SIZE = 256;
static const u32 RECORD_HDR_SIZE = 8;
static const u32 VERSION_RECORD_SIZE = 12;
@@ -47,6 +56,57 @@ static const u32 DEFINED_PRAGMAS = 8;
#define dbExists (pDb->pBt->pBt->full_name != NULL && \
!__os_exists(NULL, pDb->pBt->pBt->full_name, NULL))
+/* Translates a text ack policy to its DB value. */
+static int textToAckPolicy(const char *policy)
+{
+ int len;
+ if (policy == NULL)
+ return (-1);
+
+ len = (int)strlen(policy);
+
+ if ((sqlite3StrNICmp(policy, ACK_POLICY_ALL, len)) == 0)
+ return (DB_REPMGR_ACKS_ALL);
+ else if ((sqlite3StrNICmp(policy, ACK_POLICY_ALL_AVAILABLE, len)) == 0)
+ return (DB_REPMGR_ACKS_ALL_AVAILABLE);
+ else if ((sqlite3StrNICmp(policy, ACK_POLICY_NONE, len)) == 0)
+ return (DB_REPMGR_ACKS_NONE);
+ else if ((sqlite3StrNICmp(policy, ACK_POLICY_ONE, len)) == 0)
+ return (DB_REPMGR_ACKS_ONE);
+ else if ((sqlite3StrNICmp(policy, ACK_POLICY_QUORUM, len)) == 0)
+ return (DB_REPMGR_ACKS_QUORUM);
+ else
+ return (-1);
+}
+
+/* Translates a DB value ack policy to text. */
+static const char *ackPolicyToText(int policy)
+{
+ if (policy == DB_REPMGR_ACKS_ALL)
+ return (ACK_POLICY_ALL);
+ else if (policy == DB_REPMGR_ACKS_ALL_AVAILABLE)
+ return (ACK_POLICY_ALL_AVAILABLE);
+ else if (policy == DB_REPMGR_ACKS_NONE)
+ return (ACK_POLICY_NONE);
+ else if (policy == DB_REPMGR_ACKS_ONE)
+ return (ACK_POLICY_ONE);
+ else if (policy == DB_REPMGR_ACKS_QUORUM)
+ return (ACK_POLICY_QUORUM);
+ else
+ return (NULL);
+}
+
+/* Translates a replication site type to text. */
+static const char *repSiteTypeToText(rep_site_type_t type)
+{
+ if (type == BDBSQL_REP_MASTER)
+ return (REP_SITE_MASTER);
+ else if (type == BDBSQL_REP_CLIENT)
+ return (REP_SITE_CLIENT);
+ else
+ return (REP_SITE_UNKNOWN);
+}
+
static u8 envIsClosed(Parse *pParse, Btree *p, const char *pragmaName)
{
int rc;
@@ -82,6 +142,12 @@ int bdbsqlPragmaMultiversion(Parse *pParse, Btree *p, u8 on)
return 1;
pBt = p->pBt;
+ if (pBt->blobs_enabled && on) {
+ sqlite3ErrorMsg(pParse,
+ "Cannot enable both multiversion and large record optimization.");
+ return 1;
+ }
+
/* Do not want other processes opening the environment */
mutexOpen = sqlite3MutexAlloc(OPEN_MUTEX(pBt->dbStorage));
sqlite3_mutex_enter(mutexOpen);
@@ -250,15 +316,29 @@ static int bdbsqlPragmaStartReplication(Parse *pParse, Db *pDb)
}
if (dbExists) {
+ if (!pBt->pBt->env_opened) {
+ if ((rc = btreeOpenEnvironment(pBt, 1)) != SQLITE_OK)
+ sqlite3ErrorMsg(pParse, "Could not start "
+ "replication on an existing database");
+ goto done;
+ }
+
/*
- * Turning on replication requires recovery on the underlying
+ * Opening the environment started repmgr if it was
+ * configured for it, so we are done here.
+ */
+ if (supportsReplication(pBt))
+ goto done;
+ /*
+ * Turning on replication on an existing environment not
+ * configured for replication requires recovery on the
* BDB environment, which requires single-threaded access.
* Make sure there are no other processes or threads accessing
* it. This is not foolproof, because there is a small chance
* that another process or thread could slip in there between
* this call and the recovery, but it should cover most cases.
*/
- if (hasDatabaseConnections(pBt)) {
+ if (hasDatabaseConnections(pBt) || pBt->pBt->nRef > 1) {
sqlite3ErrorMsg(pParse, "Close all database "
"connections before turning on replication");
goto done;
@@ -275,7 +355,7 @@ static int bdbsqlPragmaStartReplication(Parse *pParse, Db *pDb)
* the underlying BDB environment with replication always
* performs a recovery.
*/
- pBt->pBt->repStartMaster = 1;
+ pBt->pBt->repForceRecover = 1;
if ((rc = btreeReopenEnvironment(pBt, 0)) != SQLITE_OK)
sqlite3ErrorMsg(pParse, "Could not "
"start replication on an existing database");
@@ -310,6 +390,7 @@ done:
static int bdbsqlPragmaStopReplication(Parse *pParse, Db *pDb)
{
Btree *pBt;
+ char *old_addr = NULL;
int rc = SQLITE_OK;
pBt = pDb->pBt;
@@ -341,6 +422,13 @@ static int bdbsqlPragmaStopReplication(Parse *pParse, Db *pDb)
*/
pBt->pBt->repForceRecover = 1;
rc = btreeReopenEnvironment(pBt, 1);
+ sqlite3_mutex_enter(pBt->pBt->mutex);
+ pBt->pBt->repRole = BDBSQL_REP_UNKNOWN;
+ old_addr = pBt->pBt->master_address;
+ pBt->pBt->master_address = NULL;
+ sqlite3_mutex_leave(pBt->pBt->mutex);
+ if (old_addr)
+ sqlite3_free(old_addr);
done:
return rc;
@@ -383,14 +471,14 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
}
} else if (sqlite3StrNICmp(zLeft, "txn_bulk", 8) == 0) {
if (zRight)
- pBt->txn_bulk = getBoolean(zRight);
+ pBt->txn_bulk = sqlite3GetBoolean(zRight, 0);
returnSingleInt(pParse, "txn_bulk", (i64)pBt->txn_bulk);
parsed = 1;
/* Enables MVCC and transactions snapshots. */
} else if (sqlite3StrNICmp(zLeft, "multiversion", 12) == 0) {
if (zRight)
bdbsqlPragmaMultiversion(pParse, pDb->pBt,
- getBoolean(zRight));
+ sqlite3GetBoolean(zRight, 0));
returnSingleInt(pParse, "multiversion",
(i64)((pDb->pBt->pBt->env_oflags & DB_MULTIVERSION)?
@@ -401,7 +489,7 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
*/
} else if (sqlite3StrNICmp(zLeft, "snapshot_isolation", 18) == 0) {
if (zRight) {
- if (getBoolean(zRight)) {
+ if (sqlite3GetBoolean(zRight, 0)) {
if (pDb->pBt->pBt->env_oflags &
DB_MULTIVERSION) {
pDb->pBt->pBt->read_txn_flags
@@ -530,7 +618,7 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
setValue[0] = '\0';
startedRep = stoppedRep = turningOn = 0;
if (zRight) {
- turningOn = (getBoolean(zRight) == 1);
+ turningOn = (sqlite3GetBoolean(zRight, 0) == 1);
strcpy(setValue, turningOn ? "1" : "0");
rc = setPersistentPragma(pDb->pBt, zLeft,
setValue, pParse);
@@ -596,7 +684,7 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
outValue[0] = '\0';
if (zRight)
pDb->pBt->pBt->repStartMaster =
- getBoolean(zRight) == 1 ? 1 : 0;
+ sqlite3GetBoolean(zRight, 0) == 1 ? 1 : 0;
strcpy(outValue,
pDb->pBt->pBt->repStartMaster == 1 ? "1" : "0");
sqlite3VdbeSetNumCols(pParse->pVdbe, 1);
@@ -733,6 +821,40 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
pDb->pBt->txn_priority);
parsed = 1;
/*
+ * PRAGMA bdbsql_log_buffer; -- DB_ENV->get_lg_bsize
+ * PRAGMA bdbsql_log_buffer = N; -- DB_ENV->set_lg_bsize
+ * N is the size of the log buffer, in bytes.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "bdbsql_log_buffer", 17) == 0) {
+ int iLimit = -2;
+ u_int32_t val;
+
+ if (zRight &&
+ envIsClosed(pParse, pDb->pBt, "bdbsql_log_buffer")) {
+ if (!sqlite3GetInt32(zRight, &iLimit) ||
+ iLimit < 0)
+ sqlite3ErrorMsg(pParse,
+ "Invalid value bdbsql_log_buffer %s",
+ zRight);
+ else {
+ val = iLimit;
+ if ((ret =
+ pDb->pBt->pBt->dbenv->set_lg_bsize(
+ pDb->pBt->pBt->dbenv, val)) != 0) {
+ sqlite3ErrorMsg(pParse,
+ "Failed to set log buffer size "
+ "error: %d.", ret);
+ }
+ }
+ } else if (zRight == NULL) {
+ /* Get existing value */
+ pDb->pBt->pBt->dbenv->get_lg_bsize(
+ pDb->pBt->pBt->dbenv, &val);
+
+ returnSingleInt(pParse, "bdbsql_log_buffer", val);
+ }
+ parsed = 1;
+ /*
* PRAGMA bdbsql_single_process = boolean;
* Turn on/off omit sharing.
*/
@@ -743,7 +865,7 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
if (zRight &&
envIsClosed(pParse, pDb->pBt, "bdbsql_single_process")) {
- new_value = getBoolean(zRight);
+ new_value = sqlite3GetBoolean(zRight, 0);
if (new_value != pDb->pBt->pBt->single_process)
is_changed = 1;
}
@@ -868,7 +990,7 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
if (zRight &&
envIsClosed(pParse, pDb->pBt, "bdbsql_shared_resources")) {
- if (pDb->pBt->pBt->need_open) {
+ if (pDb->pBt->pBt->database_existed) {
/*
* The DBENV->set_memory_max() method must be
* called prior to opening/creating the database
@@ -918,7 +1040,7 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
if (zRight &&
envIsClosed(pParse, pDb->pBt, "bdbsql_lock_tablesize")) {
- if (pDb->pBt->pBt->need_open) {
+ if (pDb->pBt->pBt->database_existed) {
/*
* The DB_ENV->set_lk_tablesize() method must be
* called prior to opening/creating the database
@@ -949,6 +1071,244 @@ int bdbsqlPragma(Parse *pParse, char *zLeft, char *zRight, int iDb)
returnSingleInt(pParse, "bdbsql_lock_tablesize", val);
parsed = 1;
+ /*
+ * PRAGMA large_record_opt; -- Gets the blob threshold
+ * PRAGMA large_record_opt = N; -- Sets the blob threshold of all
+ * tables opened in the database to N bytes. A value of 0
+ * disables blobs.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "large_record_opt", 16) == 0) {
+ int iLimit = -2;
+ u_int32_t val;
+
+ if (zRight) {
+ if (!sqlite3GetInt32(zRight, &iLimit) || iLimit < 0) {
+ sqlite3ErrorMsg(pParse,
+ "Invalid value large_record_opt %s",
+ zRight);
+ } else {
+ /*
+ * SQL only supports records up to 1GB in
+ * size, so reject threshold values larger
+ * than 1GB to prevent overflow.
+ */
+ if (iLimit > GIGABYTE) {
+ sqlite3ErrorMsg(pParse,
+ "large_record_opt must be less than or equal to 1 gigabyte");
+ } else {
+ pDb->pBt->pBt->blob_threshold
+ = iLimit;
+ if (iLimit > 0) {
+ pDb->pBt->pBt->blobs_enabled
+ = 1;
+ }
+ }
+ }
+ }
+
+ val = pDb->pBt->pBt->blob_threshold;
+ returnSingleInt(pParse, "large_record_opt", val);
+ parsed = 1;
+ /*
+ * PRAGMA replication_ack_policy; -- DB_ENV->repmgr_get_ack_policy
+ * PRAGMA replication_ack_policy =
+ * [all_sites|all_available|none|one|quorum];
+ * -- DB_ENV->repmgr_set_ack_policy
+ * Sets the policy on how many clients must acknowledge a transaction
+ * commit message before the master will consider it durable.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "replication_ack_policy", 22) == 0) {
+ int val = -1;
+ const char *policy;
+
+ if (zRight) {
+ if ((val = textToAckPolicy(zRight)) == -1)
+ sqlite3ErrorMsg(pParse,
+ "Invalid value replication_ack_policy %s",
+ zRight);
+ else {
+ if ((ret =
+ pDb->pBt->pBt->dbenv->repmgr_set_ack_policy(
+ pDb->pBt->pBt->dbenv, val)) != 0) {
+ sqlite3ErrorMsg(pParse,
+ "Failed to set "
+ "replication_ack_policy. "
+ "Error: %d.", ret);
+ }
+ }
+ }
+
+ /* Get existing value */
+ pDb->pBt->pBt->dbenv->repmgr_get_ack_policy(
+ pDb->pBt->pBt->dbenv, &val);
+
+ policy = ackPolicyToText(val);
+ sqlite3VdbeSetNumCols(pParse->pVdbe, 1);
+ sqlite3VdbeSetColName(pParse->pVdbe, 0, COLNAME_NAME,
+ zLeft, SQLITE_STATIC);
+ if (policy != NULL)
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_String8, 0, 1, 0,
+ policy, 0);
+ else
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_String8, 0, 1, 0,
+ "No policy", 0);
+ sqlite3VdbeAddOp2(pParse->pVdbe, OP_ResultRow, 1, 1);
+ parsed = 1;
+ /*
+ * PRAGMA replication_ack_timeout; -- DB_ENV->rep_get_timeout
+ * PRAGMA replication_ack_timeout = N; -- DB_ENV->rep_set_timeout
+ * N is the timeout (greater than or equal to 0) on acknowledgments from
+ * clients when the master sends a transaction commit message.
+ */
+ } else if
+ (sqlite3StrNICmp(zLeft, "replication_ack_timeout", 23) == 0) {
+ int iLimit = -2;
+ db_timeout_t val;
+
+ if (zRight) {
+ if (!sqlite3GetInt32(zRight, &iLimit) || iLimit < 0)
+ sqlite3ErrorMsg(pParse,
+ "Invalid value replication_ack_timeout %s",
+ zRight);
+ else {
+ val = iLimit;
+ if ((ret =
+ pDb->pBt->pBt->dbenv->rep_set_timeout(
+ pDb->pBt->pBt->dbenv, DB_REP_ACK_TIMEOUT,
+ val)) != 0) {
+ sqlite3ErrorMsg(pParse,
+ "Failed to set "
+ "replication_ack_timeout. "
+ "Error: %d.", ret);
+ }
+ }
+ }
+
+ /* Get existing value */
+ pDb->pBt->pBt->dbenv->rep_get_timeout(
+ pDb->pBt->pBt->dbenv, DB_REP_ACK_TIMEOUT, &val);
+
+ returnSingleInt(pParse, "replication_ack_timeout", val);
+ parsed = 1;
+ /*
+ * PRAGMA replication_priority; -- DB_ENV->rep_get_priority
+ * PRAGMA replication_priority = N; -- DB_ENV->rep_set_priority
+ * N is the priority of the replication site (1 or greater), which is
+ * used to decide which site is elected master.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "replication_priority", 20) == 0) {
+ int iLimit = -2;
+ u_int32_t val;
+
+ if (zRight) {
+ if (!sqlite3GetInt32(zRight, &iLimit) || iLimit < 1)
+ sqlite3ErrorMsg(pParse,
+ "Invalid value replication_priority %s",
+ zRight);
+ else {
+ val = iLimit;
+ if ((ret =
+ pDb->pBt->pBt->dbenv->rep_set_priority(
+ pDb->pBt->pBt->dbenv, val)) != 0) {
+ sqlite3ErrorMsg(pParse,
+ "Failed to set replication "
+ "priority. Error: %d.", ret);
+ }
+ }
+ }
+
+ /* Get existing value */
+ pDb->pBt->pBt->dbenv->rep_get_priority(
+ pDb->pBt->pBt->dbenv, &val);
+
+ returnSingleInt(pParse, "replication_priority", val);
+ parsed = 1;
+ /*
+ * PRAGMA replication_num_sites; -- DB_ENV->repmgr_site_list
+ * Returns the number of sites in the replication group.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "replication_num_sites", 21) == 0) {
+ u_int32_t val = -1;
+ DB_REPMGR_SITE *sites = NULL;
+
+ /* Get existing value */
+ pDb->pBt->pBt->dbenv->repmgr_site_list(
+ pDb->pBt->pBt->dbenv, &val, &sites);
+
+ /*
+ * repmgr_site_list only gets the list of remote sites,
+ * so add one for the local site if replication has been
+ * started.
+ */
+ if (pDb->pBt->pBt->repStarted)
+ val++;
+
+ if (sites)
+ sqlite3_free(sites);
+
+ returnSingleInt(pParse, "replication_num_sites", val);
+ parsed = 1;
+ /*
+ * PRAGMA replication_perm_failed; -- DB_REP_PERM_FAILED
+ * Returns the number of times, since this pragma was last called,
+ * that the master was unable to get enough acknowledgments from
+ * clients when committing a transaction.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "replication_perm_failed", 23) == 0) {
+ u_int32_t val = 0;
+
+ sqlite3_mutex_enter(pDb->pBt->pBt->mutex);
+ val = pDb->pBt->pBt->permFailures;
+ pDb->pBt->pBt->permFailures = 0;
+ sqlite3_mutex_leave(pDb->pBt->pBt->mutex);
+
+ returnSingleInt(pParse, "replication_perm_failed", val);
+ parsed = 1;
+ /*
+ * PRAGMA replication_site_status;
+ * Returns MASTER if the site is a replication master, CLIENT if the
+ * site is a replication client, and UNKNOWN if replication is not
+ * enabled or the site status is still being decided (such as during
+ * an election).
+ */
+ } else if (sqlite3StrNICmp(zLeft, "replication_site_status", 23) == 0) {
+ rep_site_type_t val;
+ const char *type;
+
+ sqlite3_mutex_enter(pDb->pBt->pBt->mutex);
+ val = pDb->pBt->pBt->repRole;
+ sqlite3_mutex_leave(pDb->pBt->pBt->mutex);
+
+ type = repSiteTypeToText(val);
+ sqlite3VdbeSetNumCols(pParse->pVdbe, 1);
+ sqlite3VdbeSetColName(pParse->pVdbe, 0, COLNAME_NAME,
+ zLeft, SQLITE_STATIC);
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_String8, 0, 1, 0, type, 0);
+ sqlite3VdbeAddOp2(pParse->pVdbe, OP_ResultRow, 1, 1);
+ parsed = 1;
+ /*
+ * PRAGMA replication_get_master;
+ * Returns the host:port of the master, or NULL if replication has
+ * not started or there is currently no master.
+ */
+ } else if (sqlite3StrNICmp(zLeft, "replication_get_master", 23) == 0) {
+ const char *address;
+
+ sqlite3_mutex_enter(pDb->pBt->pBt->mutex);
+ address = pDb->pBt->pBt->master_address;
+ sqlite3_mutex_leave(pDb->pBt->pBt->mutex);
+
+ sqlite3VdbeSetNumCols(pParse->pVdbe, 1);
+ sqlite3VdbeSetColName(pParse->pVdbe, 0, COLNAME_NAME,
+ zLeft, SQLITE_STATIC);
+ if (address)
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_String8, 0, 1, 0, address, 0);
+ else
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_String8, 0, 1, 0, "NULL", 0);
+ sqlite3VdbeAddOp2(pParse->pVdbe, OP_ResultRow, 1, 1);
+ parsed = 1;
}
/* Return semantics to match strcmp. */
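A sketch (not part of the patch) of exercising several of the pragmas added above from C with sqlite3_exec(); each pragma returns a single result row, which the callback prints:

#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *arg, int ncol, char **vals, char **names)
{
    int i;

    (void)arg;
    for (i = 0; i < ncol; i++)
        printf("%s = %s\n", names[i], vals[i] ? vals[i] : "NULL");
    return (0);
}

static void show_replication_state(sqlite3 *db)
{
    (void)sqlite3_exec(db, "PRAGMA replication_site_status;",
        print_row, NULL, NULL);
    (void)sqlite3_exec(db, "PRAGMA replication_get_master;",
        print_row, NULL, NULL);
    (void)sqlite3_exec(db, "PRAGMA replication_perm_failed;",
        print_row, NULL, NULL);
    (void)sqlite3_exec(db, "PRAGMA large_record_opt;",
        print_row, NULL, NULL);
}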
diff --git a/lang/sql/adapter/db_sequence.c b/lang/sql/adapter/db_sequence.c
index 4c4d1265..ae9281c1 100644
--- a/lang/sql/adapter/db_sequence.c
+++ b/lang/sql/adapter/db_sequence.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
@@ -54,8 +54,10 @@
#define DB_SEQ_NEXT 0x0000
#define DB_SEQ_CURRENT 0x0001
-#define MSG_CREATE_FAIL "Sequence create failed: "
-#define MSG_MALLOC_FAIL "Malloc failed during sequence operation."
+#define MSG_CREATE_FAIL "Sequence create failed: "
+#define MSG_MALLOC_FAIL "Malloc failed during sequence operation."
+#define MSG_INTMPDB_FAIL "Sequences do not support in-memory or" \
+ " temporary databases."
#define CACHE_ENTRY_VALID(_e) \
(_e != NULL && \
@@ -78,6 +80,7 @@ static int btreeSeqPutCookie(
sqlite3_context *context, Btree *p, SEQ_COOKIE *cookie, u_int32_t flags);
static int btreeSeqRemoveHandle(
sqlite3_context *context, Btree *p, CACHED_DB *cache_entry);
+static void btreeSeqSetSeqName(SEQ_COOKIE *cookie, const char *name);
static int btreeSeqStartTransaction(
sqlite3_context *context, Btree *p, int is_write);
@@ -96,6 +99,7 @@ static void db_seq_create_func(
"create_sequence()");
return;
}
+ log_msg(LOG_NORMAL, "db_seq_create_func(%s)", sqlite3_value_text(argv[0]));
/*
* Ensure that the sequence name is OK with our static buffer
* size. We need extra characters for "seq_" and "_db".
@@ -126,9 +130,8 @@ static void db_seq_create_func(
memset(&cookie, 0, sizeof(SEQ_COOKIE));
cookie.incr = 1;
- sqlite3_snprintf(BT_MAX_SEQ_NAME, cookie.name, "seq_%s",
- sqlite3_value_text(argv[0]));
- cookie.name_len = (int)strlen(cookie.name);
+ btreeSeqSetSeqName(&cookie, sqlite3_value_text(argv[0]));
+ log_msg(LOG_NORMAL, "db_seq_create_func(%s)", cookie.name);
if (pBt->dbStorage == DB_STORE_NAMED && btreeSeqExists(context, p,
cookie.name) == 1) {
btreeSeqError(context, SQLITE_ERROR,
@@ -160,7 +163,7 @@ static void db_seq_create_func(
"%sInvalid parameter.", MSG_CREATE_FAIL);
goto err;
}
- cookie.cache = sqlite3_value_int(argv[i]);
+ cookie.cache = (u32)sqlite3_value_int(argv[i]);
} else if (strncmp((char *)sqlite3_value_text(argv[i]),
"incr", 4) == 0) {
if (i == argc ||
@@ -255,7 +258,7 @@ static void db_seq_create_func(
if ((rc = btreeSeqGetHandle(context, p, SEQ_HANDLE_CREATE, &cookie)) !=
SQLITE_OK) {
- if (rc != SQLITE_ERROR)
+ if (rc != DB_NOINTMP)
btreeSeqError(context, dberr2sqlite(rc, NULL),
"Failed to create sequence %s. Error: %s",
(const char *)sqlite3_value_text(argv[0]),
@@ -290,9 +293,8 @@ static void db_seq_drop_func(
return;
}
- sqlite3_snprintf(BT_MAX_SEQ_NAME, cookie.name, "seq_%s",
- sqlite3_value_text(argv[0]));
- cookie.name_len = (int)strlen(cookie.name);
+ btreeSeqSetSeqName(&cookie, sqlite3_value_text(argv[0]));
+ log_msg(LOG_NORMAL, "db_seq_drop_func(%s)", cookie.name);
rc = btreeSeqGetHandle(context, p, SEQ_HANDLE_OPEN, &cookie);
if (rc != SQLITE_OK) {
@@ -300,7 +302,7 @@ static void db_seq_drop_func(
if (rc == DB_NOTFOUND)
btreeSeqError(context, dberr2sqlite(rc, NULL),
"no such sequence: %s", cookie.name + 4);
- else if (rc != SQLITE_ERROR)
+ else if (rc != DB_NOINTMP)
btreeSeqError(context, dberr2sqlite(rc, NULL),
"Fail to drop sequence %s. Error: %s",
cookie.name + 4, db_strerror(rc));
@@ -322,18 +324,19 @@ static void db_seq_drop_func(
goto done;
}
+ /*
+ * Drop the mutex - it's not valid to begin a transaction while
+ * holding the mutex. We can drop it safely because its use is to
+ * protect handle cache changes.
+ */
sqlite3_mutex_leave(pBt->mutex);
+
if ((rc = btreeSeqStartTransaction(context, p, 1)) != SQLITE_OK) {
btreeSeqError(context, SQLITE_ERROR,
"Could not begin transaction for drop.");
return;
}
- /*
- * Drop the mutex - it's not valid to begin a transaction while
- * holding the mutex. We can drop it safely because it's use is to
- * protect handle cache changes.
- */
sqlite3_mutex_enter(pBt->mutex);
btreeSeqRemoveHandle(context, p, cache_entry);
done: sqlite3_mutex_leave(pBt->mutex);
@@ -369,6 +372,8 @@ static void btreeSeqGetVal(
p = db->aDb[0].pBt;
pBt = p->pBt;
memset(&cookie, 0, sizeof(cookie));
+ log_msg(LOG_NORMAL, "btreeSeqGetVal(%s, %s)", name,
+ mode == DB_SEQ_NEXT ? "next" : "current");
if (!p->connected &&
(rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK) {
@@ -378,15 +383,14 @@ static void btreeSeqGetVal(
return;
}
- sqlite3_snprintf(BT_MAX_SEQ_NAME, cookie.name, "seq_%s", name);
- cookie.name_len = (int)strlen(cookie.name);
+ btreeSeqSetSeqName(&cookie, name);
rc = btreeSeqGetHandle(context, p, SEQ_HANDLE_OPEN, &cookie);
if (rc != SQLITE_OK) {
if (rc == DB_NOTFOUND)
btreeSeqError(context, dberr2sqlite(rc, NULL),
"no such sequence: %s", name);
- else if (rc != SQLITE_ERROR)
+ else if (rc != DB_NOINTMP)
btreeSeqError(context, dberr2sqlite(rc, NULL),
"Fail to get next value from seq %s. Error: %s",
name, db_strerror(rc));
@@ -448,7 +452,7 @@ static void btreeSeqGetVal(
}
/* Cached gets can't be transactionally protected. */
if ((ret = cookie.handle->get(cookie.handle, NULL,
- cookie.incr, &val, 0)) != 0) {
+ (u_int32_t)cookie.incr, &val, 0)) != 0) {
if (ret == EINVAL)
btreeSeqError(context, SQLITE_ERROR,
"Sequence value out of bounds.");
@@ -509,10 +513,20 @@ static int btreeSeqGetHandle(sqlite3_context *context, Btree *p,
/* Does not support in-memory db and temp db for now */
if (pBt->dbStorage != DB_STORE_NAMED) {
- btreeSeqError(context, SQLITE_ERROR,
- "Sequences do not support in-memory or "
- "temporary databases.");
- return (SQLITE_ERROR);
+ btreeSeqError(context, SQLITE_ERROR, MSG_INTMPDB_FAIL);
+ return (DB_NOINTMP);
+ }
+
+ /* Tell sqlite3VdbeHalt() that this step has a transaction to end. */
+ if (p->db->pVdbe->bIsReader == 0) {
+ p->db->pVdbe->bIsReader = 1;
+ p->db->nVdbeRead++;
}
/*
@@ -863,6 +877,41 @@ static int btreeSeqPutCookie(
}
/*
+ * According to the documentation, the sequence name should be converted
+ * to lowercase unless it is surrounded by quotation marks.
+ * This function assumes that the sequence name fits in the buffer to which
+ * cookie.name points.
+ */
+static void btreeSeqSetSeqName(SEQ_COOKIE *cookie, const char *name)
+{
+ char lowercase[BT_MAX_SEQ_NAME];
+ int i;
+ size_t len;
+
+ if (name == NULL) {
+ strcpy(cookie->name, "seq_");
+ cookie->name_len = 4;
+ return;
+ }
+
+ len = strlen(name);
+ if (name[0] == '"' && name[len-1] == '"')
+ sqlite3_snprintf(
+ BT_MAX_SEQ_NAME, cookie->name, "seq_%s", name);
+ else {
+ memset(lowercase, 0, BT_MAX_SEQ_NAME);
+ for (i = 0; i < len; i++) {
+ lowercase[i] = sqlite3UpperToLower[*(name + i)];
+ }
+ sqlite3_snprintf(
+ BT_MAX_SEQ_NAME, cookie->name, "seq_%s", lowercase);
+ }
+ cookie->name_len = (int)strlen(cookie->name);
+
+ return;
+}
+
+/*
* SQLite manages explicit transactions by setting a flag when a BEGIN; is
* issued, then starting an actual transaction in the btree layer when the
* first operation happens (a read txn if it's a read op, a write txn if write)
@@ -873,7 +922,7 @@ static int btreeSeqPutCookie(
* write operations, and thus we need a valid statement_txn.
* - In an explicit transaction, and the first statement. Start a txn and a
statement txn.
- * - In an explicit transaction and not the first statemetn. Start a statement
+ * - In an explicit transaction and not the first statement. Start a statement
* transaction.
*
* The SQLite vdbe will take care of closing the statement transaction for us,
@@ -883,7 +932,7 @@ static int btreeSeqPutCookie(
* that case (and this function should not be called).
*
* It's safe to call this method multiple times since both
- * sqlite3BtreeBeginTrans and sqlite3BtreeBeginStmt are no-ops on subsequent
+ * btreeBeginTransInternal and sqlite3BtreeBeginStmt are no-ops on subsequent
* calls.
*/
static int btreeSeqStartTransaction(
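A sketch of the case handling implemented by btreeSeqSetSeqName() above, assuming the SQL-level wrappers are registered as create_sequence() and nextval(), as in the Berkeley DB SQL sequence API; adjust the names if a build registers them differently:

#include "sqlite3.h"

static void sequence_name_example(sqlite3 *db)
{
    /* Unquoted: normalized to lowercase, stored internally as seq_mycounter. */
    (void)sqlite3_exec(db, "SELECT create_sequence('MyCounter');",
        NULL, NULL, NULL);
    (void)sqlite3_exec(db, "SELECT nextval('mycounter');",
        NULL, NULL, NULL);

    /* Quoted: the internal name keeps the quotes and the original case. */
    (void)sqlite3_exec(db, "SELECT create_sequence('\"MyCounter\"');",
        NULL, NULL, NULL);
    (void)sqlite3_exec(db, "SELECT nextval('\"MyCounter\"');",
        NULL, NULL, NULL);
}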
diff --git a/lang/sql/adapter/db_shell.c b/lang/sql/adapter/db_shell.c
index b2b23e28..ba57320d 100644
--- a/lang/sql/adapter/db_shell.c
+++ b/lang/sql/adapter/db_shell.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/lang/sql/adapter/pager.c b/lang/sql/adapter/pager.c
index 769ce98e..d909e53c 100644
--- a/lang/sql/adapter/pager.c
+++ b/lang/sql/adapter/pager.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include "sqliteInt.h"
@@ -60,10 +60,27 @@ sqlite3_file *sqlite3PagerFile(Pager *pPager) {
/*
** Return the full pathname of the database file.
*/
-const char *sqlite3PagerFilename(Pager *pPager) {
+const char *sqlite3PagerFilename(Pager *pPager, int nullIfMemDb) {
Btree *p = (Btree *)pPager;
BtShared *pBt = p->pBt;
- return (pBt->orig_name);
+ return ((nullIfMemDb && (pBt->dbStorage == DB_STORE_TMP
+ || pBt->dbStorage == DB_STORE_INMEM)) ? "" : pBt->orig_name);
+}
+
+/*
+** Return TRUE if the database file is opened read-only. Return FALSE
+** if the database is (in theory) writable.
+*/
+u8 sqlite3PagerIsreadonly(Pager *pPager){
+ Btree *p = (Btree *)pPager;
+ return (p->readonly ? 1 : 0);
+}
+
+/*
+** Free as much memory as possible from the pager.
+*/
+void sqlite3PagerShrink(Pager *pPager){
+ /***************IMPLEMENT***************/
}
/*
@@ -201,6 +218,39 @@ int sqlite3PagerWalSupported(Pager *pPager) {
#endif /* SQLITE_OMIT_WAL */
+/*
+** Parameter eStat must be either SQLITE_DBSTATUS_CACHE_HIT or
+** SQLITE_DBSTATUS_CACHE_MISS. Before returning, *pnVal is incremented by the
+** current cache hit or miss count, according to the value of eStat. If the
+** reset parameter is non-zero, the cache hit or miss count is zeroed before
+** returning.
+*/
+void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){
+ Btree *p;
+ BtShared *pBt;
+ DB_MPOOL_STAT *mpstat;
+ uintmax_t stat;
+
+ p = (Btree *)pPager;
+ pBt = p->pBt;
+
+ assert(eStat == SQLITE_DBSTATUS_CACHE_HIT ||
+ eStat==SQLITE_DBSTATUS_CACHE_MISS);
+ /*
+ * TODO: reset differs from SQLite here. We clear all mpool stats,
+ * whereas SQLite clears only the particular field queried.
+ */
+ if (pBt->dbenv->memp_stat(
+ pBt->dbenv, &mpstat, NULL, reset ? DB_STAT_CLEAR : 0) != 0)
+ return;
+ if (eStat == SQLITE_DBSTATUS_CACHE_HIT)
+ stat = mpstat->st_cache_hit;
+ else
+ stat = mpstat->st_cache_miss;
+ *pnVal += (int)stat;
+ sqlite3_free(mpstat);
+}
+
#ifdef SQLITE_TEST
int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */
int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */
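sqlite3PagerCacheStat() above is what ultimately backs the public sqlite3_db_status() cache counters. A usage sketch (not part of the adapter), keeping in mind the TODO above that a reset clears all memory-pool statistics here rather than a single counter:

#include <stdio.h>
#include "sqlite3.h"

static void report_cache_stats(sqlite3 *db)
{
    int cur, hiwtr;

    if (sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT,
        &cur, &hiwtr, 0) == SQLITE_OK)
        printf("cache hits:   %d\n", cur);
    if (sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS,
        &cur, &hiwtr, 0) == SQLITE_OK)
        printf("cache misses: %d\n", cur);
}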
diff --git a/lang/sql/adapter/pager.h b/lang/sql/adapter/pager.h
index fe5c0568..b0504a8f 100644
--- a/lang/sql/adapter/pager.h
+++ b/lang/sql/adapter/pager.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <db.h>
@@ -38,7 +38,24 @@ typedef db_pgno_t Pgno;
#define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */
#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */
#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */
-
+ /*
+ ** Flags that make up the mask passed to sqlite3PagerAcquire().
+ */
+ #define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */
+ #define PAGER_GET_READONLY 0x02 /* Read-only page is acceptable */
+
+/*
+** Flags for sqlite3PagerSetFlags()
+*/
+#define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */
+#define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */
+#define PAGER_SYNCHRONOUS_FULL 0x03 /* PRAGMA synchronous=FULL */
+#define PAGER_SYNCHRONOUS_MASK 0x03 /* Mask for three values above */
+#define PAGER_FULLFSYNC 0x04 /* PRAGMA fullfsync=ON */
+#define PAGER_CKPT_FULLFSYNC 0x08 /* PRAGMA checkpoint_fullfsync=ON */
+#define PAGER_CACHESPILL 0x10 /* PRAGMA cache_spill=ON */
+#define PAGER_FLAGS_MASK 0x1c /* All above except SYNCHRONOUS */
+
/*
** Default maximum size for log files. This value may be overridden using the
** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit".
@@ -47,18 +64,22 @@ typedef db_pgno_t Pgno;
#define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT 2 * 1024 * 1024
#endif
+const char *sqlite3PagerFilename(Pager *pPager, int);
+void sqlite3PagerCacheStat(Pager *, int, int, int *);
int sqlite3PagerExclusiveLock(Pager *pPager);
sqlite3_file *sqlite3PagerFile(Pager*);
-const char *sqlite3PagerFilename(Pager *pPager);
int sqlite3PagerGetJournalMode(Pager *pPager);
int sqlite3PagerIsMemdb(Pager*);
+u8 sqlite3PagerIsreadonly(Pager*);
int sqlite3PagerJournalMode(Pager *, int);
i64 sqlite3PagerJournalSizeLimit(Pager *, i64);
int sqlite3PagerLockingMode(Pager *, int);
int sqlite3PagerMemUsed(Pager *pPager);
int sqlite3PagerOkToChangeJournalMode(Pager *pPager);
int sqlite3PagerPagecount(Pager*, int*);
+void sqlite3PagerSetFlags(Pager*,unsigned);
int sqlite3PagerSetJournalMode(Pager *pPager, int eMode);
+void sqlite3PagerShrink(Pager*);
#ifndef SQLITE_OMIT_WAL
int sqlite3PagerWalCallback(Pager *pPager);
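A small sketch of how the sqlite3PagerSetFlags() bits declared above are packed: the low two bits carry the synchronous level and the remaining bits are independent booleans. The macro values are copied from the header; the decoder function is illustrative only:

#include <stdio.h>

#define PAGER_SYNCHRONOUS_MASK  0x03
#define PAGER_FULLFSYNC         0x04
#define PAGER_CKPT_FULLFSYNC    0x08
#define PAGER_CACHESPILL        0x10

static void decode_pager_flags(unsigned flags)
{
    printf("synchronous level: %u\n", flags & PAGER_SYNCHRONOUS_MASK);
    printf("fullfsync:         %s\n",
        (flags & PAGER_FULLFSYNC) ? "on" : "off");
    printf("ckpt_fullfsync:    %s\n",
        (flags & PAGER_CKPT_FULLFSYNC) ? "on" : "off");
    printf("cache_spill:       %s\n",
        (flags & PAGER_CACHESPILL) ? "on" : "off");
}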
diff --git a/lang/sql/adapter/pcache.c b/lang/sql/adapter/pcache.c
index b023db30..816ce7b3 100644
--- a/lang/sql/adapter/pcache.c
+++ b/lang/sql/adapter/pcache.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include "sqliteInt.h"
diff --git a/lang/sql/adapter/pcache.h b/lang/sql/adapter/pcache.h
index 476701f3..425e15f9 100644
--- a/lang/sql/adapter/pcache.h
+++ b/lang/sql/adapter/pcache.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
typedef struct PgHdr PgHdr;
@@ -17,4 +17,8 @@ void sqlite3PCacheSetDefault(void);
#ifdef SQLITE_TEST
void sqlite3PcacheStats(int *a,int *b,int *c,int *d);
#endif
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+int sqlite3PcacheReleaseMemory(int nReq);
+#endif
+
diff --git a/lang/sql/adapter/pcache1.c b/lang/sql/adapter/pcache1.c
index e69de29b..685e179f 100644
--- a/lang/sql/adapter/pcache1.c
+++ b/lang/sql/adapter/pcache1.c
@@ -0,0 +1,10 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
+ */
+#include "sqliteInt.h"
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+int sqlite3PcacheReleaseMemory(int nReq){ return nReq; }
+#endif
diff --git a/lang/sql/adapter/sqlite-patches/01_sqlite_excl_test.patch b/lang/sql/adapter/sqlite-patches/01_sqlite_excl_test.patch
deleted file mode 100644
index 8e69044e..00000000
--- a/lang/sql/adapter/sqlite-patches/01_sqlite_excl_test.patch
+++ /dev/null
@@ -1,87 +0,0 @@
---- test/tester.tcl
-+++ test/tester.tcl
-@@ -266,16 +266,21 @@
- proc set_test_counter {counter args} {
- if {[llength $args]} {
- set ::TC($counter) [lindex $args 0]
- }
- set ::TC($counter)
- }
- }
-
-+# Pull in the list of test cases that are excluded and ignored when running
-+# with Berkeley DB.
-+#
-+source $testdir/../../../../test/sql/bdb_excl.test
-+
- # Record the fact that a sequence of tests were omitted.
- #
- proc omit_test {name reason} {
- set omitList [set_test_counter omit_list]
- lappend omitList [list $name $reason]
- set_test_counter omit_list $omitList
- }
-
-@@ -300,22 +305,29 @@
- set_test_counter count [expr [set_test_counter count] + 1]
- }
-
-
- # Invoke the do_test procedure to run a single test
- #
- proc do_test {name cmd expected} {
-
-- global argv cmdlinearg
-+ global argv cmdlinearg IGNORE_CASES EXCLUDE_CASES
-
- fix_testname name
-
- sqlite3_memdebug_settitle $name
-
-+ foreach pattern $EXCLUDE_CASES {
-+ if {[string match $pattern $name]} {
-+ puts "$name... Skipping"
-+ flush stdout
-+ return
-+ }
-+ }
- # if {[llength $argv]==0} {
- # set go 1
- # } else {
- # set go 0
- # foreach pattern $argv {
- # if {[string match $pattern $name]} {
- # set go 1
- # break
-@@ -329,18 +341,29 @@
-
- incr_ntest
- puts -nonewline $name...
- flush stdout
- if {[catch {uplevel #0 "$cmd;\n"} result]} {
- puts "\nError: $result"
- fail_test $name
- } elseif {[string compare $result $expected]} {
-- puts "\nExpected: \[$expected\]\n Got: \[$result\]"
-- fail_test $name
-+ set ignore 0
-+ foreach pattern $IGNORE_CASES {
-+ if {[string match $pattern $name]} {
-+ set ignore 1
-+ break
-+ }
-+ }
-+ if {$ignore} {
-+ puts " Ignored"
-+ } else {
-+ puts "\nExpected: \[$expected\]\n Got: \[$result\]"
-+ fail_test $name
-+ }
- } else {
- puts " Ok"
- }
- flush stdout
- }
-
- proc fix_testname {varname} {
- upvar $varname testname
diff --git a/lang/sql/adapter/sqlite-patches/02_sqlite_test.patch b/lang/sql/adapter/sqlite-patches/02_sqlite_test.patch
index fb6eabcb..7b431308 100644
--- a/lang/sql/adapter/sqlite-patches/02_sqlite_test.patch
+++ b/lang/sql/adapter/sqlite-patches/02_sqlite_test.patch
@@ -1,44 +1,15 @@
---- src/tclsqlite.c
-+++ src/tclsqlite.c
-@@ -761,7 +761,7 @@
- case SQLITE_INTEGER: {
- sqlite_int64 v = sqlite3_value_int64(pIn);
- if( v>=-2147483647 && v<=2147483647 ){
-- pVal = Tcl_NewIntObj(v);
-+ pVal = Tcl_NewIntObj((int)v);
- }else{
- pVal = Tcl_NewWideIntObj(v);
- }
-@@ -1441,7 +1441,7 @@
- case SQLITE_INTEGER: {
- sqlite_int64 v = sqlite3_column_int64(pStmt, iCol);
- if( v>=-2147483647 && v<=2147483647 ){
-- return Tcl_NewIntObj(v);
-+ return Tcl_NewIntObj((int)v);
- }else{
- return Tcl_NewWideIntObj(v);
- }
--- src/test1.c
+++ src/test1.c
-@@ -3207,7 +3207,7 @@
- char *value;
- int rc;
-
-- void (*xDel)() = (objc==6?SQLITE_STATIC:SQLITE_TRANSIENT);
-+ void (*xDel)(void*) = (objc==6?SQLITE_STATIC:SQLITE_TRANSIENT);
- Tcl_Obj *oStmt = objv[objc-4];
- Tcl_Obj *oN = objv[objc-3];
- Tcl_Obj *oString = objv[objc-2];
-@@ -3555,7 +3555,7 @@
+@@ -3626,7 +3626,7 @@
if( bytes>=0 ){
- bytes = bytes - (zTail-zSql);
+ bytes = bytes - (int)(zTail-zSql);
}
-- if( strlen(zTail)<bytes ){
-+ if(bytes>-1 && strlen(zTail)<(unsigned int)bytes ){
- bytes = strlen(zTail);
+- if( (int)strlen(zTail)<bytes ){
++ if(bytes>-1 && (int)strlen(zTail)<(unsigned int)bytes ){
+ bytes = (int)strlen(zTail);
}
Tcl_ObjSetVar2(interp, objv[4], 0, Tcl_NewStringObj(zTail, bytes), 0);
-@@ -5389,8 +5389,6 @@
+@@ -6297,8 +6297,6 @@
extern int sqlite3_hostid_num;
#endif
extern int sqlite3_max_blobsize;
@@ -47,7 +18,7 @@
static struct {
char *zName;
Tcl_CmdProc *xProc;
-@@ -5554,7 +5552,6 @@
+@@ -6486,7 +6486,6 @@
{ "tcl_variable_type", tcl_variable_type, 0 },
#ifndef SQLITE_OMIT_SHARED_CACHE
{ "sqlite3_enable_shared_cache", test_enable_shared, 0 },
@@ -57,20 +28,18 @@
#ifdef SQLITE_ENABLE_COLUMN_METADATA
--- src/test2.c
+++ src/test2.c
-@@ -19,6 +19,7 @@
- #include <string.h>
- #include <ctype.h>
+@@ -21,475 +21,14 @@
+
+ extern const char *sqlite3ErrName(int);
+#ifndef SQLITE_OMIT_DISKIO
++
/*
- ** Interpret an SQLite error number
- */
-@@ -53,470 +54,6 @@
** Page size and reserved size used for testing.
*/
static int test_pagesize = 1024;
--
--/*
+
+ /*
-** Dummy page reinitializer
-*/
-static void pager_test_reiniter(DbPage *pNotUsed){
@@ -103,7 +72,7 @@
- SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_MAIN_DB,
- pager_test_reiniter);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- sqlite3PagerSetCachesize(pPager, nPage);
@@ -135,7 +104,7 @@
- pPager = sqlite3TestTextToPtr(argv[1]);
- rc = sqlite3PagerClose(pPager);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- return TCL_OK;
@@ -162,7 +131,7 @@
- pPager = sqlite3TestTextToPtr(argv[1]);
- rc = sqlite3PagerRollback(pPager);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- return TCL_OK;
@@ -189,12 +158,12 @@
- pPager = sqlite3TestTextToPtr(argv[1]);
- rc = sqlite3PagerCommitPhaseOne(pPager, 0, 0);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- rc = sqlite3PagerCommitPhaseTwo(pPager);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- return TCL_OK;
@@ -221,7 +190,7 @@
- pPager = sqlite3TestTextToPtr(argv[1]);
- rc = sqlite3PagerOpenSavepoint(pPager, 1);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- return TCL_OK;
@@ -249,7 +218,7 @@
- rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, 0);
- sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, 0);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- return TCL_OK;
@@ -276,7 +245,7 @@
- pPager = sqlite3TestTextToPtr(argv[1]);
- rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, 0);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- return TCL_OK;
@@ -369,7 +338,7 @@
- rc = sqlite3PagerGet(pPager, pgno, &pPage);
- }
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- sqlite3_snprintf(sizeof(zBuf),zBuf,"%p",pPage);
@@ -523,7 +492,7 @@
- pPage = (DbPage *)sqlite3TestTextToPtr(argv[1]);
- rc = sqlite3PagerWrite(pPage);
- if( rc!=SQLITE_OK ){
-- Tcl_AppendResult(interp, errorName(rc), 0);
+- Tcl_AppendResult(interp, sqlite3ErrName(rc), 0);
- return TCL_ERROR;
- }
- pData = sqlite3PagerGetData(pPage);
@@ -533,10 +502,11 @@
-}
-
-#ifndef SQLITE_OMIT_DISKIO
- /*
- ** Usage: fake_big_file N FILENAME
- **
-@@ -544,6 +77,12 @@
+-/*
+ ** Usage: fake_big_file N FILENAME
+ **
+ ** Write a few bytes at the N megabyte point of FILENAME. This will
+@@ -518,6 +57,12 @@
}
if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR;
@@ -547,9 +517,9 @@
+ return TCL_ERROR;
+
pVfs = sqlite3_vfs_find(0);
- rc = sqlite3OsOpenMalloc(pVfs, argv[2], &fd,
- (SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB), 0
-@@ -564,31 +103,6 @@
+ nFile = (int)strlen(argv[2]);
+ zFile = sqlite3_malloc( nFile+2 );
+@@ -545,31 +90,6 @@
}
#endif
@@ -581,7 +551,7 @@
/*
** sqlite3BitvecBuiltinTest SIZE PROGRAM
**
-@@ -623,58 +137,49 @@
+@@ -604,58 +124,49 @@
return TCL_OK;
}
@@ -681,10 +651,10 @@
+ return TCL_OK;
+}
+
+ extern const char *sqlite3ErrName(int);
+
/*
- ** Interpret an SQLite error number
- */
-@@ -355,36 +364,6 @@
+@@ -339,36 +348,6 @@
}
/*
@@ -721,7 +691,7 @@
** Usage: btree_payload_size ID
**
** Return the number of bytes of payload
-@@ -396,8 +375,7 @@
+@@ -380,8 +359,7 @@
const char **argv /* Text of each argument */
){
BtCursor *pCur;
@@ -731,7 +701,7 @@
char zBuf[50];
if( argc!=2 ){
-@@ -410,103 +388,13 @@
+@@ -394,103 +372,13 @@
/* The cursor may be in "require-seek" state. If this is the case, the
** call to BtreeDataSize() will fix it. */
@@ -784,7 +754,7 @@
- if( Tcl_GetInt(interp, argv[4], (int*)&incr) ) return TCL_ERROR;
- in = start;
- in *= mult;
-- for(i=0; i<count; i++){
+- for(i=0; i<(int)count; i++){
- char zErr[200];
- n1 = putVarint(zBuf, in);
- if( n1>9 || n1<1 ){
@@ -837,7 +807,7 @@
** usage: btree_from_db DB-HANDLE
**
** This command returns the btree handle for the main database associated
-@@ -551,67 +439,6 @@
+@@ -535,67 +423,6 @@
}
/*
@@ -905,89 +875,24 @@
** Register commands with the TCL interpreter.
*/
int Sqlitetest3_Init(Tcl_Interp *interp){
-@@ -622,17 +449,17 @@
- { "btree_open", (Tcl_CmdProc*)btree_open },
- { "btree_close", (Tcl_CmdProc*)btree_close },
- { "btree_begin_transaction", (Tcl_CmdProc*)btree_begin_transaction },
-- { "btree_pager_stats", (Tcl_CmdProc*)btree_pager_stats },
-+ { "btree_pager_stats", (Tcl_CmdProc*)btree_pager_stats },
+@@ -610,13 +437,13 @@
{ "btree_cursor", (Tcl_CmdProc*)btree_cursor },
{ "btree_close_cursor", (Tcl_CmdProc*)btree_close_cursor },
{ "btree_next", (Tcl_CmdProc*)btree_next },
- { "btree_eof", (Tcl_CmdProc*)btree_eof },
-- { "btree_payload_size", (Tcl_CmdProc*)btree_payload_size },
-+ { "btree_eof", (Tcl_CmdProc*)t3_tcl_function_stub },
-+ { "btree_payload_size", (Tcl_CmdProc*)btree_payload_size },
++ { "btree_eof", (Tcl_CmdProc*)t3_tcl_function_stub },
+ { "btree_payload_size", (Tcl_CmdProc*)btree_payload_size },
{ "btree_first", (Tcl_CmdProc*)btree_first },
- { "btree_varint_test", (Tcl_CmdProc*)btree_varint_test },
-- { "btree_from_db", (Tcl_CmdProc*)btree_from_db },
++ { "btree_varint_test", (Tcl_CmdProc*)t3_tcl_function_stub },
+ { "btree_from_db", (Tcl_CmdProc*)btree_from_db },
- { "btree_ismemdb", (Tcl_CmdProc*)btree_ismemdb },
- { "btree_set_cache_size", (Tcl_CmdProc*)btree_set_cache_size }
-+ { "btree_varint_test", (Tcl_CmdProc*)t3_tcl_function_stub },
-+ { "btree_from_db", (Tcl_CmdProc*)btree_from_db },
-+ { "btree_ismemdb", (Tcl_CmdProc*)t3_tcl_function_stub },
-+ { "btree_set_cache_size", (Tcl_CmdProc*)t3_tcl_function_stub }
++ { "btree_ismemdb", (Tcl_CmdProc*)t3_tcl_function_stub },
++ { "btree_set_cache_size", (Tcl_CmdProc*)t3_tcl_function_stub }
};
int i;
---- src/test6.c
-+++ src/test6.c
-@@ -177,7 +177,7 @@
- iSkip = 512;
- }
- if( (iAmt-iSkip)>0 ){
-- rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], iAmt-iSkip, iOff+iSkip);
-+ rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], (int)iAmt-iSkip, iOff+iSkip);
- }
- return rc;
- }
-@@ -306,8 +306,8 @@
- }
- case 3: { /* Trash sectors */
- u8 *zGarbage;
-- int iFirst = (pWrite->iOffset/g.iSectorSize);
-- int iLast = (pWrite->iOffset+pWrite->nBuf-1)/g.iSectorSize;
-+ int iFirst = ((int)pWrite->iOffset/g.iSectorSize);
-+ int iLast = ((int)pWrite->iOffset+pWrite->nBuf-1)/g.iSectorSize;
-
- assert(pWrite->zBuf);
-
-@@ -430,7 +430,7 @@
- ){
- CrashFile *pCrash = (CrashFile *)pFile;
- if( iAmt+iOfst>pCrash->iSize ){
-- pCrash->iSize = iAmt+iOfst;
-+ pCrash->iSize = iAmt+(int)iOfst;
- }
- while( pCrash->iSize>pCrash->nData ){
- u8 *zNew;
-@@ -454,7 +454,7 @@
- CrashFile *pCrash = (CrashFile *)pFile;
- assert(size>=0);
- if( pCrash->iSize>size ){
-- pCrash->iSize = size;
-+ pCrash->iSize = (int)size;
- }
- return writeListAppend(pFile, size, 0, 0);
- }
-@@ -612,7 +612,7 @@
- ** never contains valid data anyhow. So avoid doing such a read here.
- */
- const int isDb = (flags&SQLITE_OPEN_MAIN_DB);
-- i64 iChunk = pWrapper->iSize;
-+ int iChunk = pWrapper->iSize;
- if( iChunk>PENDING_BYTE && isDb ){
- iChunk = PENDING_BYTE;
- }
-@@ -620,7 +620,7 @@
- rc = sqlite3OsRead(pReal, pWrapper->zData, iChunk, 0);
- if( SQLITE_OK==rc && pWrapper->iSize>(PENDING_BYTE+512) && isDb ){
- i64 iOff = PENDING_BYTE+512;
-- iChunk = pWrapper->iSize - iOff;
-+ iChunk = pWrapper->iSize - (int)iOff;
- rc = sqlite3OsRead(pReal, &pWrapper->zData[iOff], iChunk, iOff);
- }
- }else{
--- src/test_btree.c
+++ src/test_btree.c
@@ -13,50 +13,3 @@
@@ -1014,7 +919,7 @@
- BtShared *pBt;
- Tcl_Obj *pRet = Tcl_NewObj();
- for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){
-- const char *zFile = sqlite3PagerFilename(pBt->pPager);
+- const char *zFile = sqlite3PagerFilename(pBt->pPager, 1);
- Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj(zFile, -1));
- Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(pBt->nRef));
- }
@@ -1041,155 +946,14 @@
- }
-#endif
-}
---- src/test_journal.c
-+++ src/test_journal.c
-@@ -136,7 +136,7 @@
- */
- static int jtClose(sqlite3_file*);
- static int jtRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
--static int jtWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst);
-+static int jtWrite(sqlite3_file*,const void*, int iAmt, sqlite3_int64 iOfst);
- static int jtTruncate(sqlite3_file*, sqlite3_int64 size);
- static int jtSync(sqlite3_file*, int flags);
- static int jtFileSize(sqlite3_file*, sqlite3_int64 *pSize);
-@@ -405,7 +405,7 @@
-
- /* Calculate and store a checksum for each page in the database file. */
- if( rc==SQLITE_OK ){
-- int ii;
-+ unsigned int ii;
- for(ii=0; rc==SQLITE_OK && ii<pMain->nPage; ii++){
- i64 iOff = (i64)(pMain->nPagesize) * (i64)ii;
- if( iOff==PENDING_BYTE ) continue;
-@@ -467,7 +467,7 @@
- continue;
- }
- }
-- nRec = (iSize-iOff) / (pMain->nPagesize+8);
-+ nRec = (u32)((iSize-iOff) / (pMain->nPagesize+8));
- }
-
- /* Read all the records that follow the journal-header just read. */
-@@ -539,7 +539,7 @@
- }
-
- if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){
-- if( iAmt<p->nPagesize
-+ if( (u32)iAmt<p->nPagesize
- && p->nPagesize%iAmt==0
- && iOfst>=(PENDING_BYTE+512)
- && iOfst+iAmt<=PENDING_BYTE+p->nPagesize
-@@ -550,7 +550,7 @@
- ** pending-byte page.
- */
- }else{
-- u32 pgno = iOfst/p->nPagesize + 1;
-+ u32 pgno = (u32)(iOfst/p->nPagesize + 1);
- assert( (iAmt==1||iAmt==p->nPagesize) && ((iOfst+iAmt)%p->nPagesize)==0 );
- assert( pgno<=p->nPage || p->nSync>0 );
- assert( pgno>p->nPage || sqlite3BitvecTest(p->pWritable, pgno) );
-@@ -579,7 +579,7 @@
- if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){
- u32 pgno;
- u32 locking_page = (u32)(PENDING_BYTE/p->nPagesize+1);
-- for(pgno=size/p->nPagesize+1; pgno<=p->nPage; pgno++){
-+ for(pgno=(u32)(size/p->nPagesize+1); pgno<=p->nPage; pgno++){
- assert( pgno==locking_page || sqlite3BitvecTest(p->pWritable, pgno) );
- }
- }
--- src/test_malloc.c
+++ src/test_malloc.c
@@ -749,7 +749,7 @@
int isNew;
int aKey[MALLOC_LOG_KEYINTS];
-- int nKey = sizeof(int)*MALLOC_LOG_KEYINTS;
+- unsigned int nKey = sizeof(int)*MALLOC_LOG_KEYINTS;
+ unsigned int nKey = sizeof(int)*MALLOC_LOG_FRAMES;
memset(aKey, 0, nKey);
if( (sizeof(void*)*nFrame)<nKey ){
---- src/test_onefile.c
-+++ src/test_onefile.c
-@@ -288,7 +288,7 @@
- ){
- tmp_file *pTmp = (tmp_file *)pFile;
- if( (iAmt+iOfst)>pTmp->nAlloc ){
-- int nNew = 2*(iAmt+iOfst+pTmp->nAlloc);
-+ int nNew = 2*(iAmt+(int)iOfst+pTmp->nAlloc);
- char *zNew = sqlite3_realloc(pTmp->zAlloc, nNew);
- if( !zNew ){
- return SQLITE_NOMEM;
-@@ -297,7 +297,7 @@
- pTmp->nAlloc = nNew;
- }
- memcpy(&pTmp->zAlloc[iOfst], zBuf, iAmt);
-- pTmp->nSize = MAX(pTmp->nSize, iOfst+iAmt);
-+ pTmp->nSize = (int)MAX(pTmp->nSize, iOfst+iAmt);
- return SQLITE_OK;
- }
-
-@@ -306,7 +306,7 @@
- */
- static int tmpTruncate(sqlite3_file *pFile, sqlite_int64 size){
- tmp_file *pTmp = (tmp_file *)pFile;
-- pTmp->nSize = MIN(pTmp->nSize, size);
-+ pTmp->nSize = (int)MIN(pTmp->nSize, size);
- return SQLITE_OK;
- }
-
-@@ -418,7 +418,7 @@
- /* Journal file. */
- int iRem = iAmt;
- int iBuf = 0;
-- int ii = iOfst;
-+ int ii = (int)iOfst;
- while( iRem>0 && rc==SQLITE_OK ){
- int iRealOff = pReal->nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE;
- int iRealAmt = MIN(iRem, BLOCKSIZE - (iRealOff%BLOCKSIZE));
-@@ -453,14 +453,14 @@
- }else{
- rc = pF->pMethods->xWrite(pF, zBuf, iAmt, iOfst+BLOCKSIZE);
- if( rc==SQLITE_OK ){
-- pReal->nDatabase = MAX(pReal->nDatabase, iAmt+iOfst);
-+ pReal->nDatabase = (int)MAX(pReal->nDatabase, iAmt+iOfst);
- }
- }
- }else{
- /* Journal file. */
- int iRem = iAmt;
- int iBuf = 0;
-- int ii = iOfst;
-+ int ii = (int)iOfst;
- while( iRem>0 && rc==SQLITE_OK ){
- int iRealOff = pReal->nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE;
- int iRealAmt = MIN(iRem, BLOCKSIZE - (iRealOff%BLOCKSIZE));
-@@ -475,7 +475,7 @@
- }
- }
- if( rc==SQLITE_OK ){
-- pReal->nJournal = MAX(pReal->nJournal, iAmt+iOfst);
-+ pReal->nJournal = (int)MAX(pReal->nJournal, iAmt+iOfst);
- }
- }
-
-@@ -489,9 +489,9 @@
- fs_file *p = (fs_file *)pFile;
- fs_real_file *pReal = p->pReal;
- if( p->eType==DATABASE_FILE ){
-- pReal->nDatabase = MIN(pReal->nDatabase, size);
-+ pReal->nDatabase = (int)MIN(pReal->nDatabase, size);
- }else{
-- pReal->nJournal = MIN(pReal->nJournal, size);
-+ pReal->nJournal = (int)MIN(pReal->nJournal, size);
- }
- return SQLITE_OK;
- }
-@@ -641,7 +641,7 @@
- pReal->nBlob = BLOBSIZE;
- }else{
- unsigned char zS[4];
-- pReal->nBlob = size;
-+ pReal->nBlob = (int)size;
- rc = pRealFile->pMethods->xRead(pRealFile, zS, 4, 0);
- pReal->nDatabase = (zS[0]<<24)+(zS[1]<<16)+(zS[2]<<8)+zS[3];
- if( rc==SQLITE_OK ){
diff --git a/lang/sql/adapter/sqlite-patches/03_editline.patch b/lang/sql/adapter/sqlite-patches/03_editline.patch
index 170457ba..35ea4ca5 100644
--- a/lang/sql/adapter/sqlite-patches/03_editline.patch
+++ b/lang/sql/adapter/sqlite-patches/03_editline.patch
@@ -1,6 +1,6 @@
--- Makefile.in
+++ Makefile.in
-@@ -49,7 +49,7 @@ LIBTCL = @TCL_LIB_SPEC@ @TCL_LIBS@
+@@ -49,7 +49,7 @@
# Compiler options needed for programs that use the readline() library.
#
@@ -11,7 +11,7 @@
#
--- configure
+++ configure
-@@ -890,6 +890,7 @@ TCL_STUB_LIB_SPEC
+@@ -888,6 +888,7 @@
HAVE_TCL
TARGET_READLINE_LIBS
TARGET_READLINE_INC
@@ -19,107 +19,107 @@
TARGET_HAVE_READLINE
TARGET_DEBUG
USE_AMALGAMATION
-@@ -3734,13 +3735,13 @@ if test "${lt_cv_nm_interface+set}" = se
+@@ -3732,13 +3733,13 @@
else
lt_cv_nm_interface="BSD nm"
echo "int some_variable = 0;" > conftest.$ac_ext
-- (eval echo "\"\$as_me:3737: $ac_compile\"" >&5)
-+ (eval echo "\"\$as_me:3738: $ac_compile\"" >&5)
+- (eval echo "\"\$as_me:3735: $ac_compile\"" >&5)
++ (eval echo "\"\$as_me:3736: $ac_compile\"" >&5)
(eval "$ac_compile" 2>conftest.err)
cat conftest.err >&5
-- (eval echo "\"\$as_me:3740: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
-+ (eval echo "\"\$as_me:3741: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+- (eval echo "\"\$as_me:3738: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
++ (eval echo "\"\$as_me:3739: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
(eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
cat conftest.err >&5
-- (eval echo "\"\$as_me:3743: output\"" >&5)
-+ (eval echo "\"\$as_me:3744: output\"" >&5)
+- (eval echo "\"\$as_me:3741: output\"" >&5)
++ (eval echo "\"\$as_me:3742: output\"" >&5)
cat conftest.out >&5
if $GREP 'External.*some_variable' conftest.out > /dev/null; then
lt_cv_nm_interface="MS dumpbin"
-@@ -4962,7 +4963,7 @@ ia64-*-hpux*)
+@@ -4960,7 +4961,7 @@
;;
*-*-irix6*)
# Find out which ABI we are using.
-- echo '#line 4965 "configure"' > conftest.$ac_ext
-+ echo '#line 4966 "configure"' > conftest.$ac_ext
+- echo '#line 4963 "configure"' > conftest.$ac_ext
++ echo '#line 4964 "configure"' > conftest.$ac_ext
if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
(eval $ac_compile) 2>&5
ac_status=$?
-@@ -6831,11 +6832,11 @@ else
+@@ -6829,11 +6830,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
-- (eval echo "\"\$as_me:6834: $lt_compile\"" >&5)
-+ (eval echo "\"\$as_me:6835: $lt_compile\"" >&5)
+- (eval echo "\"\$as_me:6832: $lt_compile\"" >&5)
++ (eval echo "\"\$as_me:6833: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
-- echo "$as_me:6838: \$? = $ac_status" >&5
-+ echo "$as_me:6839: \$? = $ac_status" >&5
+- echo "$as_me:6836: \$? = $ac_status" >&5
++ echo "$as_me:6837: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
-@@ -7170,11 +7171,11 @@ else
+@@ -7168,11 +7169,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
-- (eval echo "\"\$as_me:7173: $lt_compile\"" >&5)
-+ (eval echo "\"\$as_me:7174: $lt_compile\"" >&5)
+- (eval echo "\"\$as_me:7171: $lt_compile\"" >&5)
++ (eval echo "\"\$as_me:7172: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
-- echo "$as_me:7177: \$? = $ac_status" >&5
-+ echo "$as_me:7178: \$? = $ac_status" >&5
+- echo "$as_me:7175: \$? = $ac_status" >&5
++ echo "$as_me:7176: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
-@@ -7275,11 +7276,11 @@ else
+@@ -7273,11 +7274,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
-- (eval echo "\"\$as_me:7278: $lt_compile\"" >&5)
-+ (eval echo "\"\$as_me:7279: $lt_compile\"" >&5)
+- (eval echo "\"\$as_me:7276: $lt_compile\"" >&5)
++ (eval echo "\"\$as_me:7277: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
-- echo "$as_me:7282: \$? = $ac_status" >&5
-+ echo "$as_me:7283: \$? = $ac_status" >&5
+- echo "$as_me:7280: \$? = $ac_status" >&5
++ echo "$as_me:7281: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
-@@ -7330,11 +7331,11 @@ else
+@@ -7328,11 +7329,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
-- (eval echo "\"\$as_me:7333: $lt_compile\"" >&5)
-+ (eval echo "\"\$as_me:7334: $lt_compile\"" >&5)
+- (eval echo "\"\$as_me:7331: $lt_compile\"" >&5)
++ (eval echo "\"\$as_me:7332: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
-- echo "$as_me:7337: \$? = $ac_status" >&5
-+ echo "$as_me:7338: \$? = $ac_status" >&5
+- echo "$as_me:7335: \$? = $ac_status" >&5
++ echo "$as_me:7336: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
-@@ -10143,7 +10144,7 @@ else
+@@ -10141,7 +10142,7 @@
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
--#line 10146 "configure"
-+#line 10147 "configure"
+-#line 10144 "configure"
++#line 10145 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
-@@ -10239,7 +10240,7 @@ else
+@@ -10237,7 +10238,7 @@
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
--#line 10242 "configure"
-+#line 10243 "configure"
+-#line 10240 "configure"
++#line 10241 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
-@@ -12920,6 +12921,7 @@ fi
+@@ -12912,6 +12913,7 @@
#
TARGET_READLINE_LIBS=""
TARGET_READLINE_INC=""
@@ -127,7 +127,7 @@
TARGET_HAVE_READLINE=0
# Check whether --enable-readline was given.
if test "${enable_readline+set}" = set; then
-@@ -12930,6 +12932,7 @@ fi
+@@ -12922,6 +12924,7 @@
if test x"$with_readline" != xno; then
@@ -135,7 +135,7 @@
found="yes"
-@@ -12941,9 +12944,12 @@ else
+@@ -12933,9 +12936,12 @@
fi
if test "x$with_readline_lib" = xauto; then
@@ -151,7 +151,7 @@
$as_echo_n "checking for library containing tgetent... " >&6; }
if test "${ac_cv_search_tgetent+set}" = set; then
$as_echo_n "(cached) " >&6
-@@ -12971,7 +12977,7 @@ return tgetent ();
+@@ -12963,7 +12969,7 @@
return 0;
}
_ACEOF
@@ -160,7 +160,7 @@
if test -z "$ac_lib"; then
ac_res="none required"
else
-@@ -13032,13 +13038,14 @@ else
+@@ -13024,13 +13030,14 @@
term_LIBS=""
fi
@@ -179,7 +179,7 @@
cat >conftest.$ac_ext <<_ACEOF
/* confdefs.h. */
_ACEOF
-@@ -13082,12 +13089,12 @@ eval ac_try_echo="\"\$as_me:$LINENO: $ac
+@@ -13074,12 +13081,12 @@
test "$cross_compiling" = yes ||
$as_test_x conftest$ac_exeext
}; then
@@ -194,7 +194,7 @@
fi
rm -rf conftest.dSYM
-@@ -13095,16 +13102,28 @@ rm -f core conftest.err conftest.$ac_obj
+@@ -13087,16 +13094,28 @@
conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
@@ -229,7 +229,7 @@
else
TARGET_READLINE_LIBS="$with_readline_lib"
fi
-@@ -13118,18 +13137,21 @@ else
+@@ -13110,18 +13129,21 @@
fi
if test "x$with_readline_inc" = xauto; then
@@ -261,7 +261,7 @@
cat >conftest.$ac_ext <<_ACEOF
/* confdefs.h. */
_ACEOF
-@@ -13137,7 +13159,7 @@ cat confdefs.h >>conftest.$ac_ext
+@@ -13129,7 +13151,7 @@
cat >>conftest.$ac_ext <<_ACEOF
/* end confdefs.h. */
$ac_includes_default
@@ -270,7 +270,7 @@
_ACEOF
rm -f conftest.$ac_objext
if { (ac_try="$ac_compile"
-@@ -13170,15 +13192,15 @@ rm -f core conftest.err conftest.$ac_obj
+@@ -13162,15 +13184,15 @@
$as_echo "$ac_header_compiler" >&6; }
# Is the header present?
@@ -289,7 +289,7 @@
_ACEOF
if { (ac_try="$ac_cpp conftest.$ac_ext"
case "(($ac_try" in
-@@ -13212,40 +13234,43 @@ rm -f conftest.err conftest.$ac_ext
+@@ -13204,40 +13226,43 @@
# So? What about this header?
case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
yes:no: )
@@ -364,7 +364,7 @@
found="yes"
else
-@@ -13253,9 +13278,9 @@ else
+@@ -13245,9 +13270,9 @@
if test "$cross_compiling" != yes; then
for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do
for subdir in include include/readline; do
@@ -377,7 +377,7 @@
if { as_var=$as_ac_File; eval "test \"\${$as_var+set}\" = set"; }; then
$as_echo_n "(cached) " >&6
else
-@@ -13263,7 +13288,7 @@ else
+@@ -13255,7 +13280,7 @@
{ { $as_echo "$as_me:$LINENO: error: cannot check for file existence when cross compiling" >&5
$as_echo "$as_me: error: cannot check for file existence when cross compiling" >&2;}
{ (exit 1); exit 1; }; }
@@ -386,7 +386,7 @@
eval "$as_ac_File=yes"
else
eval "$as_ac_File=no"
-@@ -13297,6 +13322,7 @@ fi
+@@ -13289,6 +13314,7 @@
if test x"$found" = xno; then
TARGET_READLINE_LIBS=""
TARGET_READLINE_INC=""
@@ -394,7 +394,7 @@
TARGET_HAVE_READLINE=0
else
TARGET_HAVE_READLINE=1
-@@ -13307,6 +13333,7 @@ fi
+@@ -13299,6 +13325,7 @@
@@ -404,7 +404,7 @@
# that use "fdatasync()" function.
--- configure.ac
+++ configure.ac
-@@ -524,6 +524,7 @@ AC_SUBST(HAVE_TCL)
+@@ -515,6 +515,7 @@
#
TARGET_READLINE_LIBS=""
TARGET_READLINE_INC=""
@@ -412,7 +412,7 @@
TARGET_HAVE_READLINE=0
AC_ARG_ENABLE([readline],
[AC_HELP_STRING([--disable-readline],[disable readline support [default=detect]])],
-@@ -531,6 +532,7 @@ AC_ARG_ENABLE([readline],
+@@ -522,6 +523,7 @@
[with_readline=auto])
if test x"$with_readline" != xno; then
@@ -420,7 +420,7 @@
found="yes"
AC_ARG_WITH([readline-lib],
-@@ -538,12 +540,24 @@ if test x"$with_readline" != xno; then
+@@ -529,12 +531,24 @@
[with_readline_lib=$withval],
[with_readline_lib="auto"])
if test "x$with_readline_lib" = xauto; then
@@ -451,7 +451,7 @@
else
TARGET_READLINE_LIBS="$with_readline_lib"
fi
-@@ -553,12 +567,12 @@ if test x"$with_readline" != xno; then
+@@ -544,12 +558,12 @@
[with_readline_inc=$withval],
[with_readline_inc="auto"])
if test "x$with_readline_inc" = xauto; then
@@ -466,7 +466,7 @@
if test "$found" = "yes"; then
TARGET_READLINE_INC="-I$dir/$subdir"
break
-@@ -575,6 +589,7 @@ if test x"$with_readline" != xno; then
+@@ -566,6 +580,7 @@
if test x"$found" = xno; then
TARGET_READLINE_LIBS=""
TARGET_READLINE_INC=""
@@ -474,7 +474,7 @@
TARGET_HAVE_READLINE=0
else
TARGET_HAVE_READLINE=1
-@@ -583,6 +598,7 @@ fi
+@@ -574,6 +589,7 @@
AC_SUBST(TARGET_READLINE_LIBS)
AC_SUBST(TARGET_READLINE_INC)
@@ -484,12 +484,12 @@
##########
--- src/shell.c
+++ src/shell.c
-@@ -38,14 +38,13 @@
- # include <unistd.h>
+@@ -45,14 +45,13 @@
+ # include <sys/types.h>
#endif
-#ifdef HAVE_EDITLINE
-+#if defined (HAVE_EDITLINE) && HAVE_EDITLINE==1
++#if defined(HAVE_EDITLINE) && HAVE_EDITLINE==1
# include <editline/editline.h>
-#endif
-#if defined(HAVE_READLINE) && HAVE_READLINE==1
@@ -499,6 +499,6 @@
#endif
-#if !defined(HAVE_EDITLINE) && (!defined(HAVE_READLINE) || HAVE_READLINE!=1)
+#if (!defined(HAVE_EDITLINE) || HAVE_EDITLINE!=1) && (!defined(HAVE_READLINE) || HAVE_READLINE!=1)
- # define readline(p) local_getline(p,stdin)
# define add_history(X)
# define read_history(X)
+ # define write_history(X)
diff --git a/lang/sql/adapter/sqlite-patches/04_build_config.patch b/lang/sql/adapter/sqlite-patches/04_build_config.patch
index 8bd00869..0ae553ee 100644
--- a/lang/sql/adapter/sqlite-patches/04_build_config.patch
+++ b/lang/sql/adapter/sqlite-patches/04_build_config.patch
@@ -1,4 +1,4 @@
---- /dev/null
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ build_config.h.in
@@ -0,0 +1,47 @@
+
@@ -50,7 +50,7 @@
+#endif
--- configure
+++ configure
-@@ -13548,7 +13548,7 @@ ac_config_headers="$ac_config_headers co
+@@ -13600,7 +13600,7 @@
# Generate the output files.
#
@@ -59,7 +59,7 @@
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
-@@ -14402,6 +14402,7 @@ do
+@@ -14454,6 +14454,7 @@
"config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;;
@@ -69,7 +69,7 @@
$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
--- configure.ac
+++ configure.ac
-@@ -714,4 +714,5 @@ AC_SUBST(BUILD_CFLAGS)
+@@ -690,4 +690,5 @@
AC_OUTPUT([
Makefile
sqlite3.pc
diff --git a/lang/sql/adapter/sqlite-patches/05_shell_config.patch b/lang/sql/adapter/sqlite-patches/05_shell_config.patch
index 415114fe..ae8aa49e 100644
--- a/lang/sql/adapter/sqlite-patches/05_shell_config.patch
+++ b/lang/sql/adapter/sqlite-patches/05_shell_config.patch
@@ -1,7 +1,7 @@
--- src/shell.c
+++ src/shell.c
-@@ -17,6 +17,14 @@
- #define _CRT_SECURE_NO_WARNINGS
+@@ -28,6 +28,14 @@
+ # define _LARGEFILE_SOURCE 1
#endif
+/*
diff --git a/lang/sql/adapter/sqlite-patches/07_shell_prompt.patch b/lang/sql/adapter/sqlite-patches/07_shell_prompt.patch
index 77bb25d0..a0d208d9 100644
--- a/lang/sql/adapter/sqlite-patches/07_shell_prompt.patch
+++ b/lang/sql/adapter/sqlite-patches/07_shell_prompt.patch
@@ -1,25 +1,25 @@
--- src/shell.c
+++ src/shell.c
-@@ -2529,7 +2529,7 @@
- memcpy(data->separator,"|", 2);
+@@ -2924,7 +2924,7 @@
data->showHeader = 0;
+ sqlite3_config(SQLITE_CONFIG_URI, 1);
sqlite3_config(SQLITE_CONFIG_LOG, shellLog, data);
- sqlite3_snprintf(sizeof(mainPrompt), mainPrompt,"sqlite> ");
+ sqlite3_snprintf(sizeof(mainPrompt), mainPrompt,"dbsql> ");
sqlite3_snprintf(sizeof(continuePrompt), continuePrompt," ...> ");
sqlite3_config(SQLITE_CONFIG_SINGLETHREAD);
}
-@@ -2730,11 +2730,12 @@
+@@ -3177,11 +3177,12 @@
char *zHome;
char *zHistory = 0;
int nHistory;
+ extern char *db_full_version(int *, int *, int *, int *, int *);
printf(
-- "SQLite version %s\n"
+- "SQLite version %s %.19s\n" /*extra-version-info*/
+ "%s\n"
"Enter \".help\" for instructions\n"
"Enter SQL statements terminated with a \";\"\n",
-- sqlite3_libversion()
+- sqlite3_libversion(), sqlite3_sourceid()
+ db_full_version(NULL, NULL, NULL, NULL, NULL)
);
zHome = find_home_dir();
diff --git a/lang/sql/adapter/sqlite-patches/08_errno_header.patch b/lang/sql/adapter/sqlite-patches/08_errno_header.patch
index 08fd0e45..595d5225 100644
--- a/lang/sql/adapter/sqlite-patches/08_errno_header.patch
+++ b/lang/sql/adapter/sqlite-patches/08_errno_header.patch
@@ -12,30 +12,30 @@
--- configure
+++ configure
-@@ -11993,7 +11993,8 @@ fi
+@@ -11991,7 +11991,8 @@
--for ac_header in sys/types.h stdlib.h stdint.h inttypes.h
+-for ac_header in sys/types.h stdlib.h stdint.h inttypes.h malloc.h
+
-+for ac_header in sys/types.h stdlib.h stdint.h inttypes.h errno.h
++for ac_header in sys/types.h stdlib.h stdint.h inttypes.h malloc.h errno.h
do
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
--- configure.ac
+++ configure.ac
-@@ -122,7 +122,7 @@ AC_CHECK_TYPES([int8_t, int16_t, int32_t
+@@ -122,7 +122,7 @@
#########
# Check for needed/wanted headers
--AC_CHECK_HEADERS([sys/types.h stdlib.h stdint.h inttypes.h])
-+AC_CHECK_HEADERS([sys/types.h stdlib.h stdint.h inttypes.h errno.h])
+-AC_CHECK_HEADERS([sys/types.h stdlib.h stdint.h inttypes.h malloc.h])
++AC_CHECK_HEADERS([sys/types.h stdlib.h stdint.h inttypes.h malloc.h errno.h])
#########
# Figure out whether or not we have these functions
--- src/sqliteInt.h
+++ src/sqliteInt.h
-@@ -75,6 +75,9 @@
+@@ -80,6 +80,9 @@
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
@@ -44,4 +44,4 @@
+#endif
/*
- ** The number of samples of an index that SQLite takes in order to
+ ** The following macros are used to cast pointers to integers and
diff --git a/lang/sql/adapter/sqlite-patches/09_comment_tests.patch b/lang/sql/adapter/sqlite-patches/09_comment_tests.patch
index bf55d5cf..2883fbba 100644
--- a/lang/sql/adapter/sqlite-patches/09_comment_tests.patch
+++ b/lang/sql/adapter/sqlite-patches/09_comment_tests.patch
@@ -12,22 +12,47 @@
if {0==[sqlite3 -has-codec]} {
do_test date-14.1 {
execsql {
-@@ -528,4 +533,5 @@
+@@ -528,6 +533,7 @@
} {1}
}
}
+}
- finish_test
+
+ # Verify that multiple calls to date functions with 'now' return the
+ # same answer.
+--- test/incrblob.test
++++ test/incrblob.test
+@@ -135,8 +135,8 @@
+ INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45);
+ COMMIT;
+ }
+- expr [file size test.db]/1024
+- } [expr 31 + $AutoVacuumMode]
++ } {}
++
+
+ ifcapable autovacuum {
+ do_test incrblob-2.$AutoVacuumMode.2 {
+--- test/manydb.test
++++ test/manydb.test
+@@ -67,6 +67,7 @@
+ for {set i 0} {$i<$N} {incr i} {
+ do_test manydb-1.$i {
+ sqlite3 db$i $dbname($i)
++ execsql { PRAGMA cache_size=20; } db$i
+ execsql {
+ CREATE TABLE t1(a,b);
+ BEGIN;
--- test/rollback.test
+++ test/rollback.test
@@ -88,8 +88,8 @@
BEGIN;
INSERT INTO t3 VALUES('hello world');
}
-- file copy -force test.db testA.db
-- file copy -force test.db-journal testA.db-journal
-+ #file copy -force test.db testA.db
-+ #file copy -force test.db-journal testA.db-journal
+- forcecopy test.db testA.db
+- forcecopy test.db-journal testA.db-journal
++ #forcecopy test.db testA.db
++ #forcecopy test.db-journal testA.db-journal
execsql {
COMMIT;
}
@@ -77,7 +102,7 @@
# Open a handle on testA.db and use it to query the database. At one
# point the first query would attempt a hot rollback, attempt to open
-@@ -127,6 +127,8 @@
+@@ -127,6 +130,8 @@
# be opened. This is incorrect, it should simply delete the journal
# file and proceed with the query.
#
@@ -86,37 +111,24 @@
do_test rollback-2.2 {
sqlite3 db2 testA.db
execsql {
-@@ -146,5 +148,6 @@
+@@ -146,5 +151,6 @@
db2 close
}
+}
finish_test
---- test/incrblob.test
-+++ test/incrblob.test
-@@ -134,8 +134,8 @@
- INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45);
- COMMIT;
- }
-- expr [file size test.db]/1024
-- } [expr 31 + $AutoVacuumMode]
-+ } {}
-+
-
- ifcapable autovacuum {
- do_test incrblob-2.$AutoVacuumMode.2 {
--- test/shared3.test
+++ test/shared3.test
-@@ -101,7 +101,7 @@
+@@ -102,7 +102,7 @@
db1 close
db2 close
-db3 close
+#db3 close
- sqlite3_enable_shared_cache $::enable_shared_cache
- finish_test
+ #-------------------------------------------------------------------------
+ # At one point this was causing a faulty assert to fail.
--- test/shared6.test
+++ test/shared6.test
@@ -42,8 +42,8 @@
@@ -130,13 +142,3 @@
do_test shared6-1.2.2 {
execsql { SELECT * FROM t1 } db1
} {}
---- test/manydb.test
-+++ test/manydb.test
-@@ -67,6 +67,7 @@
- for {set i 0} {$i<$N} {incr i} {
- do_test manydb-1.$i {
- sqlite3 db$i $dbname($i)
-+ execsql { PRAGMA cache_size=20; } db$i
- execsql {
- CREATE TABLE t1(a,b);
- BEGIN;
diff --git a/lang/sql/adapter/sqlite-patches/10_compile_options.patch b/lang/sql/adapter/sqlite-patches/10_compile_options.patch
index 4338b0e5..f4ddc58b 100644
--- a/lang/sql/adapter/sqlite-patches/10_compile_options.patch
+++ b/lang/sql/adapter/sqlite-patches/10_compile_options.patch
@@ -1,6 +1,6 @@
--- src/ctime.c
+++ src/ctime.c
-@@ -39,6 +39,9 @@ static const char * const azCompileOpt[]
+@@ -39,6 +39,9 @@
#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
"4_BYTE_ALIGNED_MALLOC",
#endif
diff --git a/lang/sql/adapter/sqlite-patches/11_android_shell.patch b/lang/sql/adapter/sqlite-patches/11_android_shell.patch
index e591c11c..278cc8cc 100644
--- a/lang/sql/adapter/sqlite-patches/11_android_shell.patch
+++ b/lang/sql/adapter/sqlite-patches/11_android_shell.patch
@@ -1,7 +1,7 @@
--- src/shell.c
+++ src/shell.c
-@@ -25,6 +25,12 @@
- #include "config.h"
+@@ -28,6 +28,12 @@
+ # define _LARGEFILE_SOURCE 1
#endif
+#ifdef ANDROID
@@ -13,7 +13,7 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
-@@ -1389,6 +1395,13 @@ static void open_db(struct callback_data
+@@ -1617,6 +1623,13 @@
sqlite3_create_function(db, "shellstatic", 0, SQLITE_UTF8, 0,
shellstaticFunc, 0, 0);
}
diff --git a/lang/sql/adapter/sqlite-patches/12_e_fts3_test.patch b/lang/sql/adapter/sqlite-patches/12_e_fts3_test.patch
index 30152d0f..50308e75 100644
--- a/lang/sql/adapter/sqlite-patches/12_e_fts3_test.patch
+++ b/lang/sql/adapter/sqlite-patches/12_e_fts3_test.patch
@@ -1,6 +1,6 @@
--- test/e_fts3.test
+++ test/e_fts3.test
-@@ -509,6 +509,7 @@ error_test 2.1.8 {
+@@ -509,6 +509,7 @@
# of the document examples above.
#
do_malloc_test e_fts3-3 -tclbody {
diff --git a/lang/sql/adapter/sqlite-patches/13_malloc_test.patch b/lang/sql/adapter/sqlite-patches/13_malloc_test.patch
index 1f8982ed..ecac5093 100644
--- a/lang/sql/adapter/sqlite-patches/13_malloc_test.patch
+++ b/lang/sql/adapter/sqlite-patches/13_malloc_test.patch
@@ -1,11 +1,6 @@
--- test/malloc.test
+++ test/malloc.test
-@@ -713,17 +713,17 @@
-
- # After committing a transaction in persistent-journal mode, if a journal
- # size limit is configured SQLite may attempt to truncate the journal file.
- # This test verifies the libraries response to a malloc() failure during
- # this operation.
+@@ -718,7 +718,7 @@
#
do_malloc_test 31 -sqlprep {
PRAGMA journal_mode = persist;
@@ -14,8 +9,3 @@
CREATE TABLE t1(a PRIMARY KEY, b);
} -sqlbody {
INSERT INTO t1 VALUES(1, 2);
- }
-
- # When written, this test provoked an obscure change-counter bug.
- #
- # If, when running in exclusive mode, a malloc() failure occurs
diff --git a/lang/sql/adapter/sqlite-patches/14_custom_pragma.patch b/lang/sql/adapter/sqlite-patches/14_custom_pragma.patch
index b10d312b..2166663a 100644
--- a/lang/sql/adapter/sqlite-patches/14_custom_pragma.patch
+++ b/lang/sql/adapter/sqlite-patches/14_custom_pragma.patch
@@ -7,9 +7,9 @@
- callback.lo complete.lo ctime.lo date.lo delete.lo \
+ callback.lo complete.lo ctime.lo date.lo db_pragma.lo delete.lo \
expr.lo fault.lo fkey.lo \
- fts3.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo fts3_porter.lo \
- fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo fts3_write.lo \
-@@ -207,6 +207,7 @@
+ fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo \
+ fts3_porter.lo fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo \
+@@ -209,6 +209,7 @@
$(TOP)/src/complete.c \
$(TOP)/src/ctime.c \
$(TOP)/src/date.c \
@@ -17,7 +17,7 @@
$(TOP)/src/delete.c \
$(TOP)/src/expr.c \
$(TOP)/src/fault.c \
-@@ -573,6 +574,10 @@
+@@ -615,6 +616,10 @@
date.lo: $(TOP)/src/date.c $(HDR)
$(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/date.c
@@ -30,16 +30,7 @@
--- src/pragma.c
+++ src/pragma.c
-@@ -49,7 +49,7 @@
- /*
- ** Interpret the given string as a boolean value.
- */
--static u8 getBoolean(const char *z){
-+u8 getBoolean(const char *z){
- return getSafetyLevel(z)&1;
- }
-
-@@ -142,7 +142,7 @@
+@@ -601,7 +601,7 @@
/*
** Generate code to return a single integer value.
*/
@@ -48,7 +39,7 @@
Vdbe *v = sqlite3GetVdbe(pParse);
int mem = ++pParse->nMem;
i64 *pI64 = sqlite3DbMallocRaw(pParse->db, sizeof(value));
-@@ -283,6 +283,8 @@
+@@ -687,6 +687,8 @@
return azModeName[eMode];
}
@@ -57,19 +48,21 @@
/*
** Process a pragma statement.
**
-@@ -344,6 +346,9 @@
+@@ -783,6 +785,11 @@
goto pragma_out;
}
-
-+ if( bdbsqlPragma(pParse, zLeft, zRight, iDb)==0 ){
-+ /* Do nothing if this was a Berkeley DB specific pragma. */
-+ }else
- #ifndef SQLITE_OMIT_PAGER_PRAGMAS
- /*
- ** PRAGMA [database.]default_cache_size
+
++ if ( bdbsqlPragma(pParse, zLeft, zRight, iDb)==0 ) {
++ /* Do nothing if this was a Berkeley DB specific pragma. */
++ goto pragma_out;
++ }
++
+ /* Locate the pragma in the lookup table */
+ lwr = 0;
+ upr = ArraySize(aPragmaNames)-1;
--- tool/mksqlite3c.tcl
+++ tool/mksqlite3c.tcl
-@@ -250,6 +250,7 @@
+@@ -269,6 +269,7 @@
btmutex.c
btree.c
backup.c
diff --git a/lang/sql/adapter/sqlite-patches/15_bdb_stat.patch b/lang/sql/adapter/sqlite-patches/15_bdb_stat.patch
index f4533097..84a16105 100644
--- a/lang/sql/adapter/sqlite-patches/15_bdb_stat.patch
+++ b/lang/sql/adapter/sqlite-patches/15_bdb_stat.patch
@@ -8,10 +8,10 @@
- expr.lo fault.lo fkey.lo \
+ callback.lo complete.lo ctime.lo date.lo db_pragma.lo db_shell.lo \
+ delete.lo expr.lo fault.lo fkey.lo \
- fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo fts3_porter.lo \
- fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo fts3_write.lo \
- func.lo global.lo hash.lo \
-@@ -208,6 +208,7 @@ SRC = \
+ fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo \
+ fts3_porter.lo fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo \
+ fts3_tokenize_vtab.lo \
+@@ -210,6 +210,7 @@
$(TOP)/src/ctime.c \
$(TOP)/src/date.c \
$(TOP)/../adapter/db_pragma.c \
@@ -19,7 +19,7 @@
$(TOP)/src/delete.c \
$(TOP)/src/expr.c \
$(TOP)/src/fault.c \
-@@ -583,6 +584,10 @@ db_pragma.lo: $(TOP)/../adapter/db_pragm
+@@ -620,6 +621,10 @@
$(TOP)/../adapter/btreeInt.h
$(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/../adapter/db_pragma.c
@@ -32,7 +32,7 @@
--- src/shell.c
+++ src/shell.c
-@@ -1368,6 +1368,17 @@ static char zHelp[] =
+@@ -1600,6 +1600,17 @@
" LIKE pattern TABLE.\n"
".separator STRING Change separator used by output mode and .import\n"
".show Show the current values for various settings\n"
@@ -50,7 +50,7 @@
".stats ON|OFF Turn stats on or off\n"
".tables ?TABLE? List names of tables\n"
" If TABLE specified, only list tables matching\n"
-@@ -2128,6 +2137,27 @@ static int do_meta_command(char *zLine,
+@@ -2685,6 +2696,27 @@
}
fprintf(p->out,"\n");
}else
@@ -60,7 +60,7 @@
+ extern int bdbSqlEnvStatPrint(sqlite3 *db, FILE *);
+ extern int bdbSqlRepSumStatPrint(sqlite3 *db, FILE *);
+
-+ open_db(p);
++ open_db(p, 0);
+
+ if (nArg == 1 || nArg == 2 && strcmp(azArg[1], ":env:") == 0)
+ rc = bdbSqlEnvStatPrint(p->db, p->out);
@@ -80,7 +80,7 @@
p->statsOn = booleanValue(azArg[1]);
--- tool/mksqlite3c.tcl
+++ tool/mksqlite3c.tcl
-@@ -251,6 +251,7 @@ foreach file {
+@@ -270,6 +270,7 @@
btree.c
backup.c
db_pragma.c
diff --git a/lang/sql/adapter/sqlite-patches/16_bdb_deadlock.patch b/lang/sql/adapter/sqlite-patches/16_bdb_deadlock.patch
index 21f57b1c..52771eb2 100644
--- a/lang/sql/adapter/sqlite-patches/16_bdb_deadlock.patch
+++ b/lang/sql/adapter/sqlite-patches/16_bdb_deadlock.patch
@@ -1,6 +1,6 @@
--- src/vdbe.c
+++ src/vdbe.c
-@@ -1112,7 +1112,10 @@ case OP_ResultRow: {
+@@ -1215,7 +1215,10 @@
** The statement transaction is never a top-level transaction. Hence
** the RELEASE call below can never fail.
*/
@@ -12,22 +12,16 @@
rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE);
if( NEVER(rc!=SQLITE_OK) ){
break;
-@@ -2786,9 +2781,12 @@ case OP_Transaction: {
+@@ -3001,7 +3004,7 @@
goto abort_due_to_error;
}
- if( pOp->p2 && p->usesStmtJournal
-+ /*if( pOp->p2 && p->usesStmtJournal
- && (db->autoCommit==0 || db->activeVdbeCnt>1)
-- ){
-+ ){*/
-+ /* In Berkeley DB create a statement transaction for every update
-+ * statement. BDB */
-+ if ( pOp->p2 && (db->autoCommit==0 || db->activeVdbeCnt>1)) {
++ if( pOp->p2 && (!db->aVTrans || p->usesStmtJournal)
+ && (db->autoCommit==0 || db->nVdbeRead>1)
+ ){
assert( sqlite3BtreeIsInTrans(pBt) );
- if( p->iStatement==0 ){
- assert( db->nStatement>=0 && db->nSavepoint>=0 );
-@@ -5907,6 +5913,14 @@ vdbe_error_halt:
+@@ -6273,6 +6276,14 @@
testcase( sqlite3GlobalConfig.xLog!=0 );
sqlite3_log(rc, "statement aborts at %d: [%s] %s",
pc, p->zSql, p->zErrMsg);
@@ -44,7 +38,7 @@
rc = SQLITE_ERROR;
--- src/vdbeblob.c
+++ src/vdbeblob.c
-@@ -155,6 +155,7 @@ int sqlite3_blob_open(
+@@ -156,6 +156,7 @@
Table *pTab;
Parse *pParse = 0;
Incrblob *pBlob = 0;
@@ -52,7 +46,7 @@
flags = !!flags; /* flags = (flags ? 1 : 0); */
*ppBlob = 0;
-@@ -254,7 +255,7 @@ int sqlite3_blob_open(
+@@ -259,7 +260,7 @@
assert( pBlob->pStmt || db->mallocFailed );
if( pBlob->pStmt ){
Vdbe *v = (Vdbe *)pBlob->pStmt;
@@ -61,9 +55,9 @@
sqlite3VdbeAddOpList(v, sizeof(openBlob)/sizeof(VdbeOpList), openBlob);
-@@ -298,6 +299,10 @@ int sqlite3_blob_open(
- if( !db->mallocFailed ){
- sqlite3VdbeMakeReady(v, 1, 1, 1, 0, 0, 0);
+@@ -307,6 +308,10 @@
+ pParse->nTab = 1;
+ sqlite3VdbeMakeReady(v, pParse);
}
+ /* This will prevent the statement transaction from being committed,
+ * which would invalidate the incrblob cursor. BDB */
diff --git a/lang/sql/adapter/sqlite-patches/17_encryption.patch b/lang/sql/adapter/sqlite-patches/17_encryption.patch
index edf097f3..7f9404dc 100644
--- a/lang/sql/adapter/sqlite-patches/17_encryption.patch
+++ b/lang/sql/adapter/sqlite-patches/17_encryption.patch
@@ -8,10 +8,10 @@
- delete.lo expr.lo fault.lo fkey.lo \
+ callback.lo complete.lo ctime.lo date.lo db_encrypt.lo db_pragma.lo \
+ db_shell.lo delete.lo expr.lo fault.lo fkey.lo \
- fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo fts3_porter.lo \
- fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo fts3_write.lo \
- func.lo global.lo hash.lo \
-@@ -207,6 +208,7 @@
+ fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo \
+ fts3_porter.lo fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo \
+ fts3_tokenize_vtab.lo \
+@@ -209,6 +209,7 @@
$(TOP)/src/complete.c \
$(TOP)/src/ctime.c \
$(TOP)/src/date.c \
@@ -19,7 +19,7 @@
$(TOP)/../adapter/db_pragma.c \
$(TOP)/../adapter/db_shell.c \
$(TOP)/src/delete.c \
-@@ -580,6 +581,10 @@
+@@ -617,6 +618,10 @@
date.lo: $(TOP)/src/date.c $(HDR)
$(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/date.c
@@ -32,17 +32,7 @@
$(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/../adapter/db_pragma.c
--- src/tclsqlite.c
+++ src/tclsqlite.c
-@@ -2864,6 +2864,9 @@
- const char *zFile;
- const char *zVfs = 0;
- int flags;
-+#ifdef SQLITE_HAS_CODEC
-+ int rc;
-+#endif
- Tcl_DString translatedFilename;
-
- /* In normal use, each TCL interpreter runs in a single thread. So
-@@ -2968,7 +2971,13 @@
+@@ -3036,7 +3036,13 @@
}
#ifdef SQLITE_HAS_CODEC
if( p->db ){
@@ -59,7 +49,7 @@
if( p->db==0 ){
--- test/vtab1.test
+++ test/vtab1.test
-@@ -849,8 +849,12 @@
+@@ -879,8 +879,12 @@
ifcapable attach {
do_test vtab1.8-1 {
set echo_module ""
@@ -75,7 +65,7 @@
set echo_module
--- tool/mksqlite3c.tcl
+++ tool/mksqlite3c.tcl
-@@ -250,6 +250,7 @@
+@@ -269,6 +269,7 @@
btmutex.c
btree.c
backup.c
diff --git a/lang/sql/adapter/sqlite-patches/18_vacuum_test.patch b/lang/sql/adapter/sqlite-patches/18_vacuum_test.patch
index d276e41e..a035b72c 100644
--- a/lang/sql/adapter/sqlite-patches/18_vacuum_test.patch
+++ b/lang/sql/adapter/sqlite-patches/18_vacuum_test.patch
@@ -17,9 +17,9 @@
+# DBSQL does not do "db2 close" here because incrvacuum-12.2 is excluded
+# so db2 is not opended.
db close
- file delete -force test.db test.db-journal
+ forcedelete test.db test.db-journal
sqlite3 db test.db ; set ::DB [sqlite3_connection_pointer db]
-@@ -736,8 +737,8 @@
+@@ -736,8 +739,8 @@
catchsql {
PRAGMA incremental_vacuum(10);
} db3
@@ -31,7 +31,7 @@
do_test incrvacuum-15.1 {
--- test/vacuum4.test
+++ test/vacuum4.test
-@@ -65,3 +65,5 @@ do_test vacuum4-1.1 {
+@@ -65,3 +65,5 @@
VACUUM;
}
} {}
diff --git a/lang/sql/adapter/sqlite-patches/19_backup_test.patch b/lang/sql/adapter/sqlite-patches/19_backup_test.patch
index 7156d333..5bf83f3c 100644
--- a/lang/sql/adapter/sqlite-patches/19_backup_test.patch
+++ b/lang/sql/adapter/sqlite-patches/19_backup_test.patch
@@ -1,5 +1,14 @@
--- test/backup.test
+++ test/backup.test
+@@ -73,7 +73,7 @@
+ # Check that it is possible to create and finish backup operations.
+ #
+ do_test backup-1.3.1 {
+- delete_file test2.db
++ forcedelete test2.db
+ sqlite3 db2 test2.db
+ sqlite3_backup B db2 main db main
+ } {B}
@@ -164,12 +164,12 @@
set file_dest temp
}] {
@@ -9,10 +18,10 @@
foreach nPagePerStep {1 200} {
# Open the databases.
-- catch { file delete test.db }
-- catch { file delete test2.db }
-+ catch { file delete -force -- test.db }
-+ catch { file delete -force -- test2.db }
+- catch { delete_file test.db }
+- catch { delete_file test2.db }
++ catch { forcedelete test.db }
++ catch { forcedelete test2.db }
eval $zOpenScript
# Set to true if copying to an in-memory destination. Copying to an
@@ -35,11 +44,10 @@
CREATE TABLE ${file_dest}.t1(a, b);
CREATE INDEX ${file_dest}.i1 ON t1(a, b);
" $db_dest
-@@ -244,119 +245,6 @@
- # End of backup-2.* tests.
+@@ -245,120 +246,6 @@
#---------------------------------------------------------------------
--#---------------------------------------------------------------------
+ #---------------------------------------------------------------------
-# These tests, backup-3.*, ensure that nothing goes wrong if either
-# the source or destination database are large enough to include the
-# the locking-page (the page that contains the range of bytes that
@@ -74,8 +82,8 @@
-foreach nDestRow {10 100} {
-foreach nDestPgsz {512 1024 2048 4096} {
-
-- catch { file delete test.db }
-- catch { file delete test2.db }
+- catch { delete_file test.db }
+- catch { delete_file test2.db }
- sqlite3 db test.db
- sqlite3 db2 test2.db
-
@@ -122,8 +130,8 @@
-
-#--------------------------------------------------------------------
-do_test backup-3.$iTest.1 {
-- catch { file delete -force test.db }
-- catch { file delete -force test2.db }
+- catch { forcedelete test.db }
+- catch { forcedelete test2.db }
- sqlite3 db test.db
- set iTab 1
-
@@ -152,34 +160,27 @@
-# End of backup-3.* tests.
-#---------------------------------------------------------------------
-
-
- #---------------------------------------------------------------------
+-
+-#---------------------------------------------------------------------
# The following tests, backup-4.*, test various error conditions:
-@@ -439,7 +327,7 @@
- db2 close
-
- do_test backup-4.5.1 {
-- catch { file delete -force test.db }
-+ catch { file delete -force -- test.db }
- sqlite3 db test.db
- sqlite3 db2 :memory:
- execsql {
-@@ -492,11 +380,11 @@
+ #
+ # backup-4.1.*: Test invalid database names.
+@@ -492,11 +379,11 @@
#
set iTest 0
- file delete -force bak.db-wal
+ forcedelete bak.db-wal
-foreach {writer file} {db test.db db3 test.db db :memory:} {
+foreach {writer file} {db test.db db :memory:} {
incr iTest
-- catch { file delete bak.db }
-+ catch { file delete -force bak.db }
+- catch { delete_file bak.db }
++ catch { forcedelete bak.db }
sqlite3 db2 bak.db
-- catch { file delete $file }
-+ catch { file delete -force $file }
+- catch { delete_file $file }
++ catch { forcedelete $file }
sqlite3 db $file
sqlite3 db3 $file
-@@ -520,7 +408,7 @@
+@@ -520,7 +407,7 @@
} {SQLITE_OK}
do_test backup-5.$iTest.1.3 {
execsql { UPDATE t1 SET a = a + 1 } $writer
@@ -188,19 +189,19 @@
} {SQLITE_DONE}
do_test backup-5.$iTest.1.4 {
B finish
-@@ -597,9 +485,9 @@
+@@ -597,9 +484,9 @@
catch {db close}
catch {db2 close}
catch {db3 close}
-- catch { file delete bak.db }
-+ catch { file delete -force -- bak.db }
+- catch { delete_file bak.db }
++ catch { forcedelete bak.db }
sqlite3 db2 bak.db
-- catch { file delete $file }
-+ catch { file delete -force -- $file }
+- catch { delete_file $file }
++ catch { forcedelete $file }
sqlite3 db $file
sqlite3 db3 $file
do_test backup-5.$iTest.5.1 {
-@@ -631,7 +519,6 @@
+@@ -631,7 +518,6 @@
B finish
} {SQLITE_OK}
integrity_check backup-5.$iTest.5.5 db2
@@ -208,32 +209,15 @@
catch {db close}
catch {db2 close}
catch {db3 close}
-@@ -644,8 +531,8 @@
- # Test the sqlite3_backup_remaining() and backup_pagecount() APIs.
- #
- do_test backup-6.1 {
-- catch { file delete -force test.db }
-- catch { file delete -force test2.db }
-+ catch { file delete -force -- test.db }
-+ catch { file delete -force -- test2.db }
- sqlite3 db test.db
- sqlite3 db2 test2.db
- execsql {
-@@ -701,10 +588,11 @@
- # backup-7.3.*: Destination database is externally locked (return SQLITE_BUSY).
- #
- do_test backup-7.0 {
-- catch { file delete -force test.db }
-- catch { file delete -force test2.db }
-+ catch { file delete -force -- test.db }
-+ catch { file delete -force -- test2.db }
+@@ -705,6 +591,7 @@
+ catch { forcedelete test2.db }
sqlite3 db2 test2.db
sqlite3 db test.db
+ sqlite3 db3 test.db
execsql {
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
-@@ -723,24 +611,12 @@
+@@ -723,24 +610,12 @@
sqlite3_backup B db2 main db main
B step 5
} {SQLITE_OK}
@@ -258,20 +242,13 @@
do_test backup-7.2.3 {
execsql { ROLLBACK }
B step 5000
-@@ -754,17 +630,17 @@
- do_test backup-7.3.1 {
- db2 close
- db3 close
-- file delete -force test2.db
-+ file delete -force -- test2.db
- sqlite3 db2 test2.db
+@@ -759,12 +634,12 @@
sqlite3 db3 test2.db
sqlite3_backup B db2 main db main
- execsql { BEGIN ; CREATE TABLE t2(a, b); } db3
--
+ execsql { BEGIN ; CREATE TABLE t2(a, b); COMMIT; } db3
-+
+
B step 5
} {SQLITE_BUSY}
do_test backup-7.3.2 {
@@ -280,7 +257,7 @@
B step 5000
} {SQLITE_DONE}
do_test backup-7.3.3 {
-@@ -773,7 +649,6 @@
+@@ -773,7 +648,6 @@
test_contents backup-7.3.4 db main db2 main
integrity_check backup-7.3.5 db2
catch { db2 close }
@@ -288,33 +265,10 @@
#-----------------------------------------------------------------------
# The following tests, backup-8.*, test attaching multiple backup
-@@ -783,8 +658,8 @@
- # These tests reuse the database "test.db" left over from backup-7.*.
- #
- do_test backup-8.1 {
-- catch { file delete -force test2.db }
-- catch { file delete -force test3.db }
-+ catch { file delete -force -- test2.db }
-+ catch { file delete -force -- test3.db }
- sqlite3 db2 test2.db
- sqlite3 db3 test3.db
-
-@@ -865,8 +740,8 @@
-
- ifcapable memorymanage {
- db close
-- file delete -force test.db
-- file delete -force bak.db
-+ file delete -force -- test.db
-+ file delete -force -- bak.db
-
- sqlite3 db test.db
- sqlite3 db2 test.db
-@@ -915,17 +790,24 @@
- # used as the source by a backup operation:
+@@ -916,16 +790,24 @@
#
# 10.1.*: If the db is in-memory, the backup is restarted.
--# 10.2.*: If the db is a file, the backup is not restarted.
+ # 10.2.*: If the db is a file, the backup is not restarted.
+# 10.2.*: If the db is a file, the backup is restarted.
+# 10.3.*: If the db is in-memory, and not updated, the backup is not
+# restarted
@@ -322,13 +276,12 @@
+# restarted
#
db close
--file delete -force test.db test.db-journal
+ forcedelete test.db test.db-journal
-foreach {tn file rc} {
- 1 test.db SQLITE_DONE
- 2 :memory: SQLITE_OK
-+file delete -force -- test.db test.db-journal
+foreach {tn file update rc} {
-+ 1 test.db 1 SQLITE_OK
++ 1 test.db 1 SQLITE_DONE
+ 2 :memory: 1 SQLITE_OK
+ 1 test.db 0 SQLITE_DONE
+ 2 :memory: 0 SQLITE_DONE
@@ -340,7 +293,7 @@
CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB);
BEGIN;
INSERT INTO t1 VALUES(NULL, randomblob(200));
-@@ -942,21 +824,18 @@
+@@ -942,11 +824,6 @@
}
} {256}
@@ -350,10 +303,9 @@
- } {1}
-
do_test backup-10.$tn.3 {
-- file delete -force bak.db bak.db-journal
-+ file delete -force -- bak.db bak.db-journal
+ forcedelete bak.db bak.db-journal
sqlite3 db2 bak.db
- sqlite3_backup B db2 main db main
+@@ -954,9 +831,11 @@
B step 50
} {SQLITE_OK}
@@ -368,4 +320,3 @@
do_test backup-10.$tn.5 {
B step 50
-
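The refreshed test hunks above replace raw "file delete -force" and "file copy -force" calls with the harness helpers forcedelete, forcecopy and delete_file. A rough sketch of what such helpers amount to, assuming only that they wrap the plain Tcl file commands (the real tester.tcl versions also retry to ride out transient file-locking errors on Windows):

    # Approximate stand-ins for the test-harness helpers used in the hunks above;
    # not the actual tester.tcl implementations.
    proc forcedelete {args} {
        foreach f $args {
            catch {file delete -force $f}
        }
    }
    proc forcecopy {from to} {
        forcedelete $to
        file copy -force $from $to
    }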
diff --git a/lang/sql/adapter/sqlite-patches/22_unique_key.patch b/lang/sql/adapter/sqlite-patches/22_unique_key.patch
deleted file mode 100644
index 426246b1..00000000
--- a/lang/sql/adapter/sqlite-patches/22_unique_key.patch
+++ /dev/null
@@ -1,79 +0,0 @@
---- src/sqliteInt.h
-+++ src/sqliteInt.h
-@@ -1414,7 +1414,6 @@
- KeyInfo *pKeyInfo; /* Collation and sort-order information */
- u16 nField; /* Number of entries in apMem[] */
- u16 flags; /* Boolean settings. UNPACKED_... below */
-- i64 rowid; /* Used by UNPACKED_PREFIX_SEARCH */
- Mem *aMem; /* Values */
- };
-
-@@ -1426,7 +1425,6 @@
- #define UNPACKED_IGNORE_ROWID 0x0004 /* Ignore trailing rowid on key1 */
- #define UNPACKED_INCRKEY 0x0008 /* Make this key an epsilon larger */
- #define UNPACKED_PREFIX_MATCH 0x0010 /* A prefix match is considered OK */
--#define UNPACKED_PREFIX_SEARCH 0x0020 /* A prefix match is considered OK */
-
- /*
- ** Each SQL index is represented in memory by an
---- src/vdbe.c
-+++ src/vdbe.c
-@@ -3526,6 +3526,7 @@
- Mem *aMx;
- UnpackedRecord r; /* B-Tree index search key */
- i64 R; /* Rowid stored in register P3 */
-+ i64 rowid; /* Rowid found */
-
- pIn3 = &aMem[pOp->p3];
- aMx = &aMem[pOp->p4.i];
-@@ -3555,8 +3556,8 @@
- if( pCrsr!=0 ){
- /* Populate the index search key. */
- r.pKeyInfo = pCx->pKeyInfo;
-- r.nField = nField + 1;
-- r.flags = UNPACKED_PREFIX_SEARCH;
-+ r.nField = nField;
-+ r.flags = UNPACKED_PREFIX_MATCH;
- r.aMem = aMx;
- #ifdef SQLITE_DEBUG
- { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
-@@ -3570,10 +3571,14 @@
- ** to P2. Otherwise, copy the rowid of the conflicting record to
- ** register P3 and fall through to the next instruction. */
- rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &pCx->seekResult);
-- if( (r.flags & UNPACKED_PREFIX_SEARCH) || r.rowid==R ){
-- pc = pOp->p2 - 1;
-- }else{
-- pIn3->u.i = r.rowid;
-+ if( rc != SQLITE_OK || pCx->seekResult != 0 ){
-+ pc = pOp->p2 - 1;
-+ }else{
-+ rc = sqlite3VdbeIdxRowid(db, pCrsr, &rowid);
-+ if (rc == SQLITE_OK && rowid == R)
-+ pc = pOp->p2 - 1;
-+ else
-+ pIn3->u.i = rowid;
- }
- }
- break;
---- src/vdbeaux.c
-+++ src/vdbeaux.c
-@@ -2916,18 +2916,6 @@
- rc = -rc;
- }
-
-- /* If the PREFIX_SEARCH flag is set and all fields except the final
-- ** rowid field were equal, then clear the PREFIX_SEARCH flag and set
-- ** pPKey2->rowid to the value of the rowid field in (pKey1, nKey1).
-- ** This is used by the OP_IsUnique opcode.
-- */
-- if( (pPKey2->flags & UNPACKED_PREFIX_SEARCH) && i==(pPKey2->nField-1) ){
-- assert( idx1==szHdr1 && rc );
-- assert( mem1.flags & MEM_Int );
-- pPKey2->flags &= ~UNPACKED_PREFIX_SEARCH;
-- pPKey2->rowid = mem1.u.i;
-- }
--
- return rc;
- }
- i++;
diff --git a/lang/sql/adapter/sqlite-patches/23_sequence_functions.patch b/lang/sql/adapter/sqlite-patches/23_sequence_functions.patch
index 71480c0e..2d3c6b81 100644
--- a/lang/sql/adapter/sqlite-patches/23_sequence_functions.patch
+++ b/lang/sql/adapter/sqlite-patches/23_sequence_functions.patch
@@ -1,30 +1,15 @@
--- Makefile.in
+++ Makefile.in
-@@ -159,17 +159,17 @@
-
- USE_AMALGAMATION = @USE_AMALGAMATION@
-
- # Object files for the SQLite library (non-amalgamation).
- #
+@@ -164,7 +164,7 @@
LIBOBJS0 = alter.lo analyze.lo attach.lo auth.lo \
backup.lo bitvec.lo btmutex.lo btree.lo build.lo \
callback.lo complete.lo ctime.lo date.lo db_encrypt.lo db_pragma.lo \
- db_shell.lo delete.lo expr.lo fault.lo fkey.lo \
+ db_sequence.lo db_shell.lo delete.lo expr.lo fault.lo fkey.lo \
- fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo fts3_porter.lo \
- fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo fts3_write.lo \
- func.lo global.lo hash.lo \
- icu.lo insert.lo journal.lo legacy.lo loadext.lo \
- main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \
- memjournal.lo \
- mutex.lo mutex_noop.lo mutex_os2.lo mutex_unix.lo mutex_w32.lo \
- notify.lo opcodes.lo os.lo os_os2.lo os_unix.lo os_win.lo \
-@@ -204,16 +204,17 @@
- $(TOP)/../adapter/btreeInt.h \
- $(TOP)/src/build.c \
- $(TOP)/src/callback.c \
- $(TOP)/src/complete.c \
- $(TOP)/src/ctime.c \
+ fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo \
+ fts3_porter.lo fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo \
+ fts3_tokenize_vtab.lo \
+@@ -211,6 +211,7 @@
$(TOP)/src/date.c \
$(TOP)/../adapter/db_encrypt.c \
$(TOP)/../adapter/db_pragma.c \
@@ -32,17 +17,7 @@
$(TOP)/../adapter/db_shell.c \
$(TOP)/src/delete.c \
$(TOP)/src/expr.c \
- $(TOP)/src/fault.c \
- $(TOP)/src/fkey.c \
- $(TOP)/src/func.c \
- $(TOP)/src/global.c \
- $(TOP)/src/hash.c \
-@@ -381,16 +382,17 @@
-
- # Source code to the library files needed by the test fixture
- #
- TESTSRC2 = \
- $(TOP)/src/attach.c \
+@@ -410,6 +411,7 @@
$(TOP)/../adapter/backup.c \
$(TOP)/src/bitvec.c \
$(TOP)/../adapter/btree.c \
@@ -50,17 +25,7 @@
$(TOP)/src/build.c \
$(TOP)/src/ctime.c \
$(TOP)/src/date.c \
- $(TOP)/src/expr.c \
- $(TOP)/src/func.c \
- $(TOP)/src/insert.c \
- $(TOP)/../adapter/wal.c \
- $(TOP)/src/mem5.c \
-@@ -579,16 +581,20 @@
- db_encrypt.lo: $(TOP)/../adapter/db_encrypt.c $(HDR) \
- $(TOP)/../adapter/btreeInt.h
- $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/../adapter/db_encrypt.c
-
- db_pragma.lo: $(TOP)/../adapter/db_pragma.c $(HDR) \
+@@ -626,6 +628,10 @@
$(TOP)/../adapter/btreeInt.h
$(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/../adapter/db_pragma.c
@@ -71,11 +36,6 @@
db_shell.lo: $(TOP)/../adapter/db_shell.c $(HDR) \
$(TOP)/../adapter/btreeInt.h
$(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/../adapter/db_shell.c
-
- delete.lo: $(TOP)/src/delete.c $(HDR)
- $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/delete.c
-
- expr.lo: $(TOP)/src/expr.c $(HDR)
--- src/main.c
+++ src/main.c
@@ -55,6 +55,8 @@
@@ -87,10 +47,10 @@
#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE)
/*
** If the following function pointer is not NULL and if
-@@ -1854,6 +1856,13 @@
+@@ -2532,6 +2534,13 @@
db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 0);
assert( db->pDfltColl!=0 );
-
+
+ /*
+ ** Berkley DB customization!
+ ** Register any Berkeley DB specific extension functions.
@@ -99,25 +59,15 @@
+ /* End Berkeley DB customization. */
+
/* Also add a UTF-8 case-insensitive collation sequence. */
- createCollation(db, "NOCASE", SQLITE_UTF8, SQLITE_COLL_NOCASE, 0,
- nocaseCollatingFunc, 0);
+ createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0);
+
--- tool/mksqlite3c.tcl
+++ tool/mksqlite3c.tcl
-@@ -247,16 +247,17 @@
- pager.c
- wal.c
-
- btmutex.c
- btree.c
+@@ -271,6 +271,7 @@
backup.c
db_encrypt.c
db_pragma.c
+ db_sequence.c
db_shell.c
-
+
vdbemem.c
- vdbeaux.c
- vdbeapi.c
- vdbetrace.c
- vdbe.c
- vdbeblob.c
diff --git a/lang/sql/adapter/sqlite-patches/24_exclusive_error_handling.patch b/lang/sql/adapter/sqlite-patches/24_exclusive_error_handling.patch
index edaaaa4b..1d66bc0b 100644
--- a/lang/sql/adapter/sqlite-patches/24_exclusive_error_handling.patch
+++ b/lang/sql/adapter/sqlite-patches/24_exclusive_error_handling.patch
@@ -1,11 +1,6 @@
--- src/prepare.c
+++ src/prepare.c
-@@ -242,16 +242,20 @@
- ** meta[8] unused
- ** meta[9] unused
- **
- ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to
- ** the possible values of meta[4].
+@@ -249,6 +249,10 @@
*/
for(i=0; i<ArraySize(meta); i++){
sqlite3BtreeGetMeta(pDb->pBt, i+1, (u32 *)&meta[i]);
@@ -16,41 +11,21 @@
}
pDb->pSchema->schema_cookie = meta[BTREE_SCHEMA_VERSION-1];
- /* If opening a non-empty database, check the text encoding. For the
- ** main database, set sqlite3.enc to the encoding of the main database.
- ** For an attached db, it is an error if the encoding is not the same
- ** as sqlite3.enc.
- */
-@@ -466,17 +470,20 @@
- openedTransaction = 1;
- }
-
- /* Read the schema cookie from the database. If it does not match the
- ** value stored as part of the in-memory schema representation,
+@@ -480,7 +484,10 @@
** set Parse.rc to SQLITE_SCHEMA. */
sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie);
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+- if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){
+ if( pParse->rc == SQLITE_OK && db->errCode == SQLITE_BUSY )
+ pParse->rc = db->errCode;
+ if( pParse->rc != SQLITE_BUSY &&
+ cookie!=db->aDb[iDb].pSchema->schema_cookie ){
-- if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){
- sqlite3ResetInternalSchema(db, iDb);
+ sqlite3ResetOneSchema(db, iDb);
pParse->rc = SQLITE_SCHEMA;
}
-
- /* Close the transaction, if one was opened. */
- if( openedTransaction ){
- sqlite3BtreeCommit(pBt);
- }
--- src/vdbe.c
+++ src/vdbe.c
-@@ -2858,16 +2858,18 @@
- iDb = pOp->p1;
- iCookie = pOp->p3;
- assert( pOp->p3<SQLITE_N_BTREE_META );
- assert( iDb>=0 && iDb<db->nDb );
- assert( db->aDb[iDb].pBt!=0 );
+@@ -3055,6 +3055,8 @@
assert( (p->btreeMask & (((yDbMask)1)<<iDb))!=0 );
sqlite3BtreeGetMeta(db->aDb[iDb].pBt, iCookie, (u32 *)&iMeta);
@@ -59,17 +34,7 @@
pOut->u.i = iMeta;
break;
}
-
- /* Opcode: SetCookie P1 P2 P3 * *
- **
- ** Write the content of register P3 (interpreted as an integer)
- ** into cookie number P2 of database P1. P2==1 is the schema version.
-@@ -2930,16 +2932,20 @@
- Btree *pBt;
-
- assert( pOp->p1>=0 && pOp->p1<db->nDb );
- assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
- assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) );
+@@ -3129,6 +3131,10 @@
pBt = db->aDb[pOp->p1].pBt;
if( pBt ){
sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta);
@@ -80,8 +45,3 @@
iGen = db->aDb[pOp->p1].pSchema->iGeneration;
}else{
iGen = iMeta = 0;
- }
- if( iMeta!=pOp->p2 || iGen!=pOp->p3 ){
- sqlite3DbFree(db, p->zErrMsg);
- p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed");
- /* If the schema-cookie from the database file matches the cookie
diff --git a/lang/sql/adapter/sqlite-patches/25_tester.patch b/lang/sql/adapter/sqlite-patches/25_tester.patch
index fe4a9e04..73308e98 100644
--- a/lang/sql/adapter/sqlite-patches/25_tester.patch
+++ b/lang/sql/adapter/sqlite-patches/25_tester.patch
@@ -1,43 +1,244 @@
--- test/tester.tcl
+++ test/tester.tcl
-@@ -1441,6 +1441,7 @@
- set f [string range $f2 3 end]
- file copy -force $f2 $f
+@@ -138,7 +138,7 @@
+ # NOTE: Return the default number of retries for [file] operations. A
+ # value of zero or less here means "disabled".
+ #
+- return [expr {$::tcl_platform(platform) eq "windows" ? 50 : 0}]
++ return [expr {$::tcl_platform(platform) eq "windows" ? 10 : 0}]
}
-+ file delete test.db-journal/__db.register
+ return $::G(file-retries)
}
- proc db_restore_and_reopen {{dbfile test.db}} {
- catch { db close }
-@@ -1449,7 +1450,7 @@
+@@ -500,6 +500,9 @@
+ forcedelete test.db-journal
+ forcedelete test.db-wal
+ sqlite3 db ./test.db
++ if {[ array names ::env BDB_BLOB_SETTING ] != "" } {
++ db eval "pragma large_record_opt=$::env(BDB_BLOB_SETTING)"
++ }
+ set ::DB [sqlite3_connection_pointer db]
+ if {[info exists ::SETUP_SQL]} {
+ db eval $::SETUP_SQL
+@@ -534,6 +537,11 @@
+ }
}
- proc db_delete_and_reopen {{file test.db}} {
- catch { db close }
-- foreach f [glob -nocomplain test.db*] { file delete -force $f }
-+ foreach f [glob -nocomplain test.db*] { forcedelete $f }
- sqlite3 db $file
+
++# Pull in the list of test cases that are excluded and ignored when running
++# with Berkeley DB.
++#
++source $testdir/../../../../test/sql/bdb_excl.test
++
+ # Record the fact that a sequence of tests were omitted.
+ #
+ proc omit_test {name reason {append 1}} {
+@@ -581,12 +589,20 @@
+ # Invoke the do_test procedure to run a single test
+ #
+ proc do_test {name cmd expected} {
+- global argv cmdlinearg
++ global argv cmdlinearg IGNORE_CASES EXCLUDE_CASES
+
+ fix_testname name
+
+ sqlite3_memdebug_settitle $name
+
++ foreach pattern $EXCLUDE_CASES {
++ if {[string match $pattern $name]} {
++ puts "$name... Skipping"
++ flush stdout
++ return
++ }
++ }
++
+ # if {[llength $argv]==0} {
+ # set go 1
+ # } else {
+@@ -650,11 +666,19 @@
+ set ok [expr {[string compare $result $expected]==0}]
+ }
+ if {!$ok} {
+- # if {![info exists ::testprefix] || $::testprefix eq ""} {
+- # error "no test prefix"
+- # }
+- puts "\nExpected: \[$expected\]\n Got: \[$result\]"
+- fail_test $name
++ set ignore 0
++ foreach pattern $IGNORE_CASES {
++ if {[string match $pattern $name]} {
++ set ignore 1
++ break
++ }
++ }
++ if {$ignore} {
++ puts " Ignored"
++ } else {
++ puts "\nExpected: \[$expected\]\n Got: \[$result\]"
++ fail_test $name
++ }
+ } else {
+ puts " Ok"
+ }
+@@ -1224,12 +1248,6 @@
+ return $ret
+ }
+
+-# Returns non-zero if the capabilities are present; zero otherwise.
+-#
+-proc capable {expr} {
+- set e [fix_ifcapable_expr $expr]; return [expr ($e)]
+-}
+-
+ # Evaluate a boolean expression of capabilities. If true, execute the
+ # code. Omit the code if false.
+ #
+@@ -1340,25 +1358,6 @@
+ lappend r $msg
}
---- test/permutations.test
-+++ test/permutations.test
-@@ -171,4 +171,9 @@
+-proc run_ioerr_prep {} {
+- set ::sqlite_io_error_pending 0
+- catch {db close}
+- catch {db2 close}
+- catch {forcedelete test.db}
+- catch {forcedelete test.db-journal}
+- catch {forcedelete test2.db}
+- catch {forcedelete test2.db-journal}
+- set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db]
+- sqlite3_extended_result_codes $::DB $::ioerropts(-erc)
+- if {[info exists ::ioerropts(-tclprep)]} {
+- eval $::ioerropts(-tclprep)
+- }
+- if {[info exists ::ioerropts(-sqlprep)]} {
+- execsql $::ioerropts(-sqlprep)
+- }
+- expr 0
+-}
+-
+ # Usage: do_ioerr_test <test number> <options...>
+ #
+ # This proc is used to implement test cases that check that IO errors
+@@ -1392,25 +1392,9 @@
+ # a couple of obscure IO errors that do not return them.
+ set ::ioerropts(-erc) 0
+
+- # Create a single TCL script from the TCL and SQL specified
+- # as the body of the test.
+- set ::ioerrorbody {}
+- if {[info exists ::ioerropts(-tclbody)]} {
+- append ::ioerrorbody "$::ioerropts(-tclbody)\n"
+- }
+- if {[info exists ::ioerropts(-sqlbody)]} {
+- append ::ioerrorbody "db eval {$::ioerropts(-sqlbody)}"
+- }
+-
+- save_prng_state
+- if {$::ioerropts(-cksum)} {
+- run_ioerr_prep
+- eval $::ioerrorbody
+- set ::goodcksum [cksum]
+- }
+-
+ set ::go 1
+ #reset_prng_state
++ save_prng_state
+ for {set n $::ioerropts(-start)} {$::go} {incr n} {
+ set ::TN $n
+ incr ::ioerropts(-count) -1
+@@ -1427,12 +1410,27 @@
+ # Delete the files test.db and test2.db, then execute the TCL and
+ # SQL (in that order) to prepare for the test case.
+ do_test $testname.$n.1 {
+- run_ioerr_prep
++ set ::sqlite_io_error_pending 0
++ catch {db close}
++ catch {db2 close}
++ catch {forcedelete test.db}
++ catch {forcedelete test.db-journal}
++ catch {forcedelete test2.db}
++ catch {forcedelete test2.db-journal}
++ set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db]
++ sqlite3_extended_result_codes $::DB $::ioerropts(-erc)
++ if {[info exists ::ioerropts(-tclprep)]} {
++ eval $::ioerropts(-tclprep)
++ }
++ if {[info exists ::ioerropts(-sqlprep)]} {
++ execsql $::ioerropts(-sqlprep)
++ }
++ expr 0
+ } {0}
+
+ # Read the 'checksum' of the database.
+ if {$::ioerropts(-cksum)} {
+- set ::checksum [cksum]
++ set checksum [cksum]
+ }
+
+ # Set the Nth IO error to fail.
+@@ -1440,10 +1438,20 @@
+ set ::sqlite_io_error_persist $::ioerropts(-persist)
+ set ::sqlite_io_error_pending $n
+ }] $n
++
++ # Create a single TCL script from the TCL and SQL specified
++ # as the body of the test.
++ set ::ioerrorbody {}
++ if {[info exists ::ioerropts(-tclbody)]} {
++ append ::ioerrorbody "$::ioerropts(-tclbody)\n"
++ }
++ if {[info exists ::ioerropts(-sqlbody)]} {
++ append ::ioerrorbody "db eval {$::ioerropts(-sqlbody)}"
++ }
+
+- # Execute the TCL script created for the body of this test. If
+- # at least N IO operations performed by SQLite as a result of
+- # the script, the Nth will fail.
++ # Execute the TCL Script created in the above block. If
++ # there are at least N IO operations performed by SQLite as
++ # a result of the script, the Nth will fail.
+ do_test $testname.$n.3 {
+ set ::sqlite_io_error_hit 0
+ set ::sqlite_io_error_hardhit 0
+@@ -1547,15 +1555,8 @@
+ catch {db close}
+ catch {db2 close}
+ set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db]
+- set nowcksum [cksum]
+- set res [expr {$nowcksum==$::checksum || $nowcksum==$::goodcksum}]
+- if {$res==0} {
+- puts "now=$nowcksum"
+- puts "the=$::checksum"
+- puts "fwd=$::goodcksum"
+- }
+- set res
+- } 1
++ cksum
++ } $checksum
+ }
+
+ set ::sqlite_io_error_hardhit 0
+@@ -1747,6 +1748,7 @@
+ $db eval { SELECT * FROM sqlite_master }
+ do_test $testname [list $db eval "PRAGMA main.journal_mode"] {wal}
+ }
++ forcedelete test.db-journal/__db.register
}
-+# We exlcude below fts3 tests:
-+# fts3defer.test, fts3defer2.test
-+# -- Known difference of zeroblob between SQLite and BDBSQL. #19764
-+# fts3fault.test
-+# -- Known difference: DBSQL testfixture does't support sqlite_io_error tests.
- test_suite "fts3" -prefix "" -description {
- All FTS3 tests except fts3rnd.test.
-@@ -178,8 +183,8 @@
- fts3ak.test fts3al.test fts3am.test fts3an.test fts3ao.test
- fts3atoken.test fts3b.test fts3c.test fts3cov.test fts3d.test
-- fts3defer.test fts3defer2.test fts3e.test fts3expr.test fts3expr2.test
-+ fts3e.test fts3expr.test fts3expr2.test
- fts3near.test fts3query.test fts3shared.test fts3snippet.test
-
-- fts3fault.test fts3malloc.test fts3matchinfo.test
-+ fts3malloc.test fts3matchinfo.test
-
- fts3aux1.test fts3comp1.test
-
+ proc permutation {} {
+@@ -1884,6 +1886,7 @@
+ set f [string range $f2 3 end]
+ forcecopy $f2 $f
+ }
++ forcedelete test.db-journal/__db.register
+ }
+ proc db_restore_and_reopen {{dbfile test.db}} {
+ catch { db close }
+--- Makefile.in
++++ Makefile.in
+@@ -445,7 +445,6 @@
+ $(TOP)/ext/fts3/fts3.c \
+ $(TOP)/ext/fts3/fts3_aux.c \
+ $(TOP)/ext/fts3/fts3_expr.c \
+- $(TOP)/ext/fts3/fts3_term.c \
+ $(TOP)/ext/fts3/fts3_tokenizer.c \
+ $(TOP)/ext/fts3/fts3_write.c \
+ $(TOP)/ext/async/sqlite3async.c
diff --git a/lang/sql/adapter/sqlite-patches/26_solaris_build.patch b/lang/sql/adapter/sqlite-patches/26_solaris_build.patch
index b3a5ec8a..4b766c8d 100644
--- a/lang/sql/adapter/sqlite-patches/26_solaris_build.patch
+++ b/lang/sql/adapter/sqlite-patches/26_solaris_build.patch
@@ -1,7 +1,6 @@
--- src/test_demovfs.c
+++ src/test_demovfs.c
-@@ -119,8 +119,9@@
-
+@@ -118,6 +118,7 @@
#include <sqlite3.h>
#include <assert.h>
@@ -9,4 +8,3 @@
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
- #include <sys/file.h>
diff --git a/lang/sql/adapter/sqlite-patches/27_sqlthread.patch b/lang/sql/adapter/sqlite-patches/27_sqlthread.patch
deleted file mode 100644
index 5d6ef98d..00000000
--- a/lang/sql/adapter/sqlite-patches/27_sqlthread.patch
+++ /dev/null
@@ -1,97 +0,0 @@
---- src/test_thread.c
-+++ src/test_thread.c
-@@ -273,6 +273,9 @@
-
- const char *zFilename;
- sqlite3 *db;
-+ void *pKey = 0;
-+ int nKey = 0;
-+ char *zErrMsg;
- int rc;
- char zBuf[100];
- extern void Md5_Register(sqlite3*);
-@@ -281,7 +284,18 @@
- UNUSED_PARAMETER(objc);
-
- zFilename = Tcl_GetString(objv[2]);
-+ pKey = Tcl_GetByteArrayFromObj(objv[3], &nKey);
- rc = sqlite3_open(zFilename, &db);
-+#ifdef SQLITE_HAS_CODEC
-+ if(db){
-+ rc = sqlite3_key(db, pKey, nKey);
-+ if( rc ){
-+ zErrMsg = sqlite3_mprintf("%s", sqlite3ErrStr(rc));
-+ sqlite3_close(db);
-+ db = NULL;
-+ }
-+ }
-+#endif
- Md5_Register(db);
- sqlite3_busy_handler(db, xBusy, 0);
-
-@@ -330,7 +344,7 @@
- } aSub[] = {
- {"parent", sqlthread_parent, 1, "SCRIPT"},
- {"spawn", sqlthread_spawn, 2, "VARNAME SCRIPT"},
-- {"open", sqlthread_open, 1, "DBNAME"},
-+ {"open", sqlthread_open, 2, "DBNAME KEY"},
- {"id", sqlthread_id, 0, ""},
- {0, 0, 0}
- };
---- test/thread001.test
-+++ test/thread001.test
-@@ -77,7 +77,11 @@
- #sqlthread parent {puts STARTING..}
- set needToClose 0
- if {![info exists ::DB]} {
-- set ::DB [sqlthread open test.db]
-+ set key ""
-+ if {[sqlite -has-codec]} {
-+ set key "xyzzy"
-+ }
-+ set ::DB [sqlthread open test.db $key]
- #sqlthread parent "puts \"OPEN $::DB\""
- set needToClose 1
- }
-diff --git a/lang/sql/sqlite/test/thread003.test b/lang/sql/sqlite/test/thread003.test
---- test/thread003.test
-+++ test/thread003.test
-@@ -80,7 +80,11 @@
- foreach zFile {test.db test2.db} {
- set SCRIPT [format {
- set iEnd [expr {[clock_seconds] + %d}]
-- set ::DB [sqlthread open %s]
-+ set key ""
-+ if {[sqlite -has-codec]} {
-+ set key "xyzzy"
-+ }
-+ set ::DB [sqlthread open %s $key]
-
- # Set the cache size to 15 pages per cache. 30 available globally.
- execsql { PRAGMA cache_size = 15 }
-@@ -117,7 +121,11 @@
- set SCRIPT [format {
- set iStart [clock_seconds]
- set iEnd [expr {[clock_seconds] + %d}]
-- set ::DB [sqlthread open %s]
-+ set key ""
-+ if {[sqlite -has-codec]} {
-+ set key "xyzzy"
-+ }
-+ set ::DB [sqlthread open %s $key]
-
- # Set the cache size to 15 pages per cache. 30 available globally.
- execsql { PRAGMA cache_size = 15 }
-@@ -156,7 +164,11 @@
- do_test thread003.4 {
- thread_spawn finished(1) $thread_procs [format {
- set iEnd [expr {[clock_seconds] + %d}]
-- set ::DB [sqlthread open test.db]
-+ set key ""
-+ if {[sqlite -has-codec]} {
-+ set key "xyzzy"
-+ }
-+ set ::DB [sqlthread open test.db $key]
-
- # Set the cache size to 15 pages per cache. 30 available globally.
- execsql { PRAGMA cache_size = 15 }
diff --git a/lang/sql/adapter/sqlite-patches/28_wal_pragma.patch b/lang/sql/adapter/sqlite-patches/28_wal_pragma.patch
deleted file mode 100644
index 4c700a51..00000000
--- a/lang/sql/adapter/sqlite-patches/28_wal_pragma.patch
+++ /dev/null
@@ -1,21 +0,0 @@
---- src/vdbe.c
-+++ src/vdbe.c
-@@ -5323,17 +5323,17 @@
-
- #ifndef SQLITE_OMIT_WAL
- zFilename = sqlite3PagerFilename(pPager);
-
- /* Do not allow a transition to journal_mode=WAL for a database
- ** in temporary storage or if the VFS does not support shared memory
- */
- if( eNew==PAGER_JOURNALMODE_WAL
-- && (zFilename[0]==0 /* Temp file */
-+ && (zFilename==NULL || zFilename[0]==0 /* Temp file */
- || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */
- ){
- eNew = eOld;
- }
-
- if( (eNew!=eOld)
- && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL)
- ){
diff --git a/lang/sql/adapter/sqlite-patches/29_manydb_test.patch b/lang/sql/adapter/sqlite-patches/29_manydb_test.patch
index 603fbd5c..a58e1cf9 100644
--- a/lang/sql/adapter/sqlite-patches/29_manydb_test.patch
+++ b/lang/sql/adapter/sqlite-patches/29_manydb_test.patch
@@ -5,7 +5,7 @@
source $testdir/tester.tcl
-set N 300
-+set N 100
++set N 50
# if we're using proxy locks, we use 5 filedescriptors for a db
# that is open and in the middle of writing changes, normally
# sqlite uses 3 (proxy locking adds the conch and the local lock)
diff --git a/lang/sql/adapter/sqlite-patches/30_handle_cache.patch b/lang/sql/adapter/sqlite-patches/30_handle_cache.patch
index 36de9fc8..69f0b8da 100644
--- a/lang/sql/adapter/sqlite-patches/30_handle_cache.patch
+++ b/lang/sql/adapter/sqlite-patches/30_handle_cache.patch
@@ -1,48 +1,49 @@
--- src/btree.h
+++ src/btree.h
-@@ -94,6 +94,14 @@ int sqlite3BtreeCopyFile(Btree *, Btree
+@@ -121,6 +121,14 @@
+ int sqlite3BtreeNewDb(Btree *p);
- int sqlite3BtreeIncrVacuum(Btree *);
-
-+/*
-+ * BEGIN Berkeley DB specific btree APIs.
+ /*
++ * BEGIN Berkeley DB specific function forward declarations.
+ */
-+int sqlite3BtreeHandleCacheUpdate(Btree *p, int schema_changed);
++int sqlite3BtreeHandleCacheFixup(Btree *, int);
+/*
-+ * END Berkeley DB specific btree APIs.
++ * END Berkeley DB specific function forward declarations.
+ */
+
- /* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR
- ** of the flags shown below.
- **
++/*
+ ** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta
+ ** should be one of the following values. The integer values are assigned
+ ** to constants so that the offset of the corresponding field in an
--- src/vdbe.c
+++ src/vdbe.c
-@@ -2929,11 +2929,13 @@ case OP_SetCookie: { /* in3 */
+@@ -3122,12 +3122,14 @@
case OP_VerifyCookie: {
int iMeta;
int iGen;
-+ int iSchemaChanged;
++ int iSchemaUpdated;
Btree *pBt;
assert( pOp->p1>=0 && pOp->p1<db->nDb );
assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) );
-+ iSchemaChanged = 0;
+ assert( p->bIsReader );
++ iSchemaUpdated = 0;
pBt = db->aDb[pOp->p1].pBt;
if( pBt ){
sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta);
-@@ -2962,12 +2964,14 @@ case OP_VerifyCookie: {
+@@ -3156,12 +3158,14 @@
** a v-table method.
*/
if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){
-+ iSchemaChanged = 1;
- sqlite3ResetInternalSchema(db, pOp->p1);
++ iSchemaUpdated = 1;
+ sqlite3ResetOneSchema(db, pOp->p1);
}
p->expired = 1;
rc = SQLITE_SCHEMA;
}
-+ sqlite3BtreeHandleCacheUpdate(pBt, iSchemaChanged);
++ sqlite3BtreeHandleCacheFixup(pBt, iSchemaUpdated);
break;
}
diff --git a/lang/sql/adapter/sqlite-patches/31_eqp_test.patch b/lang/sql/adapter/sqlite-patches/31_eqp_test.patch
new file mode 100644
index 00000000..b3f49810
--- /dev/null
+++ b/lang/sql/adapter/sqlite-patches/31_eqp_test.patch
@@ -0,0 +1,18 @@
+--- test/eqp.test
++++ test/eqp.test
+@@ -521,6 +521,7 @@
+ # documentation page eqp.html works. The C code is duplicated in test1.c
+ # and wrapped in Tcl command [print_explain_query_plan]
+ #
++db close
+ set boilerplate {
+ proc explain_query_plan {db sql} {
+ set stmt [sqlite3_prepare_v2 db $sql -1 DUMMY]
+@@ -556,6 +557,7 @@
+ 0 0 0 COMPOUND SUBQUERIES 1 AND 2 (EXCEPT)
+ }]
+
++sqlite3 db test.db
+ #-------------------------------------------------------------------------
+ # The following tests - eqp-7.* - test that queries that use the OP_Count
+ # optimization return something sensible with EQP.
diff --git a/lang/sql/adapter/sqlite-patches/32_permutations_test.patch b/lang/sql/adapter/sqlite-patches/32_permutations_test.patch
new file mode 100644
index 00000000..f3316172
--- /dev/null
+++ b/lang/sql/adapter/sqlite-patches/32_permutations_test.patch
@@ -0,0 +1,63 @@
+--- test/permutations.test
++++ test/permutations.test
+@@ -178,6 +178,11 @@
+ thread004.test thread005.test walthread.test
+ }
+
++# We exclude the fts3 tests below:
++# fts3defer.test, fts3defer2.test
++# -- Known difference of zeroblob between SQLite and BDBSQL. #19764
++# fts3fault.test
++# -- Known difference: DBSQL testfixture doesn't support sqlite_io_error tests.
+ test_suite "fts3" -prefix "" -description {
+ All FTS3 tests except fts3rnd.test.
+ } -files {
+@@ -185,14 +190,14 @@
+ fts3af.test fts3ag.test fts3ah.test fts3ai.test fts3aj.test
+ fts3ak.test fts3al.test fts3am.test fts3an.test fts3ao.test
+ fts3atoken.test fts3b.test fts3c.test fts3cov.test fts3d.test
+- fts3defer.test fts3defer2.test fts3e.test fts3expr.test fts3expr2.test
++ fts3e.test fts3expr.test fts3expr2.test
+ fts3expr3.test
+- fts3near.test fts3query.test fts3shared.test fts3snippet.test
++ fts3near.test fts3query.test fts3snippet.test
+ fts3sort.test
+- fts3fault.test fts3malloc.test fts3matchinfo.test
++ fts3malloc.test fts3matchinfo.test
+ fts3aux1.test fts3comp1.test fts3auto.test
+ fts4aa.test fts4content.test
+- fts3conf.test fts3prefix.test fts3fault2.test fts3corrupt.test
++ fts3conf.test fts3prefix.test fts3corrupt.test
+ fts3corrupt2.test fts3first.test fts4langid.test fts4merge.test
+ fts4check.test fts4unicode.test fts4noti.test
+ fts3varint.test
+@@ -537,6 +542,29 @@
+ vacuum.test view.test where.test
+ }
+
++# Run tests appropriate to the BDB SQL API using UTF-16 databases.
++#
++test_suite "bdb-utf16" -description {
++ Run tests using UTF-16 databases
++} -presql {
++ pragma encoding = 'UTF-16'
++} -files {
++ alter.test alter3.test
++ auth.test bind.test blob.test collate1.test
++ collate2.test collate3.test collate4.test collate6.test
++ date.test delete.test expr.test fkey1.test
++ index.test insert2.test insert.test interrupt.test in.test
++ intpkey.test join2.test join.test lastinsert.test
++ laststmtchanges.test limit.test main.test
++ memdb.test minmax.test misc2.test misc3.test notnull.test
++ null.test quote.test rowid.test select1.test select2.test
++ select3.test select4.test select6.test sort.test
++ subselect.test tableapi.test table.test temptable.test
++ trace.test trigger1.test trigger2.test trigger3.test
++ trigger4.test types2.test types.test unique.test update.test
++ view.test where.test
++}
++
+ # Run some tests in exclusive locking mode.
+ #
+ test_suite "exclusive" -description {
diff --git a/lang/sql/adapter/sqlite-patches/33_vdbe_assert.patch b/lang/sql/adapter/sqlite-patches/33_vdbe_assert.patch
new file mode 100644
index 00000000..0b8c901e
--- /dev/null
+++ b/lang/sql/adapter/sqlite-patches/33_vdbe_assert.patch
@@ -0,0 +1,27 @@
+--- src/vdbe.c
++++ src/vdbe.c
+@@ -2327,7 +2327,9 @@
+ assert( rc==SQLITE_OK ); /* DataSize() cannot fail */
+ pC->aRow = sqlite3BtreeDataFetch(pCrsr, &avail);
+ }
+- assert( avail<=65536 ); /* Maximum page size is 64KiB */
++ /* The BDB version can return data items larger than the largest page.
++ ** assert( avail<=65536 );
++ */
+ if( pC->payloadSize <= (u32)avail ){
+ pC->szRow = pC->payloadSize;
+ }else{
+@@ -3296,9 +3298,10 @@
+ assert( OPFLAG_BULKCSR==BTREE_BULKLOAD );
+ sqlite3BtreeCursorHints(pCur->pCursor, (pOp->p5 & OPFLAG_BULKCSR));
+
+- /* Since it performs no memory allocation or IO, the only value that
+- ** sqlite3BtreeCursor() may return is SQLITE_OK. */
+- assert( rc==SQLITE_OK );
++ /* The BDB version of sqlite3BtreeCursor() performs disk i/o and memory
++ ** allocations and so may return an error, so this assert is not valid.
++ ** assert( rc==SQLITE_OK );
++ */
+
+ /* Set the VdbeCursor.isTable variable. Previous versions of
+ ** SQLite used to check if the root-page flags were sane at this point
diff --git a/lang/sql/adapter/vacuum.c b/lang/sql/adapter/vacuum.c
index 88ccf519..37da19cf 100644
--- a/lang/sql/adapter/vacuum.c
+++ b/lang/sql/adapter/vacuum.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -58,7 +58,7 @@ int btreeVacuum(Btree *p, char **pzErrMsg) {
if (rc != SQLITE_DONE) {
sqlite3SetString(pzErrMsg, db,
"error during vacuum, rolled back");
- (void)sqlite3BtreeRollback(p);
+ (void)sqlite3BtreeRollback(p, SQLITE_OK);
} else if ((rc = sqlite3BtreeCommit(p)) != SQLITE_OK) {
sqlite3SetString(pzErrMsg, db,
"failed to commit the vacuum transaction");
diff --git a/lang/sql/adapter/wal.c b/lang/sql/adapter/wal.c
index 6e35afc2..bb6e5a3f 100644
--- a/lang/sql/adapter/wal.c
+++ b/lang/sql/adapter/wal.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WAL)
diff --git a/lang/sql/adapter/wal.h b/lang/sql/adapter/wal.h
index d328318e..03ec3dbb 100644
--- a/lang/sql/adapter/wal.h
+++ b/lang/sql/adapter/wal.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include "sqliteInt.h"