Diffstat (limited to 'lang/sql/sqlite/tool')
-rw-r--r--  lang/sql/sqlite/tool/build-all-msvc.bat        546
-rw-r--r--  lang/sql/sqlite/tool/build-shell.sh             22
-rw-r--r--  lang/sql/sqlite/tool/checkSpacing.c             84
-rw-r--r--  lang/sql/sqlite/tool/extract.c                  46
-rw-r--r--  lang/sql/sqlite/tool/fast_vacuum.c             234
-rw-r--r--  lang/sql/sqlite/tool/getlock.c                 134
-rw-r--r--  lang/sql/sqlite/tool/lemon.c                   439
-rw-r--r--  lang/sql/sqlite/tool/logest.c                  141
-rw-r--r--  lang/sql/sqlite/tool/mkautoconfamal.sh          83
-rw-r--r--  lang/sql/sqlite/tool/mkkeywordhash.c            10
-rw-r--r--  lang/sql/sqlite/tool/mkpragmatab.tcl           434
-rw-r--r--  lang/sql/sqlite/tool/mksqlite3c-noext.tcl      305
-rw-r--r--  lang/sql/sqlite/tool/mksqlite3c.tcl             37
-rw-r--r--  lang/sql/sqlite/tool/mksqlite3h.tcl             12
-rw-r--r--  lang/sql/sqlite/tool/mksqlite3internalh.tcl      1
-rw-r--r--  lang/sql/sqlite/tool/mkvsix.tcl                655
-rw-r--r--  lang/sql/sqlite/tool/offsets.c                 329
-rw-r--r--  lang/sql/sqlite/tool/omittest.tcl               67
-rw-r--r--  lang/sql/sqlite/tool/pagesig.c                  92
-rw-r--r--  lang/sql/sqlite/tool/shell1.test               714
-rw-r--r--  lang/sql/sqlite/tool/shell2.test               222
-rw-r--r--  lang/sql/sqlite/tool/shell3.test               124
-rw-r--r--  lang/sql/sqlite/tool/shell4.test               129
-rw-r--r--  lang/sql/sqlite/tool/shell5.test               243
-rw-r--r--  lang/sql/sqlite/tool/showdb.c                  318
-rw-r--r--  lang/sql/sqlite/tool/showwal.c                 293
-rw-r--r--  lang/sql/sqlite/tool/spaceanal.tcl             337
-rw-r--r--  lang/sql/sqlite/tool/stack_usage.tcl            98
-rw-r--r--  lang/sql/sqlite/tool/symbols-mingw.sh           33
-rw-r--r--  lang/sql/sqlite/tool/symbols.sh                 34
-rw-r--r--  lang/sql/sqlite/tool/tostr.awk                   8
-rw-r--r--  lang/sql/sqlite/tool/vdbe-compress.tcl          15
-rw-r--r--  lang/sql/sqlite/tool/warnings-clang.sh          14
-rw-r--r--  lang/sql/sqlite/tool/warnings.sh                19
-rw-r--r--  lang/sql/sqlite/tool/win/sqlite.vsix            bin 0 -> 32825 bytes
35 files changed, 4512 insertions(+), 1760 deletions(-)
diff --git a/lang/sql/sqlite/tool/build-all-msvc.bat b/lang/sql/sqlite/tool/build-all-msvc.bat
new file mode 100644
index 00000000..6e0aeb57
--- /dev/null
+++ b/lang/sql/sqlite/tool/build-all-msvc.bat
@@ -0,0 +1,546 @@
+@ECHO OFF
+
+::
+:: build-all-msvc.bat --
+::
+:: Multi-Platform Build Tool for MSVC
+::
+
+REM
+REM This batch script is used to build the SQLite DLL for multiple platforms
+REM and configurations using MSVC. The built SQLite DLLs, their associated
+REM import libraries, and optionally their symbols files, are placed within
+REM the directory specified on the command line, in sub-directories named for
+REM their respective platforms and configurations. This batch script must be
+REM run from inside a Visual Studio Command Prompt for the desired version of
+REM Visual Studio ^(the initial platform configured for the command prompt does
+REM not really matter^). Exactly one command line argument is required, the
+REM name of an existing directory to be used as the final destination directory
+REM for the generated output files, which will be placed in sub-directories
+REM created therein. Ideally, the directory specified should be empty.
+REM
+REM Example:
+REM
+REM CD /D C:\dev\sqlite\core
+REM tool\build-all-msvc.bat C:\Temp
+REM
+REM In the example above, "C:\dev\sqlite\core" represents the root of the
+REM source tree for SQLite and "C:\Temp" represents the final destination
+REM directory for the generated output files.
+REM
+REM There are several environment variables that may be set to modify the
+REM behavior of this batch script and its associated Makefile. The list of
+REM platforms to build may be overridden by using the PLATFORMS environment
+REM variable, which should contain a list of platforms ^(e.g. x86 x86_amd64
+REM x86_arm^). All platforms must be supported by the version of Visual Studio
+REM being used. The list of configurations to build may be overridden by
+REM setting the CONFIGURATIONS environment variable, which should contain a
+REM list of configurations to build ^(e.g. Debug Retail^). Neither of these
+REM variable values may contain any double quotes, surrounding or embedded.
+REM Finally, the NCRTLIBPATH and NSDKLIBPATH environment variables may be set
+REM to specify the location of the CRT and SDK, respectively, needed to compile
+REM executables native to the architecture of the build machine during any
+REM cross-compilation that may be necessary, depending on the platforms to be
+REM built.  The values of these two variables should be surrounded by double
+REM quotes if they contain spaces.
+REM
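+REM For example, the default platform and configuration lists could be
+REM overridden as follows before invoking this batch script ^(a hypothetical
+REM invocation, shown only to illustrate the PLATFORMS and CONFIGURATIONS
+REM variables described above^):
+REM
+REM        SET PLATFORMS=x86 x86_amd64
+REM        SET CONFIGURATIONS=Retail
+REM        tool\build-all-msvc.bat C:\Temp
+REM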
+REM Please note that the SQLite build process performed by the Makefile
+REM associated with this batch script requires both Gawk ^(gawk.exe^) and Tcl
+REM 8.5 ^(tclsh85.exe^) to be present in a directory contained in the PATH
+REM environment variable unless a pre-existing amalgamation file is used.
+REM
+SETLOCAL
+
+REM SET __ECHO=ECHO
+REM SET __ECHO2=ECHO
+REM SET __ECHO3=ECHO
+IF NOT DEFINED _AECHO (SET _AECHO=REM)
+IF NOT DEFINED _CECHO (SET _CECHO=REM)
+IF NOT DEFINED _VECHO (SET _VECHO=REM)
+
+%_AECHO% Running %0 %*
+
+REM SET DFLAGS=/L
+
+%_VECHO% DFlags = '%DFLAGS%'
+
+SET FFLAGS=/V /F /G /H /I /R /Y /Z
+
+%_VECHO% FFlags = '%FFLAGS%'
+
+SET ROOT=%~dp0\..
+SET ROOT=%ROOT:\\=\%
+
+%_VECHO% Root = '%ROOT%'
+
+REM
+REM NOTE: The first and only argument to this batch file should be the output
+REM directory where the platform-specific binary directories should be
+REM created.
+REM
+SET BINARYDIRECTORY=%1
+
+IF NOT DEFINED BINARYDIRECTORY (
+ GOTO usage
+)
+
+%_VECHO% BinaryDirectory = '%BINARYDIRECTORY%'
+
+SET DUMMY=%2
+
+IF DEFINED DUMMY (
+ GOTO usage
+)
+
+REM
+REM NOTE: From this point, we need a clean error level. Reset it now.
+REM
+CALL :fn_ResetErrorLevel
+
+REM
+REM NOTE: Change the current directory to the root of the source tree, saving
+REM the current directory on the directory stack.
+REM
+%__ECHO2% PUSHD "%ROOT%"
+
+IF ERRORLEVEL 1 (
+ ECHO Could not change directory to "%ROOT%".
+ GOTO errors
+)
+
+REM
+REM NOTE: This batch file requires the ComSpec environment variable to be set,
+REM typically to something like "C:\Windows\System32\cmd.exe".
+REM
+IF NOT DEFINED ComSpec (
+ ECHO The ComSpec environment variable must be defined.
+ GOTO errors
+)
+
+REM
+REM NOTE: This batch file requires the VcInstallDir environment variable to be
+REM       set.  Typically, this means this batch file needs to be run from an
+REM MSVC command prompt.
+REM
+IF NOT DEFINED VCINSTALLDIR (
+ ECHO The VCINSTALLDIR environment variable must be defined.
+ GOTO errors
+)
+
+REM
+REM NOTE: If the list of platforms is not already set, use the default list.
+REM
+IF NOT DEFINED PLATFORMS (
+ SET PLATFORMS=x86 x86_amd64 x86_arm
+)
+
+%_VECHO% Platforms = '%PLATFORMS%'
+
+REM
+REM NOTE: If the list of configurations is not already set, use the default
+REM list.
+REM
+IF NOT DEFINED CONFIGURATIONS (
+ SET CONFIGURATIONS=Debug Retail
+)
+
+%_VECHO% Configurations = '%CONFIGURATIONS%'
+
+REM
+REM NOTE: Setup environment variables to translate between the MSVC platform
+REM names and the names to be used for the platform-specific binary
+REM directories.
+REM
+SET amd64_NAME=x64
+SET arm_NAME=ARM
+SET x64_NAME=x64
+SET x86_NAME=x86
+SET x86_amd64_NAME=x64
+SET x86_arm_NAME=ARM
+SET x86_x64_NAME=x64
+
+%_VECHO% amd64_Name = '%amd64_NAME%'
+%_VECHO% arm_Name = '%arm_NAME%'
+%_VECHO% x64_Name = '%x64_NAME%'
+%_VECHO% x86_Name = '%x86_NAME%'
+%_VECHO% x86_amd64_Name = '%x86_amd64_NAME%'
+%_VECHO% x86_arm_Name = '%x86_arm_NAME%'
+%_VECHO% x86_x64_Name = '%x86_x64_NAME%'
+
+REM
+REM NOTE: Check for the external tools needed during the build process ^(i.e.
+REM those that do not get compiled as part of the build process itself^)
+REM along the PATH.
+REM
+FOR %%T IN (gawk.exe tclsh85.exe) DO (
+ SET %%T_PATH=%%~dp$PATH:T
+)
+
+REM
+REM NOTE: The Gawk executable "gawk.exe" is required during the SQLite build
+REM process unless a pre-existing amalgamation file is used.
+REM
+IF NOT DEFINED gawk.exe_PATH (
+ ECHO The Gawk executable "gawk.exe" is required to be in the PATH.
+ GOTO errors
+)
+
+REM
+REM NOTE: The Tcl 8.5 executable "tclsh85.exe" is required during the SQLite
+REM build process unless a pre-existing amalgamation file is used.
+REM
+IF NOT DEFINED tclsh85.exe_PATH (
+ ECHO The Tcl 8.5 executable "tclsh85.exe" is required to be in the PATH.
+ GOTO errors
+)
+
+REM
+REM NOTE: Set the TOOLPATH variable to contain all the directories where the
+REM external tools were found in the search above.
+REM
+SET TOOLPATH=%gawk.exe_PATH%;%tclsh85.exe_PATH%
+
+%_VECHO% ToolPath = '%TOOLPATH%'
+
+REM
+REM NOTE: Check for MSVC 2012/2013 because the Windows SDK directory handling
+REM is slightly different for those versions.
+REM
+IF "%VisualStudioVersion%" == "11.0" (
+ REM
+ REM NOTE: If the Windows SDK library path has already been set, do not set
+ REM it to something else later on.
+ REM
+ IF NOT DEFINED NSDKLIBPATH (
+ SET SET_NSDKLIBPATH=1
+ )
+) ELSE IF "%VisualStudioVersion%" == "12.0" (
+ REM
+ REM NOTE: If the Windows SDK library path has already been set, do not set
+ REM it to something else later on.
+ REM
+ IF NOT DEFINED NSDKLIBPATH (
+ SET SET_NSDKLIBPATH=1
+ )
+) ELSE (
+ CALL :fn_UnsetVariable SET_NSDKLIBPATH
+)
+
+REM
+REM NOTE: Check if this is the Windows Phone SDK. If so, a different batch
+REM file is necessary to setup the build environment. Since the variable
+REM values involved here may contain parenthesis, using GOTO instead of
+REM an IF block is required.
+REM
+IF DEFINED WindowsPhoneKitDir GOTO set_vcvarsall_phone
+SET VCVARSALL=%VCINSTALLDIR%\vcvarsall.bat
+GOTO set_vcvarsall_done
+:set_vcvarsall_phone
+SET VCVARSALL=%VCINSTALLDIR%\WPSDK\WP80\vcvarsphoneall.bat
+:set_vcvarsall_done
+
+REM
+REM NOTE: This is the outer loop. There should be exactly one iteration per
+REM platform.
+REM
+FOR %%P IN (%PLATFORMS%) DO (
+ REM
+ REM NOTE: Using the MSVC platform name, lookup the simpler platform name to
+ REM be used for the name of the platform-specific binary directory via
+ REM the environment variables setup earlier.
+ REM
+ CALL :fn_CopyVariable %%P_NAME PLATFORMNAME
+
+ REM
+ REM NOTE: This is the second loop. There should be exactly one iteration.
+ REM This loop is necessary because the PlatformName environment
+ REM variable was set above and that value is needed by some of the
+ REM commands contained in the inner loop. If these commands were
+ REM directly contained in the outer loop, the PlatformName environment
+ REM variable would be stuck with its initial empty value instead.
+ REM
+ FOR /F "tokens=2* delims==" %%D IN ('SET PLATFORMNAME') DO (
+ REM
+ REM NOTE: Attempt to clean the environment of all variables used by MSVC
+ REM and/or Visual Studio. This block may need to be updated in the
+ REM future to account for additional environment variables.
+ REM
+ CALL :fn_UnsetVariable DevEnvDir
+ CALL :fn_UnsetVariable ExtensionSdkDir
+ CALL :fn_UnsetVariable Framework35Version
+ CALL :fn_UnsetVariable FrameworkDir
+ CALL :fn_UnsetVariable FrameworkDir32
+ CALL :fn_UnsetVariable FrameworkVersion
+ CALL :fn_UnsetVariable FrameworkVersion32
+ CALL :fn_UnsetVariable FSHARPINSTALLDIR
+ CALL :fn_UnsetVariable INCLUDE
+ CALL :fn_UnsetVariable LIB
+ CALL :fn_UnsetVariable LIBPATH
+ CALL :fn_UnsetVariable Platform
+ REM CALL :fn_UnsetVariable VCINSTALLDIR
+ CALL :fn_UnsetVariable VSINSTALLDIR
+ CALL :fn_UnsetVariable WindowsPhoneKitDir
+ CALL :fn_UnsetVariable WindowsSdkDir
+ CALL :fn_UnsetVariable WindowsSdkDir_35
+ CALL :fn_UnsetVariable WindowsSdkDir_old
+
+ REM
+ REM NOTE: Reset the PATH here to the absolute bare minimum required.
+ REM
+ SET PATH=%TOOLPATH%;%SystemRoot%\System32;%SystemRoot%
+
+ REM
+ REM NOTE: This is the inner loop. There are normally two iterations, one
+ REM for each supported build configuration, e.g. Debug or Retail.
+ REM
+ FOR %%B IN (%CONFIGURATIONS%) DO (
+ REM
+ REM NOTE: When preparing the debug build, set the DEBUG and MEMDEBUG
+ REM environment variables to be picked up by the MSVC makefile
+ REM itself.
+ REM
+ IF /I "%%B" == "Debug" (
+ SET DEBUG=2
+ SET MEMDEBUG=1
+ ) ELSE (
+ CALL :fn_UnsetVariable DEBUG
+ CALL :fn_UnsetVariable MEMDEBUG
+ )
+
+ REM
+ REM NOTE: Launch a nested command shell to perform the following steps:
+ REM
+ REM 1. Setup the MSVC environment for this platform using the
+ REM official batch file.
+ REM
+ REM 2. Make sure that no stale build output files are present.
+ REM
+ REM 3. Build the "sqlite3.dll" and "sqlite3.lib" binaries for this
+ REM platform.
+ REM
+ REM 4. Copy the "sqlite3.dll" and "sqlite3.lib" binaries for this
+ REM platform to the platform-specific directory beneath the
+ REM binary directory.
+ REM
+ REM 5. Unless prevented from doing so, copy the "sqlite3.pdb"
+ REM symbols file for this platform to the platform-specific
+ REM directory beneath the binary directory.
+ REM
+ "%ComSpec%" /C (
+ REM
+ REM NOTE: Attempt to setup the MSVC environment for this platform.
+ REM
+ %__ECHO3% CALL "%VCVARSALL%" %%P
+
+ IF ERRORLEVEL 1 (
+ ECHO Failed to call "%VCVARSALL%" for platform %%P.
+ GOTO errors
+ )
+
+ REM
+ REM NOTE: If this batch file is not running in "what-if" mode, check to
+ REM be sure we were actually able to setup the MSVC environment
+ REM as current versions of their official batch file do not set
+ REM the exit code upon failure.
+ REM
+ IF NOT DEFINED __ECHO3 (
+ IF NOT DEFINED WindowsPhoneKitDir (
+ IF NOT DEFINED WindowsSdkDir (
+ ECHO Cannot build, Windows SDK not found for platform %%P.
+ GOTO errors
+ )
+ )
+ )
+
+ REM
+ REM NOTE: When using MSVC 2012 and/or 2013, the native SDK path cannot
+ REM simply use the "lib" sub-directory beneath the location
+ REM specified in the WindowsSdkDir environment variable because
+ REM that location does not actually contain the necessary library
+ REM files for x86. This must be done for each iteration because
+ REM it relies upon the WindowsSdkDir environment variable being
+ REM set by the batch file used to setup the MSVC environment.
+ REM
+ IF DEFINED SET_NSDKLIBPATH (
+ REM
+ REM NOTE: The Windows Phone SDK has a slightly different directory
+ REM structure and must be handled specially here.
+ REM
+ IF DEFINED WindowsPhoneKitDir (
+ CALL :fn_CopyVariable WindowsPhoneKitDir NSDKLIBPATH
+ CALL :fn_AppendVariable NSDKLIBPATH \lib\x86
+ ) ELSE IF DEFINED WindowsSdkDir (
+ CALL :fn_CopyVariable WindowsSdkDir NSDKLIBPATH
+
+ REM
+ REM NOTE: The Windows 8.1 SDK has a slightly different directory
+ REM naming convention. Currently, this tool assumes that
+ REM the Windows 8.1 SDK should only be used with MSVC 2013.
+ REM
+ IF "%VisualStudioVersion%" == "12.0" (
+ CALL :fn_AppendVariable NSDKLIBPATH \lib\winv6.3\um\x86
+ ) ELSE (
+ CALL :fn_AppendVariable NSDKLIBPATH \lib\win8\um\x86
+ )
+ )
+ )
+
+ REM
+ REM NOTE: Unless prevented from doing so, invoke NMAKE with the MSVC
+ REM makefile to clean any stale build output from previous
+ REM iterations of this loop and/or previous runs of this batch
+ REM file, etc.
+ REM
+ IF NOT DEFINED NOCLEAN (
+ %__ECHO% nmake -f Makefile.msc clean
+
+ IF ERRORLEVEL 1 (
+ ECHO Failed to clean for platform %%P.
+ GOTO errors
+ )
+ ) ELSE (
+ REM
+ REM NOTE: Even when the cleaning step has been disabled, we still
+ REM need to remove the build output for the files we are
+ REM specifically wanting to build for each platform.
+ REM
+ %__ECHO% DEL /Q *.lo sqlite3.dll sqlite3.lib sqlite3.pdb
+ )
+
+ REM
+ REM NOTE: Call NMAKE with the MSVC makefile to build the "sqlite3.dll"
+ REM binary. The x86 compiler will be used to compile the native
+ REM command line tools needed during the build process itself.
+ REM Also, disable looking for and/or linking to the native Tcl
+ REM runtime library.
+ REM
+ %__ECHO% nmake -f Makefile.msc sqlite3.dll XCOMPILE=1 USE_NATIVE_LIBPATHS=1 NO_TCL=1 %NMAKE_ARGS%
+
+ IF ERRORLEVEL 1 (
+ ECHO Failed to build %%B "sqlite3.dll" for platform %%P.
+ GOTO errors
+ )
+
+ REM
+ REM NOTE: Copy the "sqlite3.dll" file to the appropriate directory for
+ REM the build and platform beneath the binary directory.
+ REM
+ %__ECHO% XCOPY sqlite3.dll "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
+
+ IF ERRORLEVEL 1 (
+ ECHO Failed to copy "sqlite3.dll" to "%BINARYDIRECTORY%\%%B\%%D\".
+ GOTO errors
+ )
+
+ REM
+ REM NOTE: Copy the "sqlite3.lib" file to the appropriate directory for
+ REM the build and platform beneath the binary directory.
+ REM
+ %__ECHO% XCOPY sqlite3.lib "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
+
+ IF ERRORLEVEL 1 (
+ ECHO Failed to copy "sqlite3.lib" to "%BINARYDIRECTORY%\%%B\%%D\".
+ GOTO errors
+ )
+
+ REM
+ REM NOTE: Copy the "sqlite3.pdb" file to the appropriate directory for
+ REM the build and platform beneath the binary directory unless we
+ REM are prevented from doing so.
+ REM
+ IF NOT DEFINED NOSYMBOLS (
+ %__ECHO% XCOPY sqlite3.pdb "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
+
+ IF ERRORLEVEL 1 (
+ ECHO Failed to copy "sqlite3.pdb" to "%BINARYDIRECTORY%\%%B\%%D\".
+ GOTO errors
+ )
+ )
+ )
+ )
+ )
+
+ REM
+ REM NOTE: Handle any errors generated during the nested command shell.
+ REM
+ IF ERRORLEVEL 1 (
+ GOTO errors
+ )
+)
+
+REM
+REM NOTE: Restore the saved current directory from the directory stack.
+REM
+%__ECHO2% POPD
+
+IF ERRORLEVEL 1 (
+ ECHO Could not restore directory.
+ GOTO errors
+)
+
+REM
+REM NOTE: If we get to this point, we have succeeded.
+REM
+GOTO no_errors
+
+:fn_ResetErrorLevel
+ VERIFY > NUL
+ GOTO :EOF
+
+:fn_SetErrorLevel
+ VERIFY MAYBE 2> NUL
+ GOTO :EOF
+
+:fn_CopyVariable
+ IF NOT DEFINED %1 GOTO :EOF
+ IF "%2" == "" GOTO :EOF
+ SETLOCAL
+ SET __ECHO_CMD=ECHO %%%1%%
+ FOR /F "delims=" %%V IN ('%__ECHO_CMD%') DO (
+ SET VALUE=%%V
+ )
+ ENDLOCAL && SET %2=%VALUE%
+ GOTO :EOF
+
+:fn_UnsetVariable
+ IF NOT "%1" == "" (
+ SET %1=
+ CALL :fn_ResetErrorLevel
+ )
+ GOTO :EOF
+
+:fn_AppendVariable
+ SET __ECHO_CMD=ECHO %%%1%%
+ IF DEFINED %1 (
+ FOR /F "delims=" %%V IN ('%__ECHO_CMD%') DO (
+ SET %1=%%V%~2
+ )
+ ) ELSE (
+ SET %1=%~2
+ )
+ SET __ECHO_CMD=
+ CALL :fn_ResetErrorLevel
+ GOTO :EOF
+
+:usage
+ ECHO.
+ ECHO Usage: %~nx0 ^<binaryDirectory^>
+ ECHO.
+ GOTO errors
+
+:errors
+ CALL :fn_SetErrorLevel
+ ENDLOCAL
+ ECHO.
+ ECHO Failure, errors were encountered.
+ GOTO end_of_file
+
+:no_errors
+ CALL :fn_ResetErrorLevel
+ ENDLOCAL
+ ECHO.
+ ECHO Success, no errors were encountered.
+ GOTO end_of_file
+
+:end_of_file
+%__ECHO% EXIT /B %ERRORLEVEL%
diff --git a/lang/sql/sqlite/tool/build-shell.sh b/lang/sql/sqlite/tool/build-shell.sh
new file mode 100644
index 00000000..6a48299d
--- /dev/null
+++ b/lang/sql/sqlite/tool/build-shell.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+#
+# This script demonstrates how to do a full-featured build of the sqlite3
+# command-line shell on Linux.
+#
+# SQLite source code should be in a sibling directory named "sqlite". For
+# example, put SQLite sources in ~/sqlite/sqlite and run this script from
+# ~/sqlite/bld. There should be an appropriate Makefile in the current
+# directory as well.
+#
+make sqlite3.c
+gcc -o sqlite3 -g -Os -I. \
+ -DSQLITE_THREADSAFE=0 \
+ -DSQLITE_ENABLE_VFSTRACE \
+ -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_FTS4 \
+ -DSQLITE_ENABLE_RTREE \
+ -DHAVE_READLINE \
+ -DHAVE_USLEEP=1 \
+ ../sqlite/src/shell.c \
+ ../sqlite/src/test_vfstrace.c \
+ sqlite3.c -ldl -lreadline -lncurses
diff --git a/lang/sql/sqlite/tool/checkSpacing.c b/lang/sql/sqlite/tool/checkSpacing.c
new file mode 100644
index 00000000..ce38b08c
--- /dev/null
+++ b/lang/sql/sqlite/tool/checkSpacing.c
@@ -0,0 +1,84 @@
+/*
+** This program checks for formatting problems in source code:
+**
+** * Any use of tab characters
+** * White space at the end of a line
+** * Blank lines at the end of a file
+**
+** Any violations are reported.
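+**
+** A hypothetical invocation (the compiler command and file names below are
+** only examples):
+**
+**     cc -o checkSpacing checkSpacing.c
+**     ./checkSpacing --crok src/*.c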
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define CR_OK 0x001
+#define WSEOL_OK 0x002
+
+static void checkSpacing(const char *zFile, unsigned flags){
+ FILE *in = fopen(zFile, "rb");
+ int i;
+ int seenSpace;
+ int seenTab;
+ int ln = 0;
+ int lastNonspace = 0;
+ char zLine[2000];
+ if( in==0 ){
+ printf("cannot open %s\n", zFile);
+ return;
+ }
+ while( fgets(zLine, sizeof(zLine), in) ){
+ seenSpace = 0;
+ seenTab = 0;
+ ln++;
+ for(i=0; zLine[i]; i++){
+ if( zLine[i]=='\t' && seenTab==0 ){
+ printf("%s:%d: tab (\\t) character\n", zFile, ln);
+ seenTab = 1;
+ }else if( zLine[i]=='\r' ){
+ if( (flags & CR_OK)==0 ){
+ printf("%s:%d: carriage-return (\\r) character\n", zFile, ln);
+ }
+ }else if( zLine[i]==' ' ){
+ seenSpace = 1;
+ }else if( zLine[i]!='\n' ){
+ lastNonspace = ln;
+ seenSpace = 0;
+ }
+ }
+ if( seenSpace && (flags & WSEOL_OK)==0 ){
+ printf("%s:%d: whitespace at end-of-line\n", zFile, ln);
+ }
+ }
+ fclose(in);
+ if( lastNonspace<ln ){
+ printf("%s:%d: blank lines at end of file (%d)\n",
+ zFile, ln, ln - lastNonspace);
+ }
+}
+
+int main(int argc, char **argv){
+ int i;
+ unsigned flags = WSEOL_OK;
+ for(i=1; i<argc; i++){
+ const char *z = argv[i];
+ if( z[0]=='-' ){
+ while( z[0]=='-' ) z++;
+ if( strcmp(z,"crok")==0 ){
+ flags |= CR_OK;
+ }else if( strcmp(z, "wseol")==0 ){
+ flags &= ~WSEOL_OK;
+ }else if( strcmp(z, "help")==0 ){
+ printf("Usage: %s [options] FILE ...\n", argv[0]);
+ printf(" --crok Do not report on carriage-returns\n");
+ printf(" --wseol Complain about whitespace at end-of-line\n");
+ printf(" --help This message\n");
+ }else{
+ printf("unknown command-line option: [%s]\n", argv[i]);
+ printf("use --help for additional information\n");
+ }
+ }else{
+ checkSpacing(argv[i], flags);
+ }
+ }
+ return 0;
+}
diff --git a/lang/sql/sqlite/tool/extract.c b/lang/sql/sqlite/tool/extract.c
new file mode 100644
index 00000000..5bf5caa3
--- /dev/null
+++ b/lang/sql/sqlite/tool/extract.c
@@ -0,0 +1,46 @@
+/*
+** Extract a range of bytes from a file.
+**
+** Usage:
+**
+** extract FILENAME OFFSET AMOUNT
+**
+** The bytes are written to standard output.
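+**
+** For example, to extract 100 bytes starting at offset 1024 of "test.db"
+** into "page.bin" (an illustrative invocation only):
+**
+**     extract test.db 1024 100 > page.bin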
+*/
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv){
+ FILE *f;
+ char *zBuf;
+ int ofst;
+ int n;
+ size_t got;
+
+ if( argc!=4 ){
+ fprintf(stderr, "Usage: %s FILENAME OFFSET AMOUNT\n", *argv);
+ return 1;
+ }
+ f = fopen(argv[1], "rb");
+ if( f==0 ){
+ fprintf(stderr, "cannot open \"%s\"\n", argv[1]);
+ return 1;
+ }
+ ofst = atoi(argv[2]);
+ n = atoi(argv[3]);
+ zBuf = malloc( n );
+ if( zBuf==0 ){
+ fprintf(stderr, "out of memory\n");
+ return 1;
+ }
+ fseek(f, ofst, SEEK_SET);
+ got = fread(zBuf, 1, n, f);
+ fclose(f);
+  if( got<(size_t)n ){
+    fprintf(stderr, "got only %d of %d bytes\n", (int)got, n);
+ return 1;
+ }else{
+ fwrite(zBuf, 1, n, stdout);
+ }
+ return 0;
+}
diff --git a/lang/sql/sqlite/tool/fast_vacuum.c b/lang/sql/sqlite/tool/fast_vacuum.c
new file mode 100644
index 00000000..6a50dcc6
--- /dev/null
+++ b/lang/sql/sqlite/tool/fast_vacuum.c
@@ -0,0 +1,234 @@
+/*
+** 2013-10-01
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This program implements a high-speed version of the VACUUM command.
+** It repacks an SQLite database to remove as much unused space as
+** possible and to relocate content sequentially in the file.
+**
+** This program runs faster and uses less temporary disk space than the
+** built-in VACUUM command. On the other hand, this program has a number
+** of important restrictions relative to the built-in VACUUM command.
+**
+** (1) The caller must ensure that no other processes are accessing the
+** database file while the vacuum is taking place. The usual SQLite
+** file locking is insufficient for this. The caller must use
+** external means to make sure only this one routine is reading and
+** writing the database.
+**
+**  (2) Database reconfiguration, such as page size or auto_vacuum changes,
+**      is not supported by this utility.
+**
+** (3) The database file might be renamed if a power loss or crash
+**      occurs at just the wrong moment.  Recovery must be prepared
+**      to deal with the possibly changed filename.
+**
+** This program is intended as a *Demonstration Only*. The intent of this
+** program is to provide example code that application developers can use
+** when creating similar functionality in their applications.
+**
+** To compile this program:
+**
+** cc fast_vacuum.c sqlite3.c
+**
+** Add whatever linker options are required. (Example: "-ldl -lpthread").
+** Then to run the program:
+**
+** ./a.out file-to-vacuum
+**
+*/
+#include "sqlite3.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+/*
+** Finalize a prepared statement. If an error has occurred, print the
+** error message and exit.
+*/
+static void vacuumFinalize(sqlite3_stmt *pStmt){
+ sqlite3 *db = sqlite3_db_handle(pStmt);
+ int rc = sqlite3_finalize(pStmt);
+ if( rc ){
+ fprintf(stderr, "finalize error: %s\n", sqlite3_errmsg(db));
+ exit(1);
+ }
+}
+
+/*
+** Execute zSql on database db. The SQL text is printed to standard
+** output. If an error occurs, print an error message and exit the
+** process.
+*/
+static void execSql(sqlite3 *db, const char *zSql){
+ sqlite3_stmt *pStmt;
+ if( !zSql ){
+ fprintf(stderr, "out of memory!\n");
+ exit(1);
+ }
+ printf("%s;\n", zSql);
+ if( SQLITE_OK!=sqlite3_prepare(db, zSql, -1, &pStmt, 0) ){
+ fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db));
+ exit(1);
+ }
+ sqlite3_step(pStmt);
+ vacuumFinalize(pStmt);
+}
+
+/*
+** Execute zSql on database db. The zSql statement returns exactly
+** one column. Execute this return value as SQL on the same database.
+**
+** The zSql statement is printed on standard output prior to being
+** run. If any errors occur, an error is printed and the process
+** exits.
+*/
+static void execExecSql(sqlite3 *db, const char *zSql){
+ sqlite3_stmt *pStmt;
+ int rc;
+
+ printf("%s;\n", zSql);
+ rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
+ if( rc!=SQLITE_OK ){
+ fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db));
+ exit(1);
+ }
+ while( SQLITE_ROW==sqlite3_step(pStmt) ){
+ execSql(db, (char*)sqlite3_column_text(pStmt, 0));
+ }
+ vacuumFinalize(pStmt);
+}
+
+
+int main(int argc, char **argv){
+ sqlite3 *db; /* Connection to the database file */
+ int rc; /* Return code from SQLite interface calls */
+ sqlite3_uint64 r; /* A random number */
+ const char *zDbToVacuum; /* Database to be vacuumed */
+ char *zBackupDb; /* Backup copy of the original database */
+ char *zTempDb; /* Temporary database */
+ char *zSql; /* An SQL statement */
+
+ if( argc!=2 ){
+ fprintf(stderr, "Usage: %s DATABASE\n", argv[0]);
+ return 1;
+ }
+
+ /* Identify the database file to be vacuumed and open it.
+ */
+ zDbToVacuum = argv[1];
+ printf("-- open database file \"%s\"\n", zDbToVacuum);
+ rc = sqlite3_open(zDbToVacuum, &db);
+ if( rc ){
+ fprintf(stderr, "%s: %s\n", zDbToVacuum, sqlite3_errstr(rc));
+ return 1;
+ }
+
+ /* Create names for two other files. zTempDb will be a new database
+ ** into which we construct a vacuumed copy of zDbToVacuum. zBackupDb
+ ** will be a new name for zDbToVacuum after it is vacuumed.
+ */
+ sqlite3_randomness(sizeof(r), &r);
+ zTempDb = sqlite3_mprintf("%s-vacuum-%016llx", zDbToVacuum, r);
+ zBackupDb = sqlite3_mprintf("%s-backup-%016llx", zDbToVacuum, r);
+
+ /* Attach the zTempDb database to the database connection.
+ */
+ zSql = sqlite3_mprintf("ATTACH '%q' AS vacuum_db;", zTempDb);
+ execSql(db, zSql);
+ sqlite3_free(zSql);
+
+ /* TODO:
+ ** Set the page_size and auto_vacuum mode for zTempDb here, if desired.
+ */
+
+ /* The vacuum will occur inside of a transaction. Set writable_schema
+ ** to ON so that we can directly update the sqlite_master table in the
+ ** zTempDb database.
+ */
+ execSql(db, "PRAGMA writable_schema=ON");
+ execSql(db, "BEGIN");
+
+
+ /* Query the schema of the main database. Create a mirror schema
+ ** in the temporary database.
+ */
+ execExecSql(db,
+ "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) "
+ " FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
+ " AND rootpage>0"
+ );
+ execExecSql(db,
+ "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)"
+ " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %'"
+ );
+ execExecSql(db,
+ "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) "
+ " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"
+ );
+
+ /* Loop through the tables in the main database. For each, do
+ ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
+ ** the contents to the temporary database.
+ */
+ execExecSql(db,
+ "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
+ "|| ' SELECT * FROM main.' || quote(name) "
+ "FROM main.sqlite_master "
+ "WHERE type = 'table' AND name!='sqlite_sequence' "
+ " AND rootpage>0"
+ );
+
+ /* Copy over the sequence table
+ */
+ execExecSql(db,
+ "SELECT 'DELETE FROM vacuum_db.' || quote(name) "
+ "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence'"
+ );
+ execExecSql(db,
+ "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
+ "|| ' SELECT * FROM main.' || quote(name) "
+ "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence'"
+ );
+
+ /* Copy the triggers, views, and virtual tables from the main database
+ ** over to the temporary database. None of these objects has any
+ ** associated storage, so all we have to do is copy their entries
+ ** from the SQLITE_MASTER table.
+ */
+ execSql(db,
+ "INSERT INTO vacuum_db.sqlite_master "
+ " SELECT type, name, tbl_name, rootpage, sql"
+ " FROM main.sqlite_master"
+ " WHERE type='view' OR type='trigger'"
+ " OR (type='table' AND rootpage=0)"
+ );
+
+ /* Commit the transaction and close the database
+ */
+ execSql(db, "COMMIT");
+ printf("-- close database\n");
+ sqlite3_close(db);
+
+
+ /* At this point, zDbToVacuum is unchanged. zTempDb contains a
+ ** vacuumed copy of zDbToVacuum. Rearrange filenames so that
+  ** zTempDb becomes the new zDbToVacuum.
+ */
+ printf("-- rename \"%s\" to \"%s\"\n", zDbToVacuum, zBackupDb);
+ rename(zDbToVacuum, zBackupDb);
+ printf("-- rename \"%s\" to \"%s\"\n", zTempDb, zDbToVacuum);
+ rename(zTempDb, zDbToVacuum);
+
+ /* Release allocated memory */
+ sqlite3_free(zTempDb);
+ sqlite3_free(zBackupDb);
+ return 0;
+}
diff --git a/lang/sql/sqlite/tool/getlock.c b/lang/sql/sqlite/tool/getlock.c
new file mode 100644
index 00000000..7eff04d7
--- /dev/null
+++ b/lang/sql/sqlite/tool/getlock.c
@@ -0,0 +1,134 @@
+/*
+** This utility program looks at an SQLite database and determines whether
+** or not it is locked, the kind of lock, and who is holding this lock.
+**
+** This only works on unix when the posix advisory locking method is used
+** (which is the default on unix) and when the PENDING_BYTE is in its
+** usual place.
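+**
+** A hypothetical invocation and its output (the database name and process
+** id shown are only examples of the report format produced by this program):
+**
+**     ./getlock test.db
+**     RESERVED lock held by 12345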
+*/
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+
+static void usage(const char *argv0){
+ fprintf(stderr, "Usage: %s database\n", argv0);
+ exit(1);
+}
+
+/* Check for a conflicting lock.  If one is found, print a message about it
+** on standard output, naming the lock type given, and return 1.
+** If there are no conflicting locks, return 0.
+*/
+static int isLocked(
+ int h, /* File descriptor to check */
+ int type, /* F_RDLCK or F_WRLCK */
+ unsigned int iOfst, /* First byte of the lock */
+ unsigned int iCnt, /* Number of bytes in the lock range */
+ const char *zType /* Type of lock */
+){
+ struct flock lk;
+
+ memset(&lk, 0, sizeof(lk));
+ lk.l_type = type;
+ lk.l_whence = SEEK_SET;
+ lk.l_start = iOfst;
+ lk.l_len = iCnt;
+ if( fcntl(h, F_GETLK, &lk)==(-1) ){
+ fprintf(stderr, "fcntl(%d) failed: errno=%d\n", h, errno);
+ exit(1);
+ }
+ if( lk.l_type==F_UNLCK ) return 0;
+ printf("%s lock held by %d\n", zType, (int)lk.l_pid);
+ return 1;
+}
+
+/*
+** Location of locking bytes in the database file
+*/
+#define PENDING_BYTE (0x40000000)
+#define RESERVED_BYTE (PENDING_BYTE+1)
+#define SHARED_FIRST (PENDING_BYTE+2)
+#define SHARED_SIZE 510
+
+/*
+** Lock locations for shared-memory locks used by WAL mode.
+*/
+#define SHM_BASE 120
+#define SHM_WRITE SHM_BASE
+#define SHM_CHECKPOINT (SHM_BASE+1)
+#define SHM_RECOVER (SHM_BASE+2)
+#define SHM_READ_FIRST (SHM_BASE+3)
+#define SHM_READ_SIZE 5
+
+
+int main(int argc, char **argv){
+ int hDb; /* File descriptor for the open database file */
+ int hShm; /* File descriptor for WAL shared-memory file */
+ char *zShm; /* Name of the shared-memory file for WAL mode */
+ ssize_t got; /* Bytes read from header */
+ int isWal; /* True if in WAL mode */
+ int nName; /* Length of filename */
+ unsigned char aHdr[100]; /* Database header */
+ int nLock = 0; /* Number of locks held */
+ int i; /* Loop counter */
+
+ if( argc!=2 ) usage(argv[0]);
+ hDb = open(argv[1], O_RDONLY, 0);
+ if( hDb<0 ){
+ fprintf(stderr, "cannot open %s\n", argv[1]);
+ return 1;
+ }
+
+  /* Make sure we are dealing with a database file */
+ got = read(hDb, aHdr, 100);
+ if( got!=100 || memcmp(aHdr, "SQLite format 3",16)!=0 ){
+ fprintf(stderr, "not an SQLite database: %s\n", argv[1]);
+ exit(1);
+ }
+
+ /* First check for an exclusive lock */
+ if( isLocked(hDb, F_RDLCK, SHARED_FIRST, SHARED_SIZE, "EXCLUSIVE") ){
+ return 0;
+ }
+ isWal = aHdr[18]==2;
+ if( isWal==0 ){
+ /* Rollback mode */
+ if( isLocked(hDb, F_RDLCK, PENDING_BYTE, 1, "PENDING") ) return 0;
+ if( isLocked(hDb, F_RDLCK, RESERVED_BYTE, 1, "RESERVED") ) return 0;
+ if( isLocked(hDb, F_WRLCK, SHARED_FIRST, SHARED_SIZE, "SHARED") ){
+ return 0;
+ }
+ }else{
+ /* WAL mode */
+ nName = (int)strlen(argv[1]);
+ zShm = malloc( nName + 100 );
+ if( zShm==0 ){
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ memcpy(zShm, argv[1], nName);
+ memcpy(&zShm[nName], "-shm", 5);
+ hShm = open(zShm, O_RDONLY, 0);
+ if( hShm<0 ){
+ fprintf(stderr, "cannot open %s\n", zShm);
+ return 1;
+ }
+ if( isLocked(hShm, F_RDLCK, SHM_RECOVER, 1, "WAL-RECOVERY") ){
+ return 0;
+ }
+ nLock += isLocked(hShm, F_RDLCK, SHM_CHECKPOINT, 1, "WAL-CHECKPOINT");
+ nLock += isLocked(hShm, F_RDLCK, SHM_WRITE, 1, "WAL-WRITE");
+ for(i=0; i<SHM_READ_SIZE; i++){
+ nLock += isLocked(hShm, F_WRLCK, SHM_READ_FIRST+i, 1, "WAL-READ");
+ }
+ }
+ if( nLock==0 ){
+ printf("file is not locked\n");
+ }
+ return 0;
+}
diff --git a/lang/sql/sqlite/tool/lemon.c b/lang/sql/sqlite/tool/lemon.c
index 898022e2..d7179ad4 100644
--- a/lang/sql/sqlite/tool/lemon.c
+++ b/lang/sql/sqlite/tool/lemon.c
@@ -15,7 +15,7 @@
#ifndef __WIN32__
# if defined(_WIN32) || defined(WIN32)
-# define __WIN32__
+# define __WIN32__
# endif
#endif
@@ -41,23 +41,6 @@ extern int access(const char *path, int mode);
#endif
static int showPrecedenceConflict = 0;
-static const char **made_files = NULL;
-static int made_files_count = 0;
-static int successful_exit = 0;
-static void LemonAtExit(void)
-{
- /* if we failed, delete (most) files we made, to unconfuse build tools. */
- int i;
- for (i = 0; i < made_files_count; i++) {
- if (!successful_exit) {
- remove(made_files[i]);
- }
- }
- free(made_files);
- made_files_count = 0;
- made_files = NULL;
-}
-
static char *msort(char*,char**,int(*)(const char*,const char*));
/*
@@ -67,6 +50,107 @@ static char *msort(char*,char**,int(*)(const char*,const char*));
*/
#define lemonStrlen(X) ((int)strlen(X))
+/*
+** Compilers are starting to complain about the use of sprintf() and strcpy(),
+** saying they are unsafe. So we define our own versions of those routines too.
+**
+** There are three routines here: lemon_sprintf(), lemon_vsprintf(), and
+** lemon_addtext(). The first two are replacements for sprintf() and vsprintf().
+** The third is a helper routine for vsnprintf() that adds texts to the end of a
+** buffer, making sure the buffer is always zero-terminated.
+**
+** The string formatter is a minimal subset of stdlib sprintf() supporting only
+** a few simple conversions:
+**
+** %d
+** %s
+** %.*s
+**
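+** For example, under the conversions listed above, a call such as the
+** following (illustrative only) leaves the string "42: abc" in zBuf:
+**
+**     char zBuf[100];
+**     lemon_sprintf(zBuf, "%d: %.*s", 42, 3, "abcdef");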
+*/
+static void lemon_addtext(
+ char *zBuf, /* The buffer to which text is added */
+ int *pnUsed, /* Slots of the buffer used so far */
+ const char *zIn, /* Text to add */
+ int nIn, /* Bytes of text to add. -1 to use strlen() */
+ int iWidth /* Field width. Negative to left justify */
+){
+ if( nIn<0 ) for(nIn=0; zIn[nIn]; nIn++){}
+ while( iWidth>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth--; }
+ if( nIn==0 ) return;
+ memcpy(&zBuf[*pnUsed], zIn, nIn);
+ *pnUsed += nIn;
+ while( (-iWidth)>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth++; }
+ zBuf[*pnUsed] = 0;
+}
+static int lemon_vsprintf(char *str, const char *zFormat, va_list ap){
+ int i, j, k, c;
+ int nUsed = 0;
+ const char *z;
+ char zTemp[50];
+ str[0] = 0;
+ for(i=j=0; (c = zFormat[i])!=0; i++){
+ if( c=='%' ){
+ int iWidth = 0;
+ lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0);
+ c = zFormat[++i];
+ if( isdigit(c) || (c=='-' && isdigit(zFormat[i+1])) ){
+ if( c=='-' ) i++;
+ while( isdigit(zFormat[i]) ) iWidth = iWidth*10 + zFormat[i++] - '0';
+ if( c=='-' ) iWidth = -iWidth;
+ c = zFormat[i];
+ }
+ if( c=='d' ){
+ int v = va_arg(ap, int);
+ if( v<0 ){
+ lemon_addtext(str, &nUsed, "-", 1, iWidth);
+ v = -v;
+ }else if( v==0 ){
+ lemon_addtext(str, &nUsed, "0", 1, iWidth);
+ }
+ k = 0;
+ while( v>0 ){
+ k++;
+ zTemp[sizeof(zTemp)-k] = (v%10) + '0';
+ v /= 10;
+ }
+ lemon_addtext(str, &nUsed, &zTemp[sizeof(zTemp)-k], k, iWidth);
+ }else if( c=='s' ){
+ z = va_arg(ap, const char*);
+ lemon_addtext(str, &nUsed, z, -1, iWidth);
+ }else if( c=='.' && memcmp(&zFormat[i], ".*s", 3)==0 ){
+ i += 2;
+ k = va_arg(ap, int);
+ z = va_arg(ap, const char*);
+ lemon_addtext(str, &nUsed, z, k, iWidth);
+ }else if( c=='%' ){
+ lemon_addtext(str, &nUsed, "%", 1, 0);
+ }else{
+ fprintf(stderr, "illegal format\n");
+ exit(1);
+ }
+ j = i+1;
+ }
+ }
+ lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0);
+ return nUsed;
+}
+static int lemon_sprintf(char *str, const char *format, ...){
+ va_list ap;
+ int rc;
+ va_start(ap, format);
+ rc = lemon_vsprintf(str, format, ap);
+ va_end(ap);
+ return rc;
+}
+static void lemon_strcpy(char *dest, const char *src){
+ while( (*(dest++) = *(src++))!=0 ){}
+}
+static void lemon_strcat(char *dest, const char *src){
+ while( *dest ) dest++;
+ lemon_strcpy(dest, src);
+}
+
+
/* a few forward declarations... */
struct rule;
struct lemon;
@@ -134,8 +218,6 @@ void ResortStates(struct lemon *);
void SetSize(int); /* All sets will be of size N */
char *SetNew(void); /* A new set for element 0..N */
void SetFree(char*); /* Deallocate a set */
-
-char *SetNew(void); /* A new set for element 0..N */
int SetAdd(char*,int); /* Add element to a set */
int SetUnion(char *,char *); /* A <- A U B, thru element N */
#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */
@@ -672,7 +754,7 @@ void FindRulePrecedences(struct lemon *xp)
}
}else if( sp->prec>=0 ){
rp->precsym = rp->rhs[i];
- }
+ }
}
}
}
@@ -703,8 +785,9 @@ void FindFirstSets(struct lemon *lemp)
for(rp=lemp->rule; rp; rp=rp->next){
if( rp->lhs->lambda ) continue;
for(i=0; i<rp->nrhs; i++){
- struct symbol *sp = rp->rhs[i];
- if( sp->type!=TERMINAL || sp->lambda==LEMON_FALSE ) break;
+ struct symbol *sp = rp->rhs[i];
+ assert( sp->type==NONTERMINAL || sp->lambda==LEMON_FALSE );
+ if( sp->lambda==LEMON_FALSE ) break;
}
if( i==rp->nrhs ){
rp->lhs->lambda = LEMON_TRUE;
@@ -729,12 +812,12 @@ void FindFirstSets(struct lemon *lemp)
progress += SetAdd(s1->firstset,s2->subsym[j]->index);
}
break;
- }else if( s1==s2 ){
+ }else if( s1==s2 ){
if( s1->lambda==LEMON_FALSE ) break;
- }else{
+ }else{
progress += SetUnion(s1->firstset,s2->firstset);
if( s2->lambda==LEMON_FALSE ) break;
- }
+ }
}
}
}while( progress );
@@ -977,15 +1060,15 @@ void FindFollowSets(struct lemon *lemp)
if( change ){
plp->cfp->status = INCOMPLETE;
progress = 1;
- }
- }
+ }
+ }
cfp->status = COMPLETE;
}
}
}while( progress );
}
-static int resolve_conflict(struct action *,struct action *, struct symbol *);
+static int resolve_conflict(struct action *,struct action *);
/* Compute the reduce actions, and resolve conflicts.
*/
@@ -1011,7 +1094,7 @@ void FindActions(struct lemon *lemp)
** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
}
- }
+ }
}
}
}
@@ -1039,7 +1122,7 @@ void FindActions(struct lemon *lemp)
for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
/* The two actions "ap" and "nap" have the same lookahead.
** Figure out which one should be used */
- lemp->nconflict += resolve_conflict(ap,nap,lemp->errsym);
+ lemp->nconflict += resolve_conflict(ap,nap);
}
}
}
@@ -1074,8 +1157,7 @@ void FindActions(struct lemon *lemp)
*/
static int resolve_conflict(
struct action *apx,
- struct action *apy,
- struct symbol *errsym /* The error symbol (if defined. NULL otherwise) */
+ struct action *apy
){
struct symbol *spx, *spy;
int errcnt = 0;
@@ -1281,11 +1363,11 @@ void Configlist_closure(struct lemon *lemp)
SetAdd(newcfp->fws, xsp->subsym[k]->index);
}
break;
- }else{
+ }else{
SetUnion(newcfp->fws,xsp->firstset);
if( xsp->lambda==LEMON_FALSE ) break;
- }
- }
+ }
+ }
if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
}
}
@@ -1386,7 +1468,7 @@ static void handle_D_option(char *z){
fprintf(stderr,"out of memory\n");
exit(1);
}
- strcpy(*paz, z);
+ lemon_strcpy(*paz, z);
for(z=*paz; *z && *z!='='; z++){}
*z = 0;
}
@@ -1397,7 +1479,7 @@ static void handle_T_option(char *z){
if( user_templatename==0 ){
memory_error();
}
- strcpy(user_templatename, z);
+ lemon_strcpy(user_templatename, z);
}
/* The main program. Parse the command line and do it... */
@@ -1433,8 +1515,6 @@ int main(int argc, char **argv)
int exitcode;
struct lemon lem;
- atexit(LemonAtExit);
-
OptInit(argv,options,stderr);
if( version ){
printf("Lemon version 1.0\n");
@@ -1468,12 +1548,15 @@ int main(int argc, char **argv)
}
/* Count and index the symbols of the grammar */
- lem.nsymbol = Symbol_count();
Symbol_new("{default}");
+ lem.nsymbol = Symbol_count();
lem.symbols = Symbol_arrayof();
- for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
- qsort(lem.symbols,lem.nsymbol+1,sizeof(struct symbol*), Symbolcmpp);
- for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
+ for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
+ qsort(lem.symbols,lem.nsymbol,sizeof(struct symbol*), Symbolcmpp);
+ for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
+ while( lem.symbols[i-1]->type==MULTITERMINAL ){ i--; }
+ assert( strcmp(lem.symbols[i-1]->name,"{default}")==0 );
+ lem.nsymbol = i - 1;
for(i=1; isupper(lem.symbols[i]->name[0]); i++);
lem.nterminal = i;
@@ -1537,7 +1620,6 @@ int main(int argc, char **argv)
/* return 0 on success, 1 on failure. */
exitcode = ((lem.errorcnt > 0) || (lem.nconflict > 0)) ? 1 : 0;
- successful_exit = (exitcode == 0);
exit(exitcode);
return (exitcode);
}
@@ -1568,7 +1650,7 @@ int main(int argc, char **argv)
/*
** Return a pointer to the next structure in the linked list.
*/
-#define NEXT(A) (*(char**)(((unsigned long)A)+offset))
+#define NEXT(A) (*(char**)(((char*)A)+offset))
/*
** Inputs:
@@ -1962,7 +2044,9 @@ enum e_state {
WAITING_FOR_DESTRUCTOR_SYMBOL,
WAITING_FOR_DATATYPE_SYMBOL,
WAITING_FOR_FALLBACK_ID,
- WAITING_FOR_WILDCARD_ID
+ WAITING_FOR_WILDCARD_ID,
+ WAITING_FOR_CLASS_ID,
+ WAITING_FOR_CLASS_TOKEN
};
struct pstate {
char *filename; /* Name of the input file */
@@ -1972,6 +2056,7 @@ struct pstate {
struct lemon *gp; /* Global state vector */
enum e_state state; /* The state of the parser */
struct symbol *fallback; /* The fallback token */
+ struct symbol *tkclass; /* Token class symbol */
struct symbol *lhs; /* Left-hand side of current rule */
const char *lhsalias; /* Alias for the LHS */
int nrhs; /* Number of right-hand side symbols seen */
@@ -2015,10 +2100,10 @@ static void parseonetoken(struct pstate *psp)
}else if( x[0]=='{' ){
if( psp->prevrule==0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
-"There is no prior rule opon which to attach the code \
+"There is no prior rule upon which to attach the code \
fragment which begins on this line.");
psp->errorcnt++;
- }else if( psp->prevrule->code!=0 ){
+ }else if( psp->prevrule->code!=0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Code fragment beginning on this line is not the first \
to follow the previous rule.");
@@ -2026,7 +2111,7 @@ to follow the previous rule.");
}else{
psp->prevrule->line = psp->tokenlineno;
psp->prevrule->code = &x[1];
- }
+ }
}else if( x[0]=='[' ){
psp->state = PRECEDENCE_MARK_1;
}else{
@@ -2119,7 +2204,7 @@ to follow the previous rule.");
"Can't allocate enough memory for this rule.");
psp->errorcnt++;
psp->prevrule = 0;
- }else{
+ }else{
int i;
rp->ruleline = psp->tokenlineno;
rp->rhs = (struct symbol**)&rp[1];
@@ -2127,7 +2212,7 @@ to follow the previous rule.");
for(i=0; i<psp->nrhs; i++){
rp->rhs[i] = psp->rhs[i];
rp->rhsalias[i] = psp->alias[i];
- }
+ }
rp->lhs = psp->lhs;
rp->lhsalias = psp->lhsalias;
rp->nrhs = psp->nrhs;
@@ -2139,12 +2224,12 @@ to follow the previous rule.");
rp->next = 0;
if( psp->firstrule==0 ){
psp->firstrule = psp->lastrule = rp;
- }else{
+ }else{
psp->lastrule->next = rp;
psp->lastrule = rp;
- }
+ }
psp->prevrule = rp;
- }
+ }
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( isalpha(x[0]) ){
if( psp->nrhs>=MAXRHS ){
@@ -2153,11 +2238,11 @@ to follow the previous rule.");
x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
- }else{
+ }else{
psp->rhs[psp->nrhs] = Symbol_new(x);
psp->alias[psp->nrhs] = 0;
psp->nrhs++;
- }
+ }
}else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 ){
struct symbol *msp = psp->rhs[psp->nrhs-1];
if( msp->type!=MULTITERMINAL ){
@@ -2221,24 +2306,24 @@ to follow the previous rule.");
if( strcmp(x,"name")==0 ){
psp->declargslot = &(psp->gp->name);
psp->insertLineMacro = 0;
- }else if( strcmp(x,"include")==0 ){
+ }else if( strcmp(x,"include")==0 ){
psp->declargslot = &(psp->gp->include);
- }else if( strcmp(x,"code")==0 ){
+ }else if( strcmp(x,"code")==0 ){
psp->declargslot = &(psp->gp->extracode);
- }else if( strcmp(x,"token_destructor")==0 ){
+ }else if( strcmp(x,"token_destructor")==0 ){
psp->declargslot = &psp->gp->tokendest;
- }else if( strcmp(x,"default_destructor")==0 ){
+ }else if( strcmp(x,"default_destructor")==0 ){
psp->declargslot = &psp->gp->vardest;
- }else if( strcmp(x,"token_prefix")==0 ){
+ }else if( strcmp(x,"token_prefix")==0 ){
psp->declargslot = &psp->gp->tokenprefix;
psp->insertLineMacro = 0;
- }else if( strcmp(x,"syntax_error")==0 ){
+ }else if( strcmp(x,"syntax_error")==0 ){
psp->declargslot = &(psp->gp->error);
- }else if( strcmp(x,"parse_accept")==0 ){
+ }else if( strcmp(x,"parse_accept")==0 ){
psp->declargslot = &(psp->gp->accept);
- }else if( strcmp(x,"parse_failure")==0 ){
+ }else if( strcmp(x,"parse_failure")==0 ){
psp->declargslot = &(psp->gp->failure);
- }else if( strcmp(x,"stack_overflow")==0 ){
+ }else if( strcmp(x,"stack_overflow")==0 ){
psp->declargslot = &(psp->gp->overflow);
}else if( strcmp(x,"extra_argument")==0 ){
psp->declargslot = &(psp->gp->arg);
@@ -2267,21 +2352,23 @@ to follow the previous rule.");
psp->preccounter++;
psp->declassoc = NONE;
psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
- }else if( strcmp(x,"destructor")==0 ){
+ }else if( strcmp(x,"destructor")==0 ){
psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL;
- }else if( strcmp(x,"type")==0 ){
+ }else if( strcmp(x,"type")==0 ){
psp->state = WAITING_FOR_DATATYPE_SYMBOL;
}else if( strcmp(x,"fallback")==0 ){
psp->fallback = 0;
psp->state = WAITING_FOR_FALLBACK_ID;
}else if( strcmp(x,"wildcard")==0 ){
psp->state = WAITING_FOR_WILDCARD_ID;
+ }else if( strcmp(x,"token_class")==0 ){
+ psp->state = WAITING_FOR_CLASS_ID;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Unknown declaration keyword: \"%%%s\".",x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
- }
+ }
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Illegal declaration keyword: \"%s\".",x);
@@ -2336,10 +2423,10 @@ to follow the previous rule.");
ErrorMsg(psp->filename,psp->tokenlineno,
"Symbol \"%s\" has already be given a precedence.",x);
psp->errorcnt++;
- }else{
+ }else{
sp->prec = psp->preccounter;
sp->assoc = psp->declassoc;
- }
+ }
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Can't assign a precedence to \"%s\".",x);
@@ -2369,7 +2456,7 @@ to follow the previous rule.");
for(z=psp->filename, nBack=0; *z; z++){
if( *z=='\\' ) nBack++;
}
- sprintf(zLine, "#line %d ", psp->tokenlineno);
+ lemon_sprintf(zLine, "#line %d ", psp->tokenlineno);
nLine = lemonStrlen(zLine);
n += nLine + lemonStrlen(psp->filename) + nBack;
}
@@ -2444,6 +2531,40 @@ to follow the previous rule.");
}
}
break;
+ case WAITING_FOR_CLASS_ID:
+ if( !islower(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%token_class must be followed by an identifier: ", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else if( Symbol_find(x) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "Symbol \"%s\" already used", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ psp->tkclass = Symbol_new(x);
+ psp->tkclass->type = MULTITERMINAL;
+ psp->state = WAITING_FOR_CLASS_TOKEN;
+ }
+ break;
+ case WAITING_FOR_CLASS_TOKEN:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( isupper(x[0]) || ((x[0]=='|' || x[0]=='/') && isupper(x[1])) ){
+ struct symbol *msp = psp->tkclass;
+ msp->nsubsym++;
+ msp->subsym = (struct symbol **) realloc(msp->subsym,
+ sizeof(struct symbol*)*msp->nsubsym);
+ if( !isupper(x[0]) ) x++;
+ msp->subsym[msp->nsubsym-1] = Symbol_new(x);
+ }else{
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%token_class argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
case RESYNC_AFTER_RULE_ERROR:
/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
** break; */
@@ -2538,10 +2659,10 @@ void Parse(struct lemon *gp)
filesize = ftell(fp);
rewind(fp);
filebuf = (char *)malloc( filesize+1 );
- if( filebuf==0 ){
- ErrorMsg(ps.filename,0,"Can't allocate %d of memory to hold this file.",
- filesize+1);
+ if( filesize>100000000 || filebuf==0 ){
+ ErrorMsg(ps.filename,0,"Input file too large.");
gp->errorcnt++;
+ fclose(fp);
return;
}
if( fread(filebuf,1,filesize,fp)!=filesize ){
@@ -2549,6 +2670,7 @@ void Parse(struct lemon *gp)
filesize);
free(filebuf);
gp->errorcnt++;
+ fclose(fp);
return;
}
fclose(fp);
@@ -2607,12 +2729,12 @@ void Parse(struct lemon *gp)
if( c=='\n' ) lineno++;
prevc = c;
cp++;
- }
- }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
+ }
+ }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
cp = &cp[2];
while( (c= *cp)!=0 && c!='\n' ) cp++;
if( c ) lineno++;
- }else if( c=='\'' || c=='\"' ){ /* String a character literals */
+ }else if( c=='\'' || c=='\"' ){ /* String a character literals */
int startchar, prevc;
startchar = c;
prevc = 0;
@@ -2620,8 +2742,8 @@ void Parse(struct lemon *gp)
if( c=='\n' ) lineno++;
if( prevc=='\\' ) prevc = 0;
else prevc = c;
- }
- }
+ }
+ }
}
if( c==0 ){
ErrorMsg(ps.filename,ps.tokenlineno,
@@ -2736,10 +2858,10 @@ PRIVATE char *file_makename(struct lemon *lemp, const char *suffix)
fprintf(stderr,"Can't allocate space for a filename.\n");
exit(1);
}
- strcpy(name,lemp->filename);
+ lemon_strcpy(name,lemp->filename);
cp = strrchr(name,'.');
if( cp ) *cp = 0;
- strcat(name,suffix);
+ lemon_strcat(name,suffix);
return name;
}
@@ -2761,23 +2883,6 @@ PRIVATE FILE *file_open(
lemp->errorcnt++;
return 0;
}
-
- /* Add files we create to a list, so we can delete them if we fail. This
- ** is to keep makefiles from getting confused. We don't include .out files,
- ** though: this is debug information, and you don't want it deleted if there
- ** was an error you need to track down.
- */
- if(( *mode=='w' ) && (strcmp(suffix, ".out") != 0)){
- const char **ptr = (const char **)
- realloc(made_files, sizeof (const char **) * (made_files_count + 1));
- const char *fname = Strsafe(lemp->outname);
- if ((ptr == NULL) || (fname == NULL)) {
- free(ptr);
- memory_error();
- }
- made_files = ptr;
- made_files[made_files_count++] = fname;
- }
return fp;
}
@@ -2813,11 +2918,13 @@ void Reprint(struct lemon *lemp)
printf(" ::=");
for(i=0; i<rp->nrhs; i++){
sp = rp->rhs[i];
- printf(" %s", sp->name);
if( sp->type==MULTITERMINAL ){
+ printf(" %s", sp->subsym[0]->name);
for(j=1; j<sp->nsubsym; j++){
printf("|%s", sp->subsym[j]->name);
}
+ }else{
+ printf(" %s", sp->name);
}
/* if( rp->rhsalias[i] ) printf("(%s)",rp->rhsalias[i]); */
}
@@ -2839,11 +2946,13 @@ void ConfigPrint(FILE *fp, struct config *cfp)
if( i==cfp->dot ) fprintf(fp," *");
if( i==rp->nrhs ) break;
sp = rp->rhs[i];
- fprintf(fp," %s", sp->name);
if( sp->type==MULTITERMINAL ){
+ fprintf(fp," %s", sp->subsym[0]->name);
for(j=1; j<sp->nsubsym; j++){
fprintf(fp,"|%s",sp->subsym[j]->name);
}
+ }else{
+ fprintf(fp," %s", sp->name);
}
}
}
@@ -2953,7 +3062,7 @@ void ReportOutput(struct lemon *lemp)
while( cfp ){
char buf[20];
if( cfp->dot==cfp->rp->nrhs ){
- sprintf(buf,"(%d)",cfp->rp->index);
+ lemon_sprintf(buf,"(%d)",cfp->rp->index);
fprintf(fp," %5s ",buf);
}else{
fprintf(fp," ");
@@ -3018,7 +3127,7 @@ PRIVATE char *pathsearch(char *argv0, char *name, int modemask)
c = *cp;
*cp = 0;
path = (char *)malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 );
- if( path ) sprintf(path,"%s/%s",argv0,name);
+ if( path ) lemon_sprintf(path,"%s/%s",argv0,name);
*cp = c;
}else{
pathlist = getenv("PATH");
@@ -3027,13 +3136,13 @@ PRIVATE char *pathsearch(char *argv0, char *name, int modemask)
path = (char *)malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 );
if( (pathbuf != 0) && (path!=0) ){
pathbufptr = pathbuf;
- strcpy(pathbuf, pathlist);
+ lemon_strcpy(pathbuf, pathlist);
while( *pathbuf ){
cp = strchr(pathbuf,':');
if( cp==0 ) cp = &pathbuf[lemonStrlen(pathbuf)];
c = *cp;
*cp = 0;
- sprintf(path,"%s/%s",pathbuf,name);
+ lemon_sprintf(path,"%s/%s",pathbuf,name);
*cp = c;
if( c==0 ) pathbuf[0] = 0;
else pathbuf = &cp[1];
@@ -3124,9 +3233,9 @@ PRIVATE FILE *tplt_open(struct lemon *lemp)
cp = strrchr(lemp->filename,'.');
if( cp ){
- sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
+ lemon_sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
}else{
- sprintf(buf,"%s.lt",lemp->filename);
+ lemon_sprintf(buf,"%s.lt",lemp->filename);
}
if( access(buf,004)==0 ){
tpltname = buf;
@@ -3277,9 +3386,9 @@ PRIVATE char *append_str(const char *zText, int n, int p1, int p2){
while( n-- > 0 ){
c = *(zText++);
if( c=='%' && n>0 && zText[0]=='d' ){
- sprintf(zInt, "%d", p1);
+ lemon_sprintf(zInt, "%d", p1);
p1 = p2;
- strcpy(&z[used], zInt);
+ lemon_strcpy(&z[used], zInt);
used += lemonStrlen(&z[used]);
zText++;
n--;
@@ -3428,12 +3537,16 @@ void print_stack_union(
int maxdtlength; /* Maximum length of any ".datatype" field. */
char *stddt; /* Standardized name for a datatype */
int i,j; /* Loop counters */
- int hash; /* For hashing the name of a type */
+ unsigned hash; /* For hashing the name of a type */
const char *name; /* Name of the parser */
/* Allocate and initialize types[] and allocate stddt[] */
arraysize = lemp->nsymbol * 2;
types = (char**)calloc( arraysize, sizeof(char*) );
+ if( types==0 ){
+ fprintf(stderr,"Out of memory.\n");
+ exit(1);
+ }
for(i=0; i<arraysize; i++) types[i] = 0;
maxdtlength = 0;
if( lemp->vartype ){
@@ -3447,7 +3560,7 @@ void print_stack_union(
if( len>maxdtlength ) maxdtlength = len;
}
stddt = (char*)malloc( maxdtlength*2 + 1 );
- if( types==0 || stddt==0 ){
+ if( stddt==0 ){
fprintf(stderr,"Out of memory.\n");
exit(1);
}
@@ -3491,7 +3604,7 @@ void print_stack_union(
break;
}
hash++;
- if( hash>=arraysize ) hash = 0;
+ if( hash>=(unsigned)arraysize ) hash = 0;
}
if( types[hash]==0 ){
sp->dtnum = hash + 1;
@@ -3500,7 +3613,7 @@ void print_stack_union(
fprintf(stderr,"Out of memory.\n");
exit(1);
}
- strcpy(types[hash],stddt);
+ lemon_strcpy(types[hash],stddt);
}
}
@@ -3586,9 +3699,11 @@ static void writeRuleText(FILE *out, struct rule *rp){
fprintf(out,"%s ::=", rp->lhs->name);
for(j=0; j<rp->nrhs; j++){
struct symbol *sp = rp->rhs[j];
- fprintf(out," %s", sp->name);
- if( sp->type==MULTITERMINAL ){
+ if( sp->type!=MULTITERMINAL ){
+ fprintf(out," %s", sp->name);
+ }else{
int k;
+ fprintf(out," %s", sp->subsym[0]->name);
for(k=1; k<sp->nsubsym; k++){
fprintf(out,"|%s",sp->subsym[k]->name);
}
@@ -3889,7 +4004,7 @@ void ReportTable(
/* Generate a table containing the symbolic name of every symbol
*/
for(i=0; i<lemp->nsymbol; i++){
- sprintf(line,"\"%s\",",lemp->symbols[i]->name);
+ lemon_sprintf(line,"\"%s\",",lemp->symbols[i]->name);
fprintf(out," %-15s",line);
if( (i&3)==3 ){ fprintf(out,"\n"); lineno++; }
}
@@ -4054,12 +4169,15 @@ void ReportHeader(struct lemon *lemp)
else prefix = "";
in = file_open(lemp,".h","rb");
if( in ){
+ int nextChar;
for(i=1; i<lemp->nterminal && fgets(line,LINESIZE,in); i++){
- sprintf(pattern,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ lemon_sprintf(pattern,"#define %s%-30s %3d\n",
+ prefix,lemp->symbols[i]->name,i);
if( strcmp(line,pattern) ) break;
}
+ nextChar = fgetc(in);
fclose(in);
- if( i==lemp->nterminal ){
+ if( i==lemp->nterminal && nextChar==EOF ){
/* No change in the file. Don't rewrite it. */
return;
}
@@ -4067,7 +4185,7 @@ void ReportHeader(struct lemon *lemp)
out = file_open(lemp,".h","wb");
if( out ){
for(i=1; i<lemp->nterminal; i++){
- fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ fprintf(out,"#define %s%-30s %3d\n",prefix,lemp->symbols[i]->name,i);
}
fclose(out);
}
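The two ReportHeader() hunks above widen the token-value field from %2d to %3d and tighten the "no change, skip the rewrite" test: the generated header is left alone only when every existing #define matches and the file contains nothing after the last one (the new fgetc()/EOF check). A minimal sketch of that compare-before-rewrite idea, using assumed token names and buffer sizes rather than lemon's real data structures:

/* Sketch of the "rewrite the generated header only if it really changed"
** check performed by ReportHeader() above.  The T_ prefix, the name array
** and the buffer sizes are illustrative assumptions. */
#include <stdio.h>
#include <string.h>

static int header_is_current(const char *zPath, char **azName, int nName){
  char zLine[1000];
  char zWant[1000];
  int i, ok;
  FILE *in = fopen(zPath, "rb");
  if( in==0 ) return 0;                         /* Missing file: rewrite it */
  for(i=1; i<nName && fgets(zLine, sizeof(zLine), in); i++){
    snprintf(zWant, sizeof(zWant), "#define T_%-30s %3d\n", azName[i], i);
    if( strcmp(zLine, zWant) ) break;           /* Mismatch: rewrite needed */
  }
  ok = (i==nName) && (fgetc(in)==EOF);          /* All lines match, no extra */
  fclose(in);
  return ok;
}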
@@ -4265,10 +4383,10 @@ int SetUnion(char *s1, char *s2)
** Code for processing tables in the LEMON parser generator.
*/
-PRIVATE int strhash(const char *x)
+PRIVATE unsigned strhash(const char *x)
{
- int h = 0;
- while( *x) h = h*13 + *(x++);
+ unsigned h = 0;
+ while( *x ) h = h*13 + *(x++);
return h;
}
@@ -4284,7 +4402,7 @@ const char *Strsafe(const char *y)
if( y==0 ) return 0;
z = Strsafe_find(y);
if( z==0 && (cpy=(char *)malloc( lemonStrlen(y)+1 ))!=0 ){
- strcpy(cpy,y);
+ lemon_strcpy(cpy,y);
z = cpy;
Strsafe_insert(z);
}
@@ -4323,8 +4441,7 @@ void Strsafe_init(){
if( x1a ){
x1a->size = 1024;
x1a->count = 0;
- x1a->tbl = (x1node*)malloc(
- (sizeof(x1node) + sizeof(x1node*))*1024 );
+ x1a->tbl = (x1node*)calloc(1024, sizeof(x1node) + sizeof(x1node*));
if( x1a->tbl==0 ){
free(x1a);
x1a = 0;
@@ -4340,8 +4457,8 @@ void Strsafe_init(){
int Strsafe_insert(const char *data)
{
x1node *np;
- int h;
- int ph;
+ unsigned h;
+ unsigned ph;
if( x1a==0 ) return 0;
ph = strhash(data);
@@ -4361,8 +4478,7 @@ int Strsafe_insert(const char *data)
struct s_x1 array;
array.size = size = x1a->size*2;
array.count = x1a->count;
- array.tbl = (x1node*)malloc(
- (sizeof(x1node) + sizeof(x1node*))*size );
+ array.tbl = (x1node*)calloc(size, sizeof(x1node) + sizeof(x1node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x1node**)&(array.tbl[size]);
for(i=0; i<size; i++) array.ht[i] = 0;
@@ -4395,7 +4511,7 @@ int Strsafe_insert(const char *data)
** if no such key. */
const char *Strsafe_find(const char *key)
{
- int h;
+ unsigned h;
x1node *np;
if( x1a==0 ) return 0;
@@ -4437,11 +4553,15 @@ struct symbol *Symbol_new(const char *x)
return sp;
}
-/* Compare two symbols for working purposes
+/* Compare two symbols for sorting purposes. Return negative,
+** zero, or positive if a is less than, equal to, or greater
+** than b.
**
** Symbols that begin with upper case letters (terminals or tokens)
** must sort before symbols that begin with lower case letters
-** (non-terminals). Other than that, the order does not matter.
+** (non-terminals). And MULTITERMINAL symbols (created using the
+** %token_class directive) must sort at the very end. Other than
+** that, the order does not matter.
**
** We find experimentally that leaving the symbols in their original
** order (the order they appeared in the grammar file) gives the
@@ -4449,12 +4569,11 @@ struct symbol *Symbol_new(const char *x)
*/
int Symbolcmpp(const void *_a, const void *_b)
{
- const struct symbol **a = (const struct symbol **) _a;
- const struct symbol **b = (const struct symbol **) _b;
- int i1 = (**a).index + 10000000*((**a).name[0]>'Z');
- int i2 = (**b).index + 10000000*((**b).name[0]>'Z');
- assert( i1!=i2 || strcmp((**a).name,(**b).name)==0 );
- return i1-i2;
+ const struct symbol *a = *(const struct symbol **) _a;
+ const struct symbol *b = *(const struct symbol **) _b;
+ int i1 = a->type==MULTITERMINAL ? 3 : a->name[0]>'Z' ? 2 : 1;
+ int i2 = b->type==MULTITERMINAL ? 3 : b->name[0]>'Z' ? 2 : 1;
+ return i1==i2 ? a->index - b->index : i1 - i2;
}
/* There is one instance of the following structure for each
@@ -4489,8 +4608,7 @@ void Symbol_init(){
if( x2a ){
x2a->size = 128;
x2a->count = 0;
- x2a->tbl = (x2node*)malloc(
- (sizeof(x2node) + sizeof(x2node*))*128 );
+ x2a->tbl = (x2node*)calloc(128, sizeof(x2node) + sizeof(x2node*));
if( x2a->tbl==0 ){
free(x2a);
x2a = 0;
@@ -4506,8 +4624,8 @@ void Symbol_init(){
int Symbol_insert(struct symbol *data, const char *key)
{
x2node *np;
- int h;
- int ph;
+ unsigned h;
+ unsigned ph;
if( x2a==0 ) return 0;
ph = strhash(key);
@@ -4527,8 +4645,7 @@ int Symbol_insert(struct symbol *data, const char *key)
struct s_x2 array;
array.size = size = x2a->size*2;
array.count = x2a->count;
- array.tbl = (x2node*)malloc(
- (sizeof(x2node) + sizeof(x2node*))*size );
+ array.tbl = (x2node*)calloc(size, sizeof(x2node) + sizeof(x2node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x2node**)&(array.tbl[size]);
for(i=0; i<size; i++) array.ht[i] = 0;
@@ -4563,7 +4680,7 @@ int Symbol_insert(struct symbol *data, const char *key)
** if no such key. */
struct symbol *Symbol_find(const char *key)
{
- int h;
+ unsigned h;
x2node *np;
if( x2a==0 ) return 0;
@@ -4637,9 +4754,9 @@ PRIVATE int statecmp(struct config *a, struct config *b)
}
/* Hash a state */
-PRIVATE int statehash(struct config *a)
+PRIVATE unsigned statehash(struct config *a)
{
- int h=0;
+ unsigned h=0;
while( a ){
h = h*571 + a->rp->index*37 + a->dot;
a = a->bp;
@@ -4688,8 +4805,7 @@ void State_init(){
if( x3a ){
x3a->size = 128;
x3a->count = 0;
- x3a->tbl = (x3node*)malloc(
- (sizeof(x3node) + sizeof(x3node*))*128 );
+ x3a->tbl = (x3node*)calloc(128, sizeof(x3node) + sizeof(x3node*));
if( x3a->tbl==0 ){
free(x3a);
x3a = 0;
@@ -4705,8 +4821,8 @@ void State_init(){
int State_insert(struct state *data, struct config *key)
{
x3node *np;
- int h;
- int ph;
+ unsigned h;
+ unsigned ph;
if( x3a==0 ) return 0;
ph = statehash(key);
@@ -4726,8 +4842,7 @@ int State_insert(struct state *data, struct config *key)
struct s_x3 array;
array.size = size = x3a->size*2;
array.count = x3a->count;
- array.tbl = (x3node*)malloc(
- (sizeof(x3node) + sizeof(x3node*))*size );
+ array.tbl = (x3node*)calloc(size, sizeof(x3node) + sizeof(x3node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x3node**)&(array.tbl[size]);
for(i=0; i<size; i++) array.ht[i] = 0;
@@ -4762,7 +4877,7 @@ int State_insert(struct state *data, struct config *key)
** if no such key. */
struct state *State_find(struct config *key)
{
- int h;
+ unsigned h;
x3node *np;
if( x3a==0 ) return 0;
@@ -4784,7 +4899,7 @@ struct state **State_arrayof()
int i,size;
if( x3a==0 ) return 0;
size = x3a->count;
- array = (struct state **)malloc( sizeof(struct state *)*size );
+ array = (struct state **)calloc(size, sizeof(struct state *));
if( array ){
for(i=0; i<size; i++) array[i] = x3a->tbl[i].data;
}
@@ -4792,9 +4907,9 @@ struct state **State_arrayof()
}
/* Hash a configuration */
-PRIVATE int confighash(struct config *a)
+PRIVATE unsigned confighash(struct config *a)
{
- int h=0;
+ unsigned h=0;
h = h*571 + a->rp->index*37 + a->dot;
return h;
}
@@ -4830,8 +4945,7 @@ void Configtable_init(){
if( x4a ){
x4a->size = 64;
x4a->count = 0;
- x4a->tbl = (x4node*)malloc(
- (sizeof(x4node) + sizeof(x4node*))*64 );
+ x4a->tbl = (x4node*)calloc(64, sizeof(x4node) + sizeof(x4node*));
if( x4a->tbl==0 ){
free(x4a);
x4a = 0;
@@ -4847,8 +4961,8 @@ void Configtable_init(){
int Configtable_insert(struct config *data)
{
x4node *np;
- int h;
- int ph;
+ unsigned h;
+ unsigned ph;
if( x4a==0 ) return 0;
ph = confighash(data);
@@ -4868,8 +4982,7 @@ int Configtable_insert(struct config *data)
struct s_x4 array;
array.size = size = x4a->size*2;
array.count = x4a->count;
- array.tbl = (x4node*)malloc(
- (sizeof(x4node) + sizeof(x4node*))*size );
+ array.tbl = (x4node*)calloc(size, sizeof(x4node) + sizeof(x4node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x4node**)&(array.tbl[size]);
for(i=0; i<size; i++) array.ht[i] = 0;
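Throughout the hunks above, the hash tables (x1a, x2a, x3a, x4a) switch from malloc((sizeof(node)+sizeof(node*))*n) to calloc(n, sizeof(node)+sizeof(node*)): the block is zeroed and the element count is explicit, while the layout stays the same single allocation holding the node array followed by the bucket-pointer array. A standalone sketch of that layout, with a deliberately simplified node type (the field names are assumptions, not lemon's):

/* One calloc() block holds both the node array and the bucket-head array,
** mirroring the x1a/x2a/x3a/x4a tables in lemon.c. */
#include <stdlib.h>

typedef struct node node;
struct node {
  const char *key;     /* Key stored in this slot */
  node *next;          /* Next entry in the same hash bucket */
};

typedef struct {
  int size;            /* Number of slots/buckets */
  int count;           /* Number of entries currently stored */
  node *tbl;           /* Array of "size" nodes */
  node **ht;           /* Array of "size" bucket-head pointers */
} table;

static int table_init(table *p, int size){
  p->tbl = (node*)calloc(size, sizeof(node) + sizeof(node*));
  if( p->tbl==0 ) return 0;             /* Fail due to malloc failure */
  p->ht = (node**)&p->tbl[size];        /* Buckets live just past the nodes */
  p->size = size;
  p->count = 0;
  return 1;
}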
diff --git a/lang/sql/sqlite/tool/logest.c b/lang/sql/sqlite/tool/logest.c
new file mode 100644
index 00000000..8dad6cc9
--- /dev/null
+++ b/lang/sql/sqlite/tool/logest.c
@@ -0,0 +1,141 @@
+/*
+** 2013-06-10
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains a simple command-line utility for converting between
+** integers and LogEst values and for doing simple arithmetic
+** operations (multiply and add) on LogEst values.
+**
+** Usage:
+**
+** ./LogEst ARGS
+**
+** Arguments:
+**
+**    'x'    Multiply the top two elements of the stack
+** '+' Add the top two elements of the stack
+** NUM Convert NUM from integer to LogEst and push onto the stack
+** ^NUM Interpret NUM as a LogEst and push onto stack.
+**
+** Examples:
+**
+** To convert 123 from LogEst to integer:
+**
+** ./LogEst ^123
+**
+** To convert 123456 from integer to LogEst:
+**
+** ./LogEst 123456
+**
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <assert.h>
+#include <string.h>
+#include "sqlite3.h"
+
+typedef short int LogEst; /* 10 times log2() */
+
+LogEst logEstMultiply(LogEst a, LogEst b){ return a+b; }
+LogEst logEstAdd(LogEst a, LogEst b){
+ static const unsigned char x[] = {
+ 10, 10, /* 0,1 */
+ 9, 9, /* 2,3 */
+ 8, 8, /* 4,5 */
+ 7, 7, 7, /* 6,7,8 */
+ 6, 6, 6, /* 9,10,11 */
+ 5, 5, 5, /* 12-14 */
+ 4, 4, 4, 4, /* 15-18 */
+ 3, 3, 3, 3, 3, 3, /* 19-24 */
+ 2, 2, 2, 2, 2, 2, 2, /* 25-31 */
+ };
+ if( a<b ){ LogEst t = a; a = b; b = t; }
+ if( a>b+49 ) return a;
+ if( a>b+31 ) return a+1;
+ return a+x[a-b];
+}
+LogEst logEstFromInteger(sqlite3_uint64 x){
+ static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 };
+ LogEst y = 40;
+ if( x<8 ){
+ if( x<2 ) return 0;
+ while( x<8 ){ y -= 10; x <<= 1; }
+ }else{
+ while( x>255 ){ y += 40; x >>= 4; }
+ while( x>15 ){ y += 10; x >>= 1; }
+ }
+ return a[x&7] + y - 10;
+}
+static sqlite3_uint64 logEstToInt(LogEst x){
+ sqlite3_uint64 n;
+ if( x<10 ) return 1;
+ n = x%10;
+ x /= 10;
+ if( n>=5 ) n -= 2;
+ else if( n>=1 ) n -= 1;
+ if( x>=3 ) return (n+8)<<(x-3);
+ return (n+8)>>(3-x);
+}
+static LogEst logEstFromDouble(double x){
+ sqlite3_uint64 a;
+ LogEst e;
+ assert( sizeof(x)==8 && sizeof(a)==8 );
+ if( x<=0.0 ) return -32768;
+ if( x<1.0 ) return -logEstFromDouble(1/x);
+ if( x<1024.0 ) return logEstFromInteger((sqlite3_uint64)(1024.0*x)) - 100;
+ if( x<=2000000000.0 ) return logEstFromInteger((sqlite3_uint64)x);
+ memcpy(&a, &x, 8);
+ e = (a>>52) - 1022;
+ return e*10;
+}
+
+int isFloat(const char *z){
+ while( z[0] ){
+ if( z[0]=='.' || z[0]=='E' || z[0]=='e' ) return 1;
+ z++;
+ }
+ return 0;
+}
+
+int main(int argc, char **argv){
+ int i;
+ int n = 0;
+ LogEst a[100];
+ for(i=1; i<argc; i++){
+ const char *z = argv[i];
+ if( z[0]=='+' ){
+ if( n>=2 ){
+ a[n-2] = logEstAdd(a[n-2],a[n-1]);
+ n--;
+ }
+ }else if( z[0]=='x' ){
+ if( n>=2 ){
+ a[n-2] = logEstMultiply(a[n-2],a[n-1]);
+ n--;
+ }
+ }else if( z[0]=='^' ){
+ a[n++] = atoi(z+1);
+ }else if( isFloat(z) ){
+ a[n++] = logEstFromDouble(atof(z));
+ }else{
+ a[n++] = logEstFromInteger(atoi(z));
+ }
+ }
+ for(i=n-1; i>=0; i--){
+ if( a[i]<0 ){
+ printf("%d (%f)\n", a[i], 1.0/(double)logEstToInt(-a[i]));
+ }else{
+ sqlite3_uint64 x = logEstToInt(a[i]+100)*100/1024;
+ printf("%d (%lld.%02lld)\n", a[i], x/100, x%100);
+ }
+ }
+ return 0;
+}
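As context for the arithmetic above: a LogEst stores a quantity v as roughly 10*log2(v), so logEstMultiply() is plain addition and logEstAdd() uses the small lookup table to add the underlying values. A minimal usage sketch, assuming it is appended to logest.c itself (logEstToInt() is static there) and using powers of two so every round-trip is exact:

/* Hypothetical demo routine, not part of the tool above. */
#include <assert.h>

static void logest_demo(void){
  LogEst a = logEstFromInteger(16);     /* 10*log2(16) == 40 */
  LogEst b = logEstFromInteger(64);     /* 10*log2(64) == 60 */
  LogEst p = logEstMultiply(a, b);      /* log of a product is a sum: 100 */
  assert( a==40 && b==60 && p==100 );
  assert( logEstToInt(p)==1024 );       /* decode back: 16*64 == 1024 */
}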
diff --git a/lang/sql/sqlite/tool/mkautoconfamal.sh b/lang/sql/sqlite/tool/mkautoconfamal.sh
new file mode 100644
index 00000000..c13f7c99
--- /dev/null
+++ b/lang/sql/sqlite/tool/mkautoconfamal.sh
@@ -0,0 +1,83 @@
+#!/bin/sh
+# This script is used to build the amalgamation autoconf package.
+# It assumes the following:
+#
+# 1. The files "sqlite3.c", "sqlite3.h" and "sqlite3ext.h"
+# are available in the current directory.
+#
+# 2. Variable $TOP is set to the full path of the root directory
+# of the SQLite source tree.
+#
+# 3. There is nothing of value in the ./mkpkg_tmp_dir directory.
+# This is important, as the script executes "rm -rf ./mkpkg_tmp_dir".
+#
+
+
+# Bail out of the script if any command returns a non-zero exit
+# status, or if the script tries to use an unset variable.  These
+# options may fail for old /bin/sh interpreters.
+#
+set -e
+set -u
+
+TMPSPACE=./mkpkg_tmp_dir
+VERSION=`cat $TOP/VERSION`
+
+# Set global variable $ARTIFACT to the "3xxyyzz" string incorporated
+# into artifact filenames. And $VERSION2 to the "3.x.y[.z]" form.
+xx=`echo $VERSION|sed 's/3\.\([0-9]*\)\..*/\1/'`
+yy=`echo $VERSION|sed 's/3\.[^.]*\.\([0-9]*\).*/\1/'`
+zz=0
+set +e
+ zz=`echo $VERSION|sed 's/3\.[^.]*\.[^.]*\.\([0-9]*\).*/\1/'|grep -v '\.'`
+set -e
+ARTIFACT=`printf "3%.2d%.2d%.2d" $xx $yy $zz`
+
+rm -rf $TMPSPACE
+cp -R $TOP/autoconf $TMPSPACE
+
+cp sqlite3.c $TMPSPACE
+cp sqlite3.h $TMPSPACE
+cp sqlite3ext.h $TMPSPACE
+cp $TOP/sqlite3.1 $TMPSPACE
+cp $TOP/sqlite3.pc.in $TMPSPACE
+cp $TOP/src/shell.c $TMPSPACE
+
+chmod 755 $TMPSPACE/install-sh
+chmod 755 $TMPSPACE/missing
+chmod 755 $TMPSPACE/depcomp
+chmod 755 $TMPSPACE/config.sub
+chmod 755 $TMPSPACE/config.guess
+
+cat $TMPSPACE/configure.ac |
+sed "s/AC_INIT(sqlite, .*, http:\/\/www.sqlite.org)/AC_INIT(sqlite, $VERSION, http:\/\/www.sqlite.org)/" > $TMPSPACE/tmp
+mv $TMPSPACE/tmp $TMPSPACE/configure.ac
+
+cd $TMPSPACE
+aclocal
+autoconf
+automake
+
+mkdir -p tea/generic
+echo "#ifdef USE_SYSTEM_SQLITE" > tea/generic/tclsqlite3.c
+echo "# include <sqlite3.h>" >> tea/generic/tclsqlite3.c
+echo "#else" >> tea/generic/tclsqlite3.c
+echo "#include \"../../sqlite3.c\"" >> tea/generic/tclsqlite3.c
+echo "#endif" >> tea/generic/tclsqlite3.c
+cat $TOP/src/tclsqlite.c >> tea/generic/tclsqlite3.c
+
+cat tea/configure.in |
+ sed "s/AC_INIT(\[sqlite\], .*)/AC_INIT([sqlite], [$VERSION])/" > tmp
+mv tmp tea/configure.in
+
+cd tea
+autoconf
+rm -rf autom4te.cache
+
+cd ../
+./configure && make dist
+tar -xzf sqlite-$VERSION.tar.gz
+mv sqlite-$VERSION sqlite-autoconf-$ARTIFACT
+tar -czf sqlite-autoconf-$ARTIFACT.tar.gz sqlite-autoconf-$ARTIFACT
+mv sqlite-autoconf-$ARTIFACT.tar.gz ..
+
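The sed/printf block above reduces a "3.x.y[.z]" VERSION string to the seven-character "3xxyyzz" artifact string used in the release file names; a hypothetical version "3.8.1" would become "3080100". The same mapping, sketched in C purely for illustration:

/* Illustrative re-statement of the VERSION -> ARTIFACT mapping done by the
** shell script above; the version string used here is a made-up example. */
#include <stdio.h>

static void artifact_from_version(const char *zVersion, char *zOut, int nOut){
  int xx = 0, yy = 0, zz = 0;
  sscanf(zVersion, "3.%d.%d.%d", &xx, &yy, &zz);   /* zz stays 0 if absent */
  snprintf(zOut, nOut, "3%02d%02d%02d", xx, yy, zz);
}

int main(void){
  char zArtifact[16];
  artifact_from_version("3.8.1", zArtifact, sizeof(zArtifact));
  printf("sqlite-autoconf-%s.tar.gz\n", zArtifact);  /* ...-3080100.tar.gz */
  return 0;
}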
diff --git a/lang/sql/sqlite/tool/mkkeywordhash.c b/lang/sql/sqlite/tool/mkkeywordhash.c
index 509aeef9..a467931c 100644
--- a/lang/sql/sqlite/tool/mkkeywordhash.c
+++ b/lang/sql/sqlite/tool/mkkeywordhash.c
@@ -138,6 +138,11 @@ struct Keyword {
#else
# define AUTOVACUUM 0x00020000
#endif
+#ifdef SQLITE_OMIT_CTE
+# define CTE 0
+#else
+# define CTE 0x00040000
+#endif
/*
** These are the keywords
@@ -234,6 +239,7 @@ static Keyword aKeywordTable[] = {
{ "PRIMARY", "TK_PRIMARY", ALWAYS },
{ "QUERY", "TK_QUERY", EXPLAIN },
{ "RAISE", "TK_RAISE", TRIGGER },
+ { "RECURSIVE", "TK_RECURSIVE", CTE },
{ "REFERENCES", "TK_REFERENCES", FKEY },
{ "REGEXP", "TK_LIKE_KW", ALWAYS },
{ "REINDEX", "TK_REINDEX", REINDEX },
@@ -262,6 +268,8 @@ static Keyword aKeywordTable[] = {
{ "VALUES", "TK_VALUES", ALWAYS },
{ "VIEW", "TK_VIEW", VIEW },
{ "VIRTUAL", "TK_VIRTUAL", VTAB },
+ { "WITH", "TK_WITH", CTE },
+ { "WITHOUT", "TK_WITHOUT", ALWAYS },
{ "WHEN", "TK_WHEN", ALWAYS },
{ "WHERE", "TK_WHERE", ALWAYS },
};
@@ -360,7 +368,7 @@ int main(int argc, char **argv){
/* Fill in the lengths of strings and hashes for all entries. */
for(i=0; i<nKeyword; i++){
Keyword *p = &aKeywordTable[i];
- p->len = strlen(p->zName);
+ p->len = (int)strlen(p->zName);
assert( p->len<sizeof(p->zOrigName) );
strcpy(p->zOrigName, p->zName);
totalLen += p->len;
diff --git a/lang/sql/sqlite/tool/mkpragmatab.tcl b/lang/sql/sqlite/tool/mkpragmatab.tcl
new file mode 100644
index 00000000..28a1e468
--- /dev/null
+++ b/lang/sql/sqlite/tool/mkpragmatab.tcl
@@ -0,0 +1,434 @@
+#!/usr/bin/tclsh
+#
+# Run this script to generate the pragma name lookup table C code.
+#
+# To add new pragmas, first add the name and other relevant attributes
+# of the pragma to the "pragma_def" object below. Then run this script
+# to generate the C-code for the lookup table and copy/paste the output
+# of this script into the appropriate spot in the pragma.c source file.
+# Then add the extra "case PragTyp_XXXXX:" and subsequent code for the
+# new pragma.
+#
+
+set pragma_def {
+ NAME: full_column_names
+ TYPE: FLAG
+ ARG: SQLITE_FullColNames
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: short_column_names
+ TYPE: FLAG
+ ARG: SQLITE_ShortColNames
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: count_changes
+ TYPE: FLAG
+ ARG: SQLITE_CountRows
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: empty_result_callbacks
+ TYPE: FLAG
+ ARG: SQLITE_NullCallback
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: legacy_file_format
+ TYPE: FLAG
+ ARG: SQLITE_LegacyFileFmt
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: fullfsync
+ TYPE: FLAG
+ ARG: SQLITE_FullFSync
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: checkpoint_fullfsync
+ TYPE: FLAG
+ ARG: SQLITE_CkptFullFSync
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: cache_spill
+ TYPE: FLAG
+ ARG: SQLITE_CacheSpill
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: reverse_unordered_selects
+ TYPE: FLAG
+ ARG: SQLITE_ReverseOrder
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: query_only
+ TYPE: FLAG
+ ARG: SQLITE_QueryOnly
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: automatic_index
+ TYPE: FLAG
+ ARG: SQLITE_AutoIndex
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: !defined(SQLITE_OMIT_AUTOMATIC_INDEX)
+
+ NAME: sql_trace
+ TYPE: FLAG
+ ARG: SQLITE_SqlTrace
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: vdbe_listing
+ TYPE: FLAG
+ ARG: SQLITE_VdbeListing
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: vdbe_trace
+ TYPE: FLAG
+ ARG: SQLITE_VdbeTrace
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: vdbe_addoptrace
+ TYPE: FLAG
+ ARG: SQLITE_VdbeAddopTrace
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: vdbe_debug
+ TYPE: FLAG
+ ARG: SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: vdbe_eqp
+ TYPE: FLAG
+ ARG: SQLITE_VdbeEQP
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: ignore_check_constraints
+ TYPE: FLAG
+ ARG: SQLITE_IgnoreChecks
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: !defined(SQLITE_OMIT_CHECK)
+
+ NAME: writable_schema
+ TYPE: FLAG
+ ARG: SQLITE_WriteSchema|SQLITE_RecoveryMode
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: read_uncommitted
+ TYPE: FLAG
+ ARG: SQLITE_ReadUncommitted
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: recursive_triggers
+ TYPE: FLAG
+ ARG: SQLITE_RecTriggers
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+
+ NAME: foreign_keys
+ TYPE: FLAG
+ ARG: SQLITE_ForeignKeys
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+
+ NAME: defer_foreign_keys
+ TYPE: FLAG
+ ARG: SQLITE_DeferFKs
+ IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ IF: !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+
+ NAME: default_cache_size
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
+
+ NAME: page_size
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: secure_delete
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: page_count
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: max_page_count
+ TYPE: PAGE_COUNT
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: locking_mode
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: journal_mode
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: journal_size_limit
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: cache_size
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: mmap_size
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: auto_vacuum
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_AUTOVACUUM)
+
+ NAME: incremental_vacuum
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_AUTOVACUUM)
+
+ NAME: temp_store
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: temp_store_directory
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: data_store_directory
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN
+
+ NAME: lock_proxy_file
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE
+
+ NAME: synchronous
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+
+ NAME: table_info
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+
+ NAME: stats
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+
+ NAME: index_info
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+
+ NAME: index_list
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+
+ NAME: database_list
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+
+ NAME: collation_list
+ IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+
+ NAME: foreign_key_list
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_FOREIGN_KEY)
+
+ NAME: foreign_key_check
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+
+ NAME: parser_trace
+ IF: defined(SQLITE_DEBUG)
+
+ NAME: case_sensitive_like
+
+ NAME: integrity_check
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_INTEGRITY_CHECK)
+
+ NAME: quick_check
+ TYPE: INTEGRITY_CHECK
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_INTEGRITY_CHECK)
+
+ NAME: encoding
+ IF: !defined(SQLITE_OMIT_UTF16)
+
+ NAME: schema_version
+ TYPE: HEADER_VALUE
+ IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+
+ NAME: user_version
+ TYPE: HEADER_VALUE
+ IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+
+ NAME: freelist_count
+ TYPE: HEADER_VALUE
+ IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+
+ NAME: application_id
+ TYPE: HEADER_VALUE
+ IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+
+ NAME: compile_options
+ IF: !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS)
+
+ NAME: wal_checkpoint
+ FLAG: NeedSchema
+ IF: !defined(SQLITE_OMIT_WAL)
+
+ NAME: wal_autocheckpoint
+ IF: !defined(SQLITE_OMIT_WAL)
+
+ NAME: shrink_memory
+
+ NAME: busy_timeout
+
+ NAME: lock_status
+ IF: defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+
+ NAME: key
+ IF: defined(SQLITE_HAS_CODEC)
+
+ NAME: rekey
+ IF: defined(SQLITE_HAS_CODEC)
+
+ NAME: hexkey
+ IF: defined(SQLITE_HAS_CODEC)
+
+ NAME: hexrekey
+ TYPE: HEXKEY
+ IF: defined(SQLITE_HAS_CODEC)
+
+ NAME: activate_extensions
+ IF: defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD)
+
+ NAME: soft_heap_limit
+}
+fconfigure stdout -translation lf
+set name {}
+set type {}
+set if {}
+set flags {}
+set arg 0
+proc record_one {} {
+ global name type if arg allbyname typebyif flags
+ if {$name==""} return
+ set allbyname($name) [list $type $arg $if $flags]
+ set name {}
+ set type {}
+ set if {}
+ set flags {}
+ set arg 0
+}
+foreach line [split $pragma_def \n] {
+ set line [string trim $line]
+ if {$line==""} continue
+ foreach {id val} [split $line :] break
+ set val [string trim $val]
+ if {$id=="NAME"} {
+ record_one
+ set name $val
+ set type [string toupper $val]
+ } elseif {$id=="TYPE"} {
+ set type $val
+ } elseif {$id=="ARG"} {
+ set arg $val
+ } elseif {$id=="IF"} {
+ lappend if $val
+ } elseif {$id=="FLAG"} {
+ foreach term [split $val] {
+ lappend flags $term
+ set allflags($term) 1
+ }
+ } else {
+ error "bad pragma_def line: $line"
+ }
+}
+record_one
+set allnames [lsort [array names allbyname]]
+
+# Generate #defines for all pragma type names. Group the pragmas that are
+# omitted from default builds (defined(SQLITE_DEBUG) and defined(SQLITE_HAS_CODEC))
+# at the end.
+#
+set pnum 0
+foreach name $allnames {
+ set type [lindex $allbyname($name) 0]
+ if {[info exists seentype($type)]} continue
+ set if [lindex $allbyname($name) 2]
+ if {[regexp SQLITE_DEBUG $if] || [regexp SQLITE_HAS_CODEC $if]} continue
+ set seentype($type) 1
+ puts [format {#define %-35s %4d} PragTyp_$type $pnum]
+ incr pnum
+}
+foreach name $allnames {
+ set type [lindex $allbyname($name) 0]
+ if {[info exists seentype($type)]} continue
+ set if [lindex $allbyname($name) 2]
+ if {[regexp SQLITE_DEBUG $if]} continue
+ set seentype($type) 1
+ puts [format {#define %-35s %4d} PragTyp_$type $pnum]
+ incr pnum
+}
+foreach name $allnames {
+ set type [lindex $allbyname($name) 0]
+ if {[info exists seentype($type)]} continue
+ set seentype($type) 1
+ puts [format {#define %-35s %4d} PragTyp_$type $pnum]
+ incr pnum
+}
+
+# Generate #defines for flags
+#
+set fv 1
+foreach f [lsort [array names allflags]] {
+ puts [format {#define PragFlag_%-20s 0x%02x} $f $fv]
+ set fv [expr {$fv*2}]
+}
+
+# Generate the lookup table
+#
+puts "static const struct sPragmaNames \173"
+puts " const char *const zName; /* Name of pragma */"
+puts " u8 ePragTyp; /* PragTyp_XXX value */"
+puts " u8 mPragFlag; /* Zero or more PragFlag_XXX values */"
+puts " u32 iArg; /* Extra argument */"
+puts "\175 aPragmaNames\[\] = \173"
+
+set current_if {}
+set spacer [format { %26s } {}]
+foreach name $allnames {
+ foreach {type arg if flag} $allbyname($name) break
+ if {$if!=$current_if} {
+ if {$current_if!=""} {
+ foreach this_if $current_if {
+ puts "#endif"
+ }
+ }
+ set current_if $if
+ if {$current_if!=""} {
+ foreach this_if $current_if {
+ puts "#if $this_if"
+ }
+ }
+ }
+ set typex [format PragTyp_%-23s $type,]
+ if {$flag==""} {
+ set flagx "0"
+ } else {
+ set flagx PragFlag_[join $flag {|PragFlag_}]
+ }
+ puts " \173 /* zName: */ \"$name\","
+ puts " /* ePragTyp: */ PragTyp_$type,"
+ puts " /* ePragFlag: */ $flagx,"
+ puts " /* iArg: */ $arg \175,"
+}
+if {$current_if!=""} {
+ foreach this_if $current_if {
+ puts "#endif"
+ }
+}
+puts "\175;"
+
+# count the number of pragmas, for information purposes
+#
+set allcnt 0
+set dfltcnt 0
+foreach name $allnames {
+ incr allcnt
+ set if [lindex $allbyname($name) 2]
+ if {[regexp {^defined} $if] || [regexp {[^!]defined} $if]} continue
+ incr dfltcnt
+}
+puts "/* Number of pragmas: $dfltcnt on by default, $allcnt total. */"
diff --git a/lang/sql/sqlite/tool/mksqlite3c-noext.tcl b/lang/sql/sqlite/tool/mksqlite3c-noext.tcl
new file mode 100644
index 00000000..017ad629
--- /dev/null
+++ b/lang/sql/sqlite/tool/mksqlite3c-noext.tcl
@@ -0,0 +1,305 @@
+#!/usr/bin/tclsh
+#
+# To build a single huge source file holding all of SQLite (or at
+# least the core components; the test harness, shell, and TCL
+# interface are omitted), first do
+#
+# make target_source
+#
+# The make target above moves all of the source code files into
+# a subdirectory named "tsrc". (This script expects to find the files
+# there and will not work if they are not found.) There are a few
+# generated C code files that are also added to the tsrc directory.
+# For example, the "parse.c" and "parse.h" files that implement the
+# parser are derived from "parse.y" using lemon.  And the
+# "keywordhash.h" file is generated by a program named "mkkeywordhash".
+#
+# After the "tsrc" directory has been created and populated, run
+# this script:
+#
+#      tclsh mksqlite3c-noext.tcl
+#
+# The amalgamated SQLite code will be written into sqlite3.c
+#
+
+# Begin by reading the "sqlite3.h" header file. Extract the version number
+# from this file.  The version number is needed to generate the header
+# comment of the amalgamation.
+#
+if {[lsearch $argv --nostatic]>=0} {
+ set addstatic 0
+} else {
+ set addstatic 1
+}
+if {[lsearch $argv --linemacros]>=0} {
+ set linemacros 1
+} else {
+ set linemacros 0
+}
+set in [open tsrc/sqlite3.h]
+set cnt 0
+set VERSION ?????
+while {![eof $in]} {
+ set line [gets $in]
+ if {$line=="" && [eof $in]} break
+ incr cnt
+ regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
+}
+close $in
+
+# Open the output file and write a header comment at the beginning
+# of the file.
+#
+set out [open sqlite3.c w]
+# Force the output to use unix line endings, even on Windows.
+fconfigure $out -translation lf
+set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
+puts $out [subst \
+{/******************************************************************************
+** This file is an amalgamation of many separate C source files from SQLite
+** version $VERSION. By combining all the individual C code files into this
+** single large file, the entire code can be compiled as a single translation
+** unit. This allows many compilers to do optimizations that would not be
+** possible if the files were compiled separately. Performance improvements
+** of 5% or more are commonly seen when SQLite is compiled as a single
+** translation unit.
+**
+** This file is all you need to compile SQLite. To use SQLite in other
+** programs, you need this file and the "sqlite3.h" header file that defines
+** the programming interface to the SQLite library. (If you do not have
+** the "sqlite3.h" header file at hand, you will find a copy embedded within
+** the text of this file. Search for "Begin file sqlite3.h" to find the start
+** of the embedded sqlite3.h header file.) Additional code files may be needed
+** if you want a wrapper to interface SQLite with your choice of programming
+** language. The code for the "sqlite3" command-line shell is also in a
+** separate file. This file contains only code for the core SQLite library.
+*/
+#define SQLITE_CORE 1
+#define SQLITE_AMALGAMATION 1}]
+if {$addstatic} {
+ puts $out \
+{#ifndef SQLITE_PRIVATE
+# define SQLITE_PRIVATE static
+#endif
+#ifndef SQLITE_API
+# define SQLITE_API
+#endif}
+}
+
+# These are the header files used by SQLite. The first time any of these
+# files are seen in a #include statement in the C code, include the complete
+# text of the file in-line. The file only needs to be included once.
+#
+foreach hdr {
+ btree.h
+ btreeInt.h
+ hash.h
+ hwtime.h
+ keywordhash.h
+ mutex.h
+ opcodes.h
+ os_common.h
+ os.h
+ pager.h
+ parse.h
+ pcache.h
+ sqlite3ext.h
+ sqlite3.h
+ sqliteicu.h
+ sqliteInt.h
+ sqliteLimit.h
+ vdbe.h
+ vdbeInt.h
+ wal.h
+} {
+ set available_hdr($hdr) 1
+}
+set available_hdr(sqliteInt.h) 0
+
+# 78 stars used for comment formatting.
+set s78 \
+{*****************************************************************************}
+
+# Insert a comment into the code
+#
+proc section_comment {text} {
+ global out s78
+ set n [string length $text]
+ set nstar [expr {60 - $n}]
+ set stars [string range $s78 0 $nstar]
+ puts $out "/************** $text $stars/"
+}
+
+# Read the source file named $filename and write it into the
+# sqlite3.c output file. If any #include statements are seen,
+# process them appropriately.
+#
+proc copy_file {filename} {
+ global seen_hdr available_hdr out addstatic linemacros
+ set ln 0
+ set tail [file tail $filename]
+ section_comment "Begin file $tail"
+ if {$linemacros} {puts $out "#line 1 \"$filename\""}
+ set in [open $filename r]
+ set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)}
+ set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \**(sqlite3[_a-zA-Z0-9]+)\(}
+ if {[file extension $filename]==".h"} {
+ set declpattern " *$declpattern"
+ }
+ set declpattern ^$declpattern
+ while {![eof $in]} {
+ set line [gets $in]
+ incr ln
+ if {[regexp {^\s*#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
+ if {[info exists available_hdr($hdr)]} {
+ if {$available_hdr($hdr)} {
+ if {$hdr!="os_common.h" && $hdr!="hwtime.h"} {
+ set available_hdr($hdr) 0
+ }
+ section_comment "Include $hdr in the middle of $tail"
+ copy_file tsrc/$hdr
+ section_comment "Continuing where we left off in $tail"
+ if {$linemacros} {puts $out "#line [expr {$ln+1}] \"$filename\""}
+ }
+ } elseif {![info exists seen_hdr($hdr)]} {
+ set seen_hdr($hdr) 1
+ puts $out $line
+ } else {
+ puts $out "/* $line */"
+ }
+ } elseif {[regexp {^#ifdef __cplusplus} $line]} {
+ puts $out "#if 0"
+ } elseif {!$linemacros && [regexp {^#line} $line]} {
+ # Skip #line directives.
+ } elseif {$addstatic && ![regexp {^(static|typedef)} $line]} {
+ regsub {^SQLITE_API } $line {} line
+ if {[regexp $declpattern $line all funcname]} {
+ # Add the SQLITE_PRIVATE or SQLITE_API keyword before functions.
+ # so that linkage can be modified at compile-time.
+ if {[regexp {^sqlite3_} $funcname]} {
+ puts $out "SQLITE_API $line"
+ } else {
+ puts $out "SQLITE_PRIVATE $line"
+ }
+ } elseif {[regexp $varpattern $line all varname]} {
+ # Add the SQLITE_PRIVATE before variable declarations or
+ # definitions for internal use
+ if {![regexp {^sqlite3_} $varname]} {
+ regsub {^extern } $line {} line
+ puts $out "SQLITE_PRIVATE $line"
+ } else {
+ if {[regexp {const char sqlite3_version\[\];} $line]} {
+ set line {const char sqlite3_version[] = SQLITE_VERSION;}
+ }
+ regsub {^SQLITE_EXTERN } $line {} line
+ puts $out "SQLITE_API $line"
+ }
+ } elseif {[regexp {^(SQLITE_EXTERN )?void \(\*sqlite3IoTrace\)} $line]} {
+ regsub {^SQLITE_EXTERN } $line {} line
+ puts $out "SQLITE_PRIVATE $line"
+ } elseif {[regexp {^void \(\*sqlite3Os} $line]} {
+ puts $out "SQLITE_PRIVATE $line"
+ } else {
+ puts $out $line
+ }
+ } else {
+ puts $out $line
+ }
+ }
+ close $in
+ section_comment "End of $tail"
+}
+
+
+# Process the source files. Process files containing commonly
+# used subroutines first in order to help the compiler find
+# inlining opportunities.
+#
+foreach file {
+ sqliteInt.h
+
+ global.c
+ ctime.c
+ status.c
+ date.c
+ os.c
+
+ fault.c
+ mem0.c
+ mem1.c
+ mem2.c
+ mem3.c
+ mem5.c
+ mutex.c
+ mutex_noop.c
+ mutex_unix.c
+ mutex_w32.c
+ malloc.c
+ printf.c
+ random.c
+ utf.c
+ util.c
+ hash.c
+ opcodes.c
+
+ os_unix.c
+ os_win.c
+
+ bitvec.c
+ pcache.c
+ pcache1.c
+ rowset.c
+ pager.c
+ wal.c
+
+ btmutex.c
+ btree.c
+ backup.c
+
+ vdbemem.c
+ vdbeaux.c
+ vdbeapi.c
+ vdbetrace.c
+ vdbe.c
+ vdbeblob.c
+ vdbesort.c
+ journal.c
+ memjournal.c
+
+ walker.c
+ resolve.c
+ expr.c
+ alter.c
+ analyze.c
+ attach.c
+ auth.c
+ build.c
+ callback.c
+ delete.c
+ func.c
+ fkey.c
+ insert.c
+ legacy.c
+ loadext.c
+ pragma.c
+ prepare.c
+ select.c
+ table.c
+ trigger.c
+ update.c
+ vacuum.c
+ vtab.c
+ where.c
+
+ parse.c
+
+ tokenize.c
+ complete.c
+
+ main.c
+ notify.c
+} {
+ copy_file tsrc/$file
+}
+
+close $out
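For reference, the copy_file proc above rewrites non-static declarations so that internal symbols receive SQLITE_PRIVATE (defined as static at the top of the amalgamation) while public sqlite3_* symbols receive SQLITE_API. A before/after illustration using two typical declarations; the exact lines are assumed, not copied from a real amalgamation run:

/* Before (as the lines appear in an individual file under tsrc/): */
int sqlite3StrICmp(const char *zLeft, const char *zRight);
const char *sqlite3_libversion(void);

/* After (as copy_file emits them into sqlite3.c): */
SQLITE_PRIVATE int sqlite3StrICmp(const char *zLeft, const char *zRight);
SQLITE_API const char *sqlite3_libversion(void);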
diff --git a/lang/sql/sqlite/tool/mksqlite3c.tcl b/lang/sql/sqlite/tool/mksqlite3c.tcl
index cf30d657..4fc881e3 100644
--- a/lang/sql/sqlite/tool/mksqlite3c.tcl
+++ b/lang/sql/sqlite/tool/mksqlite3c.tcl
@@ -23,7 +23,7 @@
#
# Begin by reading the "sqlite3.h" header file. Extract the version number
-# from in this file. The versioon number is needed to generate the header
+# from in this file. The version number is needed to generate the header
# comment of the amalgamation.
#
if {[lsearch $argv --nostatic]>=0} {
@@ -31,6 +31,11 @@ if {[lsearch $argv --nostatic]>=0} {
} else {
set addstatic 1
}
+if {[lsearch $argv --linemacros]>=0} {
+ set linemacros 1
+} else {
+ set linemacros 0
+}
set in [open tsrc/sqlite3.h]
set cnt 0
set VERSION ?????
@@ -46,6 +51,8 @@ close $in
# of the file.
#
set out [open sqlite3.c w]
+# Force the output to use unix line endings, even on Windows.
+fconfigure $out -translation lf
set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
puts $out [subst \
{/******************************************************************************
@@ -97,7 +104,6 @@ foreach hdr {
opcodes.h
os_common.h
os.h
- os_os2.h
pager.h
parse.h
pcache.h
@@ -110,10 +116,12 @@ foreach hdr {
vdbe.h
vdbeInt.h
wal.h
+ whereInt.h
} {
set available_hdr($hdr) 1
}
set available_hdr(sqliteInt.h) 0
+set available_hdr(sqlite3.h) 0
# 78 stars used for comment formatting.
set s78 \
@@ -131,12 +139,14 @@ proc section_comment {text} {
# Read the source file named $filename and write it into the
# sqlite3.c output file. If any #include statements are seen,
-# process them approprately.
+# process them appropriately.
#
proc copy_file {filename} {
- global seen_hdr available_hdr out addstatic
+ global seen_hdr available_hdr out addstatic linemacros
+ set ln 0
set tail [file tail $filename]
section_comment "Begin file $tail"
+ if {$linemacros} {puts $out "#line 1 \"$filename\""}
set in [open $filename r]
set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)}
set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \**(sqlite3[_a-zA-Z0-9]+)\(}
@@ -146,6 +156,7 @@ proc copy_file {filename} {
set declpattern ^$declpattern
while {![eof $in]} {
set line [gets $in]
+ incr ln
if {[regexp {^\s*#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
if {[info exists available_hdr($hdr)]} {
if {$available_hdr($hdr)} {
@@ -155,14 +166,23 @@ proc copy_file {filename} {
section_comment "Include $hdr in the middle of $tail"
copy_file tsrc/$hdr
section_comment "Continuing where we left off in $tail"
+ if {$linemacros} {puts $out "#line [expr {$ln+1}] \"$filename\""}
}
} elseif {![info exists seen_hdr($hdr)]} {
set seen_hdr($hdr) 1
puts $out $line
+ } elseif {[regexp {/\*\s+amalgamator:\s+keep\s+\*/} $line]} {
+ # This include file must be kept because there was a "keep"
+ # directive inside of a line comment.
+ puts $out $line
+ } else {
+ # Comment out the entire line, replacing any nested comment
+ # begin/end markers with the harmless substring "**".
+ puts $out "/* [string map [list /* ** */ **] $line] */"
}
} elseif {[regexp {^#ifdef __cplusplus} $line]} {
puts $out "#if 0"
- } elseif {[regexp {^#line} $line]} {
+ } elseif {!$linemacros && [regexp {^#line} $line]} {
# Skip #line directives.
} elseif {$addstatic && ![regexp {^(static|typedef)} $line]} {
regsub {^SQLITE_API } $line {} line
@@ -209,6 +229,7 @@ proc copy_file {filename} {
# inlining opportunities.
#
foreach file {
+ sqlite3.h
sqliteInt.h
global.c
@@ -225,7 +246,6 @@ foreach file {
mem5.c
mutex.c
mutex_noop.c
- mutex_os2.c
mutex_unix.c
mutex_w32.c
malloc.c
@@ -236,7 +256,6 @@ foreach file {
hash.c
opcodes.c
- os_os2.c
os_unix.c
os_win.c
@@ -261,6 +280,7 @@ foreach file {
vdbetrace.c
vdbe.c
vdbeblob.c
+ vdbesort.c
journal.c
memjournal.c
@@ -304,8 +324,11 @@ foreach file {
fts3_porter.c
fts3_tokenizer.c
fts3_tokenizer1.c
+ fts3_tokenize_vtab.c
fts3_write.c
fts3_snippet.c
+ fts3_unicode.c
+ fts3_unicode2.c
rtree.c
icu.c
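One of the new branches above recognizes an "amalgamator: keep" marker on an #include line: a repeated include of a header that is not part of the amalgamation would normally be commented out, but with the marker it is copied through verbatim. Illustrative usage with a hypothetical header name:

/* Hypothetical example: the trailing comment tells mksqlite3c.tcl to keep
** this #include in sqlite3.c even though the header was already seen. */
#include "hypothetical_extra.h" /* amalgamator: keep */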
diff --git a/lang/sql/sqlite/tool/mksqlite3h.tcl b/lang/sql/sqlite/tool/mksqlite3h.tcl
index 554069c3..a89b9f9b 100644
--- a/lang/sql/sqlite/tool/mksqlite3h.tcl
+++ b/lang/sql/sqlite/tool/mksqlite3h.tcl
@@ -65,9 +65,17 @@ close $in
set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+sqlite3_[_a-zA-Z0-9]+(\[|;| =)}
set declpattern {^ *[a-zA-Z][a-zA-Z_0-9 ]+ \**sqlite3_[_a-zA-Z0-9]+\(}
-# Process the src/sqlite.h.in ext/rtree/sqlite3rtree.h files.
+# Force the output to use unix line endings, even on Windows.
+fconfigure stdout -translation lf
+
+set filelist [subst {
+ $TOP/src/sqlite.h.in
+ $TOP/ext/rtree/sqlite3rtree.h
+}]
+
+# Process the source files.
#
-foreach file [list $TOP/src/sqlite.h.in $TOP/ext/rtree/sqlite3rtree.h] {
+foreach file $filelist {
set in [open $file]
while {![eof $in]} {
diff --git a/lang/sql/sqlite/tool/mksqlite3internalh.tcl b/lang/sql/sqlite/tool/mksqlite3internalh.tcl
index f02a62df..406ef5c4 100644
--- a/lang/sql/sqlite/tool/mksqlite3internalh.tcl
+++ b/lang/sql/sqlite/tool/mksqlite3internalh.tcl
@@ -61,7 +61,6 @@ foreach hdr {
opcodes.h
os_common.h
os.h
- os_os2.h
pager.h
parse.h
sqlite3ext.h
diff --git a/lang/sql/sqlite/tool/mkvsix.tcl b/lang/sql/sqlite/tool/mkvsix.tcl
new file mode 100644
index 00000000..65fa7312
--- /dev/null
+++ b/lang/sql/sqlite/tool/mkvsix.tcl
@@ -0,0 +1,655 @@
+#!/usr/bin/tclsh
+#
+# This script is used to generate a VSIX (Visual Studio Extension) file for
+# SQLite usable by Visual Studio.
+#
+# PREREQUISITES
+#
+# 1. Tcl 8.4 and later are supported; earlier versions have not been tested.
+#
+# 2. The "sqlite3.h" file is assumed to exist in the parent directory of the
+# directory containing this script. The [optional] second command line
+# argument to this script may be used to specify an alternate location.
+# This script also assumes that the "sqlite3.h" file corresponds with the
+# version of the binaries to be packaged. This assumption is not verified
+# by this script.
+#
+# 3. The temporary directory specified in the TEMP or TMP environment variables
+# must refer to an existing directory writable by the current user.
+#
+# 4. The "zip" and "unzip" command line tools must be located either in a
+# directory contained in the PATH environment variable or specified as the
+# exact file names to execute in the "ZipTool" and "UnZipTool" environment
+# variables, respectively.
+#
+# 5. The template VSIX file (which is basically a zip file) must be located in
+# a "win" directory inside the directory containing this script. It should
+# not contain any executable binaries. It should only contain dynamic
+# textual content files to be processed using [subst] and/or static content
+# files to be copied verbatim.
+#
+# 6. The executable and other compiled binary files to be packaged into the
+# final VSIX file (e.g. DLLs, LIBs, and PDBs) must be located in a single
+# directory tree. The top-level directory of the tree must be specified as
+# the first command line argument to this script. The second level
+# sub-directory names must match those of the build configuration (e.g.
+# "Debug" or "Retail"). The third level sub-directory names must match
+# those of the platform (e.g. "x86", "x64", and "ARM"). For example, the
+# binary files to be packaged would need to be organized as follows when
+# packaging the "Debug" and "Retail" build configurations for the "x86" and
+# "x64" platforms (in this example, "C:\temp" is the top-level directory as
+# specified in the first command line argument):
+#
+# C:\Temp\Debug\x86\sqlite3.lib
+# C:\Temp\Debug\x86\sqlite3.dll
+# C:\Temp\Debug\x86\sqlite3.pdb
+# C:\Temp\Debug\x64\sqlite3.lib
+# C:\Temp\Debug\x64\sqlite3.dll
+# C:\Temp\Debug\x64\sqlite3.pdb
+# C:\Temp\Retail\x86\sqlite3.lib
+# C:\Temp\Retail\x86\sqlite3.dll
+# C:\Temp\Retail\x86\sqlite3.pdb
+# C:\Temp\Retail\x64\sqlite3.lib
+# C:\Temp\Retail\x64\sqlite3.dll
+# C:\Temp\Retail\x64\sqlite3.pdb
+#
+# The above directory tree organization is performed automatically if the
+# "tool\build-all-msvc.bat" batch script is used to build the binary files
+# to be packaged.
+#
+# USAGE
+#
+# The first argument to this script is required and must be the name of the
+# top-level directory containing the directories and files organized into a
+# tree as described in item 6 of the PREREQUISITES section, above. The second
+# argument is optional and if present must contain the name of the directory
+# containing the root of the source tree for SQLite. The third argument is
+# optional and if present must contain the flavor of the VSIX package to build.
+# Currently, the supported package flavors are "WinRT", "WinRT81", "WP80", and
+# "Win32".  The fourth argument is optional and if present must be a string
+# containing a list of platforms to include in the VSIX package. The format
+# of the platform list string is "platform1,platform2,platform3". Typically,
+# when on Windows, this script is executed using commands similar to the
+# following from a normal Windows command prompt:
+#
+# CD /D C:\dev\sqlite\core
+# tclsh85 tool\mkvsix.tcl C:\Temp
+#
+# In the example above, "C:\dev\sqlite\core" represents the root of the source
+# tree for SQLite and "C:\Temp" represents the top-level directory containing
+# the executable and other compiled binary files, organized into a directory
+# tree as described in item 6 of the PREREQUISITES section, above.
+#
+# This script should work on non-Windows platforms as well, provided that all
+# the requirements listed in the PREREQUISITES section are met.
+#
+# NOTES
+#
+# The temporary directory is used as a staging area for the final VSIX file.
+# The template VSIX file is extracted, its contents processed, and then the
+# resulting files are packaged into the final VSIX file.
+#
+package require Tcl 8.4
+
+proc fail { {error ""} {usage false} } {
+ if {[string length $error] > 0} then {
+ puts stdout $error
+ if {!$usage} then {exit 1}
+ }
+
+ puts stdout "usage:\
+[file tail [info nameofexecutable]]\
+[file tail [info script]] <binaryDirectory> \[sourceDirectory\]\
+\[packageFlavor\] \[platformNames\]"
+
+ exit 1
+}
+
+proc getEnvironmentVariable { name } {
+ #
+ # NOTE: Returns the value of the specified environment variable or an empty
+ # string for environment variables that do not exist in the current
+ # process environment.
+ #
+ return [expr {[info exists ::env($name)] ? $::env($name) : ""}]
+}
+
+proc getTemporaryPath {} {
+ #
+ # NOTE: Returns the normalized path to the first temporary directory found
+ # in the typical set of environment variables used for that purpose
+ # or an empty string to signal a failure to locate such a directory.
+ #
+ set names [list]
+
+ foreach name [list TEMP TMP] {
+ lappend names [string toupper $name] [string tolower $name] \
+ [string totitle $name]
+ }
+
+ foreach name $names {
+ set value [getEnvironmentVariable $name]
+
+ if {[string length $value] > 0} then {
+ return [file normalize $value]
+ }
+ }
+
+ return ""
+}
+
+proc appendArgs { args } {
+ #
+ # NOTE: Returns all passed arguments joined together as a single string with
+ # no intervening spaces between arguments.
+ #
+ eval append result $args
+}
+
+proc readFile { fileName } {
+ #
+ # NOTE: Reads and returns the entire contents of the specified file, which
+ # may contain binary data.
+ #
+ set file_id [open $fileName RDONLY]
+ fconfigure $file_id -encoding binary -translation binary
+ set result [read $file_id]
+ close $file_id
+ return $result
+}
+
+proc writeFile { fileName data } {
+ #
+ # NOTE: Writes the entire contents of the specified file, which may contain
+ # binary data.
+ #
+ set file_id [open $fileName {WRONLY CREAT TRUNC}]
+ fconfigure $file_id -encoding binary -translation binary
+ puts -nonewline $file_id $data
+ close $file_id
+ return ""
+}
+
+proc substFile { fileName } {
+ #
+ # NOTE: Performs all Tcl command, variable, and backslash substitutions in
+ # the specified file and then rewrites the contents of that same file
+ # with the substituted data.
+ #
+ return [writeFile $fileName [uplevel 1 [list subst [readFile $fileName]]]]
+}
+
+proc replaceFileNameTokens { fileName name buildName platformName } {
+ #
+ # NOTE: Returns the specified file name containing the platform name instead
+ # of platform placeholder tokens.
+ #
+ return [string map [list <build> $buildName <platform> $platformName \
+ <name> $name] $fileName]
+}
+
+#
+# NOTE: This is the entry point for this script.
+#
+set script [file normalize [info script]]
+
+if {[string length $script] == 0} then {
+ fail "script file currently being evaluated is unknown" true
+}
+
+set path [file dirname $script]
+set rootName [file rootname [file tail $script]]
+
+###############################################################################
+
+#
+# NOTE: Process and verify all the command line arguments.
+#
+set argc [llength $argv]
+if {$argc < 1 || $argc > 4} then {fail}
+
+set binaryDirectory [lindex $argv 0]
+
+if {[string length $binaryDirectory] == 0} then {
+ fail "invalid binary directory"
+}
+
+if {![file exists $binaryDirectory] || \
+ ![file isdirectory $binaryDirectory]} then {
+ fail "binary directory does not exist"
+}
+
+if {$argc >= 2} then {
+ set sourceDirectory [lindex $argv 1]
+} else {
+ #
+ # NOTE: Assume that the source directory is the parent directory of the one
+ # that contains this script file.
+ #
+ set sourceDirectory [file dirname $path]
+}
+
+if {[string length $sourceDirectory] == 0} then {
+ fail "invalid source directory"
+}
+
+if {![file exists $sourceDirectory] || \
+ ![file isdirectory $sourceDirectory]} then {
+ fail "source directory does not exist"
+}
+
+if {$argc >= 3} then {
+ set packageFlavor [lindex $argv 2]
+} else {
+ #
+ # NOTE: Assume the package flavor is WinRT.
+ #
+ set packageFlavor WinRT
+}
+
+if {[string length $packageFlavor] == 0} then {
+ fail "invalid package flavor"
+}
+
+if {[string equal -nocase $packageFlavor WinRT]} then {
+ set shortName SQLite.WinRT
+ set displayName "SQLite for Windows Runtime"
+ set targetPlatformIdentifier Windows
+ set targetPlatformVersion v8.0
+ set minVsVersion 11.0
+ set extraSdkPath ""
+ set extraFileListAttributes [appendArgs \
+ "\r\n " {AppliesTo="WindowsAppContainer"} \
+ "\r\n " {DependsOn="Microsoft.VCLibs, version=11.0"}]
+} elseif {[string equal -nocase $packageFlavor WinRT81]} then {
+ set shortName SQLite.WinRT81
+ set displayName "SQLite for Windows Runtime (Windows 8.1)"
+ set targetPlatformIdentifier Windows
+ set targetPlatformVersion v8.1
+ set minVsVersion 12.0
+ set extraSdkPath ""
+ set extraFileListAttributes [appendArgs \
+ "\r\n " {AppliesTo="WindowsAppContainer"} \
+ "\r\n " {DependsOn="Microsoft.VCLibs, version=12.0"}]
+} elseif {[string equal -nocase $packageFlavor WP80]} then {
+ set shortName SQLite.WP80
+ set displayName "SQLite for Windows Phone"
+ set targetPlatformIdentifier "Windows Phone"
+ set targetPlatformVersion v8.0
+ set minVsVersion 11.0
+ set extraSdkPath "\\..\\$targetPlatformIdentifier"
+ set extraFileListAttributes ""
+} elseif {[string equal -nocase $packageFlavor Win32]} then {
+ set shortName SQLite.Win32
+ set displayName "SQLite for Windows"
+ set targetPlatformIdentifier Windows
+ set targetPlatformVersion v8.0
+ set minVsVersion 11.0
+ set extraSdkPath ""
+ set extraFileListAttributes [appendArgs \
+ "\r\n " {AppliesTo="VisualC"} \
+ "\r\n " {DependsOn="Microsoft.VCLibs, version=11.0"}]
+} else {
+ fail "unsupported package flavor, must be one of: WinRT WinRT81 WP80 Win32"
+}
+
+if {$argc >= 4} then {
+ set platformNames [list]
+
+ foreach platformName [split [lindex $argv 3] ", "] {
+ if {[string length $platformName] > 0} then {
+ lappend platformNames $platformName
+ }
+ }
+}
+
+###############################################################################
+
+#
+# NOTE: Evaluate the user-specific customizations file, if it exists.
+#
+set userFile [file join $path [appendArgs \
+ $rootName . $tcl_platform(user) .tcl]]
+
+if {[file exists $userFile] && \
+ [file isfile $userFile]} then {
+ source $userFile
+}
+
+###############################################################################
+
+set templateFile [file join $path win sqlite.vsix]
+
+if {![file exists $templateFile] || \
+ ![file isfile $templateFile]} then {
+ fail [appendArgs "template file \"" $templateFile "\" does not exist"]
+}
+
+set currentDirectory [pwd]
+set outputFile [file join $currentDirectory [appendArgs sqlite- \
+ $packageFlavor -output.vsix]]
+
+if {[file exists $outputFile]} then {
+ fail [appendArgs "output file \"" $outputFile "\" already exists"]
+}
+
+###############################################################################
+
+#
+# NOTE: Make sure that a valid temporary directory exists.
+#
+set temporaryDirectory [getTemporaryPath]
+
+if {[string length $temporaryDirectory] == 0 || \
+ ![file exists $temporaryDirectory] || \
+ ![file isdirectory $temporaryDirectory]} then {
+ fail "cannot locate a usable temporary directory"
+}
+
+#
+# NOTE: Setup the staging directory to have a unique name inside of the
+# configured temporary directory.
+#
+set stagingDirectory [file normalize [file join $temporaryDirectory \
+ [appendArgs $rootName . [pid]]]]
+
+###############################################################################
+
+#
+# NOTE: Configure the external zipping tool. First, see if it has already
+# been pre-configured. If not, try to query it from the environment.
+# Finally, fallback on the default of simply "zip", which will then
+#       Finally, fall back on the default of simply "zip", which will then
+#
+if {![info exists zip]} then {
+ if {[info exists env(ZipTool)]} then {
+ set zip $env(ZipTool)
+ }
+ if {![info exists zip] || ![file exists $zip]} then {
+ set zip zip
+ }
+}
+
+#
+# NOTE: Configure the external unzipping tool. First, see if it has already
+# been pre-configured. If not, try to query it from the environment.
+#       Finally, fall back on the default of simply "unzip", which will then
+# be assumed to exist somewhere along the PATH.
+#
+if {![info exists unzip]} then {
+ if {[info exists env(UnZipTool)]} then {
+ set unzip $env(UnZipTool)
+ }
+ if {![info exists unzip] || ![file exists $unzip]} then {
+ set unzip unzip
+ }
+}
+
+###############################################################################
+
+#
+# NOTE: Attempt to extract the SQLite version from the "sqlite3.h" header file
+# in the source directory. This script assumes that the header file has
+# already been generated by the build process.
+#
+set pattern {^#define\s+SQLITE_VERSION\s+"(.*)"$}
+set data [readFile [file join $sourceDirectory sqlite3.h]]
+
+if {![regexp -line -- $pattern $data dummy version]} then {
+ fail [appendArgs "cannot locate SQLITE_VERSION value in \"" \
+ [file join $sourceDirectory sqlite3.h] \"]
+}
+
+###############################################################################
+
+#
+# NOTE: Setup all the master file list data. This includes the source file
+# names, the destination file names, and the file processing flags. The
+# possible file processing flags are:
+#
+# "buildNeutral" -- This flag indicates the file location and content do
+# not depend on the build configuration.
+#
+# "platformNeutral" -- This flag indicates the file location and content
+# do not depend on the build platform.
+#
+# "subst" -- This flag indicates that the file contains dynamic textual
+# content that needs to be processed using [subst] prior to
+# packaging the file into the final VSIX package. The primary
+# use of this flag is to insert the name of the VSIX package,
+# some package flavor-specific value, or the SQLite version
+# into a file.
+#
+# "noDebug" -- This flag indicates that the file should be skipped when
+# processing the debug build.
+#
+# "noRetail" -- This flag indicates that the file should be skipped when
+# processing the retail build.
+#
+# "move" -- This flag indicates that the file should be moved from the
+# source to the destination instead of being copied.
+#
+# This file metadata may be overridden, either in whole or in part, via
+# the user-specific customizations file.
+#
+if {![info exists fileNames(source)]} then {
+ set fileNames(source) [list "" "" \
+ [file join $stagingDirectory DesignTime <build> <platform> sqlite3.props] \
+ [file join $sourceDirectory sqlite3.h] \
+ [file join $binaryDirectory <build> <platform> sqlite3.lib] \
+ [file join $binaryDirectory <build> <platform> sqlite3.dll]]
+
+ if {![info exists no(symbols)]} then {
+ lappend fileNames(source) \
+ [file join $binaryDirectory <build> <platform> sqlite3.pdb]
+ }
+}
+
+if {![info exists fileNames(destination)]} then {
+ set fileNames(destination) [list \
+ [file join $stagingDirectory extension.vsixmanifest] \
+ [file join $stagingDirectory SDKManifest.xml] \
+ [file join $stagingDirectory DesignTime <build> <platform> <name>.props] \
+ [file join $stagingDirectory DesignTime <build> <platform> sqlite3.h] \
+ [file join $stagingDirectory DesignTime <build> <platform> sqlite3.lib] \
+ [file join $stagingDirectory Redist <build> <platform> sqlite3.dll]]
+
+ if {![info exists no(symbols)]} then {
+ lappend fileNames(destination) \
+ [file join $stagingDirectory Redist <build> <platform> sqlite3.pdb]
+ }
+}
+
+if {![info exists fileNames(flags)]} then {
+ set fileNames(flags) [list \
+ [list buildNeutral platformNeutral subst] \
+ [list buildNeutral platformNeutral subst] \
+ [list buildNeutral platformNeutral subst move] \
+ [list buildNeutral platformNeutral] \
+ [list] [list] [list noRetail]]
+
+ if {![info exists no(symbols)]} then {
+ lappend fileNames(flags) [list noRetail]
+ }
+}
+
+###############################################################################
+
+#
+# NOTE: Setup the list of builds supported by this script. These may be
+# overridden via the user-specific customizations file.
+#
+if {![info exists buildNames]} then {
+ set buildNames [list Debug Retail]
+}
+
+###############################################################################
+
+#
+# NOTE: Setup the list of platforms supported by this script. These may be
+# overridden via the command line or the user-specific customizations
+# file.
+#
+if {![info exists platformNames]} then {
+ set platformNames [list x86 x64 ARM]
+}
+
+###############################################################################
+
+#
+# NOTE: Make sure the staging directory exists, creating it if necessary.
+#
+file mkdir $stagingDirectory
+
+#
+# NOTE: Build the Tcl command used to extract the template VSIX package to
+# the staging directory.
+#
+set extractCommand [list exec -- $unzip $templateFile -d $stagingDirectory]
+
+#
+# NOTE: Extract the template VSIX package to the staging directory.
+#
+eval $extractCommand
+
+###############################################################################
+
+#
+# NOTE: Process each file in the master file list. There are actually three
+# parallel lists that contain the source file names, the destination file
+# names, and the file processing flags. If the "buildNeutral" flag is
+# present, the file location and content do not depend on the build
+# configuration and "CommonConfiguration" will be used in place of the
+# build configuration name. If the "platformNeutral" flag is present,
+# the file location and content do not depend on the build platform and
+# "neutral" will be used in place of the build platform name. If the
+# "subst" flag is present, the file is assumed to be a text file that may
+# contain Tcl variable, command, and backslash replacements, to be
+# dynamically replaced during processing using the Tcl [subst] command.
+# If the "noDebug" flag is present, the file will be skipped when
+# processing for the debug build. If the "noRetail" flag is present, the
+# file will be skipped when processing for the retail build. If the
+# "move" flag is present, the source file will be deleted after it is
+# copied to the destination file. If the source file name is an empty
+# string, the destination file name will be assumed to already exist in
+# the staging directory and will not be copied; however, Tcl variable,
+# command, and backslash replacements may still be performed on the
+# destination file prior to the final VSIX package being built if the
+# "subst" flag is present.
+#
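+# NOTE: As a purely illustrative example, with a build name of "Retail", a
+#       platform name of "x64", and a hypothetical package short name of
+#       "SQLite.WinRT", the destination entry
+#
+#           Redist <build> <platform> sqlite3.dll
+#
+#       would be rewritten by [replaceFileNameTokens] to
+#
+#           Redist Retail x64 sqlite3.dll
+#
+#       assuming that procedure simply substitutes the <build>, <platform>,
+#       and <name> tokens with the supplied values.
+#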
+foreach sourceFileName $fileNames(source) \
+ destinationFileName $fileNames(destination) \
+ fileFlags $fileNames(flags) {
+ #
+ # NOTE: Process the file flags into separate boolean variables that may be
+ # used within the loop.
+ #
+ set isBuildNeutral [expr {[lsearch $fileFlags buildNeutral] != -1}]
+ set isPlatformNeutral [expr {[lsearch $fileFlags platformNeutral] != -1}]
+ set isMove [expr {[lsearch $fileFlags move] != -1}]
+ set useSubst [expr {[lsearch $fileFlags subst] != -1}]
+
+ #
+ # NOTE: If the current file is build-neutral, then only one build will
+ # be processed for it, namely "CommonConfiguration"; otherwise, each
+ # supported build will be processed for it individually.
+ #
+ foreach buildName \
+ [expr {$isBuildNeutral ? [list CommonConfiguration] : $buildNames}] {
+ #
+ # NOTE: Should the current file be skipped for this build?
+ #
+ if {[lsearch $fileFlags no${buildName}] != -1} then {
+ continue
+ }
+
+ #
+ # NOTE: If the current file is platform-neutral, then only one platform
+ # will be processed for it, namely "neutral"; otherwise, each
+ # supported platform will be processed for it individually.
+ #
+ foreach platformName \
+ [expr {$isPlatformNeutral ? [list neutral] : $platformNames}] {
+ #
+ # NOTE: Use the actual platform name in the destination file name.
+ #
+ set newDestinationFileName [replaceFileNameTokens $destinationFileName \
+ $shortName $buildName $platformName]
+
+ #
+ # NOTE: Does the source file need to be copied to the destination file?
+ #
+ if {[string length $sourceFileName] > 0} then {
+ #
+ # NOTE: First, make sure the destination directory exists.
+ #
+ file mkdir [file dirname $newDestinationFileName]
+
+ #
+ # NOTE: Then, copy the source file to the destination file verbatim.
+ #
+ set newSourceFileName [replaceFileNameTokens $sourceFileName \
+ $shortName $buildName $platformName]
+
+ file copy $newSourceFileName $newDestinationFileName
+
+ #
+ # NOTE: If this is a move instead of a copy, delete the source file
+ # now.
+ #
+ if {$isMove} then {
+ file delete $newSourceFileName
+ }
+ }
+
+ #
+ # NOTE: Does the destination file contain dynamic replacements that must
+ # be processed now?
+ #
+ if {$useSubst} then {
+ #
+ # NOTE: Perform any dynamic replacements contained in the destination
+ # file and then re-write it in-place.
+ #
+ substFile $newDestinationFileName
+ }
+ }
+ }
+}
+
+###############################################################################
+
+#
+# NOTE: Change the current directory to the staging directory so that the
+#       external archive building tool can pick up the necessary files using
+# relative paths.
+#
+cd $stagingDirectory
+
+#
+# NOTE: Build the Tcl command used to archive the final VSIX package in the
+# output directory.
+#
+set archiveCommand [list exec -- $zip -r $outputFile *]
+
+#
+# NOTE: Build the final VSIX package archive in the output directory.
+#
+eval $archiveCommand
+
+#
+# NOTE: Change back to the previously saved current directory.
+#
+cd $currentDirectory
+
+#
+# NOTE: Cleanup the temporary staging directory.
+#
+file delete -force $stagingDirectory
+
+###############################################################################
+
+#
+# NOTE: Success; emit the fully qualified path of the generated VSIX file.
+#
+puts stdout $outputFile
diff --git a/lang/sql/sqlite/tool/offsets.c b/lang/sql/sqlite/tool/offsets.c
new file mode 100644
index 00000000..8e098e71
--- /dev/null
+++ b/lang/sql/sqlite/tool/offsets.c
@@ -0,0 +1,329 @@
+/*
+** This program searches an SQLite database file for the lengths and
+** offsets for all TEXT or BLOB entries for a particular column of a
+** particular table. The rowid, size and offset for the column are
+** written to standard output. There are three arguments, which are the
+** name of the database file, the table, and the column.
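+**
+** Example usage (illustrative; "offsets" stands for however the compiled
+** binary is named, and "t1"/"b" are a hypothetical table and column):
+**
+**       ./offsets test.db t1 b
+**       ./offsets --trace test.db t1 b
+**
+** Each regular output line has the form "rowid R size S offset O"; values
+** too large to fit on a single page are reported with a short
+** "# overflow rowid R" line instead.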
+*/
+#include "sqlite3.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+typedef unsigned char u8;
+typedef struct GState GState;
+
+#define ArraySize(X) (sizeof(X)/sizeof(X[0]))
+
+/*
+** Global state information for this program.
+*/
+struct GState {
+ char *zErr; /* Error message text */
+ FILE *f; /* Open database file */
+ int szPg; /* Page size for the database file */
+ int iRoot; /* Root page of the table */
+ int iCol; /* Column number for the column */
+ int pgno; /* Current page number */
+ u8 *aPage; /* Current page content */
+ u8 *aStack[20]; /* Page stack */
+ int aPgno[20]; /* Page number stack */
+ int nStack; /* Depth of stack */
+ int bTrace; /* True for tracing output */
+};
+
+/*
+** Write an error.
+*/
+static void ofstError(GState *p, const char *zFormat, ...){
+ va_list ap;
+ sqlite3_free(p->zErr);
+ va_start(ap, zFormat);
+ p->zErr = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+}
+
+/*
+** Write a trace message
+*/
+static void ofstTrace(GState *p, const char *zFormat, ...){
+ va_list ap;
+ if( p->bTrace ){
+ va_start(ap, zFormat);
+ vprintf(zFormat, ap);
+ va_end(ap);
+ }
+}
+
+/*
+** Find the root page of the table and the column number of the column.
+*/
+static void ofstRootAndColumn(
+ GState *p, /* Global state */
+ const char *zFile, /* Name of the database file */
+ const char *zTable, /* Name of the table */
+ const char *zColumn /* Name of the column */
+){
+ sqlite3 *db = 0;
+ sqlite3_stmt *pStmt = 0;
+ char *zSql = 0;
+ int rc;
+ if( p->zErr ) return;
+ rc = sqlite3_open(zFile, &db);
+ if( rc ){
+ ofstError(p, "cannot open database file \"%s\"", zFile);
+ goto rootAndColumn_exit;
+ }
+ zSql = sqlite3_mprintf("SELECT rootpage FROM sqlite_master WHERE name=%Q",
+ zTable);
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
+ sqlite3_free(zSql);
+ if( p->zErr ) goto rootAndColumn_exit;
+ if( sqlite3_step(pStmt)!=SQLITE_ROW ){
+    ofstError(p, "cannot find table [%s]", zTable);
+ sqlite3_finalize(pStmt);
+ goto rootAndColumn_exit;
+ }
+  p->iRoot = sqlite3_column_int(pStmt, 0);
+ sqlite3_finalize(pStmt);
+
+ p->iCol = -1;
+ zSql = sqlite3_mprintf("PRAGMA table_info(%Q)", zTable);
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
+ sqlite3_free(zSql);
+ if( p->zErr ) goto rootAndColumn_exit;
+ while( sqlite3_step(pStmt)==SQLITE_ROW ){
+    const char *zCol = (const char*)sqlite3_column_text(pStmt, 1);
+ if( strlen(zCol)==strlen(zColumn)
+ && sqlite3_strnicmp(zCol, zColumn, strlen(zCol))==0
+ ){
+ p->iCol = sqlite3_column_int(pStmt, 0);
+ break;
+ }
+ }
+ sqlite3_finalize(pStmt);
+ if( p->iCol<0 ){
+ ofstError(p, "no such column: %s.%s", zTable, zColumn);
+ goto rootAndColumn_exit;
+ }
+
+ zSql = sqlite3_mprintf("PRAGMA page_size");
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
+ sqlite3_free(zSql);
+ if( p->zErr ) goto rootAndColumn_exit;
+ if( sqlite3_step(pStmt)!=SQLITE_ROW ){
+ ofstError(p, "cannot find page size");
+ }else{
+ p->szPg = sqlite3_column_int(pStmt, 0);
+ }
+ sqlite3_finalize(pStmt);
+
+rootAndColumn_exit:
+ sqlite3_close(db);
+ return;
+}
+
+/*
+** Pop a page from the stack
+*/
+static void ofstPopPage(GState *p){
+ if( p->nStack<=0 ) return;
+ p->nStack--;
+ sqlite3_free(p->aStack[p->nStack]);
+  if( p->nStack>0 ){
+    p->pgno = p->aPgno[p->nStack-1];
+    p->aPage = p->aStack[p->nStack-1];
+  }else{
+    p->pgno = 0;      /* stack is now empty; avoid reading aPgno[-1] */
+    p->aPage = 0;
+  }
+}
+
+
+/*
+** Push a new page onto the stack.
+*/
+static void ofstPushPage(GState *p, int pgno){
+ u8 *pPage;
+ size_t got;
+ if( p->zErr ) return;
+ if( p->nStack >= ArraySize(p->aStack) ){
+ ofstError(p, "page stack overflow");
+ return;
+ }
+ p->aPgno[p->nStack] = pgno;
+ p->aStack[p->nStack] = pPage = sqlite3_malloc( p->szPg );
+ if( pPage==0 ){
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ p->nStack++;
+ p->aPage = pPage;
+ p->pgno = pgno;
+ fseek(p->f, (pgno-1)*p->szPg, SEEK_SET);
+ got = fread(pPage, 1, p->szPg, p->f);
+ if( got!=p->szPg ){
+ ofstError(p, "unable to read page %d", pgno);
+ ofstPopPage(p);
+ }
+}
+
+/* Read a two-byte integer at the given offset into the current page */
+static int ofst2byte(GState *p, int ofst){
+ int x = p->aPage[ofst];
+ return (x<<8) + p->aPage[ofst+1];
+}
+
+/* Read a four-byte integer at the given offset into the current page */
+static int ofst4byte(GState *p, int ofst){
+ int x = p->aPage[ofst];
+ x = (x<<8) + p->aPage[ofst+1];
+ x = (x<<8) + p->aPage[ofst+2];
+ x = (x<<8) + p->aPage[ofst+3];
+ return x;
+}
+
+/* Read a variable-length integer. Update the offset */
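+/* Worked example: the two-byte varint 0x81 0x23 decodes to
+** (0x01<<7) + 0x23 = 163 and advances the offset by 2. */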
+static sqlite3_int64 ofstVarint(GState *p, int *pOfst){
+ sqlite3_int64 x = 0;
+ u8 *a = &p->aPage[*pOfst];
+ int n = 0;
+ while( n<8 && (a[0] & 0x80)!=0 ){
+ x = (x<<7) + (a[0] & 0x7f);
+ n++;
+ a++;
+ }
+ if( n==8 ){
+ x = (x<<8) + a[0];
+ }else{
+ x = (x<<7) + a[0];
+ }
+ *pOfst += (n+1);
+ return x;
+}
+
+/* Return the absolute offset into a file for the given offset
+** into the current page */
+static int ofstInFile(GState *p, int ofst){
+ return p->szPg*(p->pgno-1) + ofst;
+}
+
+/* Return the size (in bytes) of the data corresponding to the
+** given serial code */
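+/* These sizes follow the SQLite record format: serial types 0..4 take
+** 0..4 bytes, type 5 is a 6-byte integer, types 6 and 7 take 8 bytes,
+** types 8..11 carry no data, and types 12 and larger are BLOB/TEXT
+** values of (code-12)/2 bytes (integer division also covers the odd
+** TEXT codes, which are formally (code-13)/2 bytes). */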
+static int ofstSerialSize(int scode){
+ if( scode<5 ) return scode;
+ if( scode==5 ) return 6;
+ if( scode<8 ) return 8;
+ if( scode<12 ) return 0;
+ return (scode-12)/2;
+}
+
+/* Forward reference */
+static void ofstWalkPage(GState*, int);
+
+/* Walk an interior btree page */
+static void ofstWalkInteriorPage(GState *p){
+ int nCell;
+ int i;
+ int ofst;
+ int iChild;
+
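+  /* Btree page header layout: the 2-byte cell count is at byte offset 3,
+  ** the cell pointer array of an interior page starts at offset 12, and
+  ** the right-most child page number is the 4-byte value at offset 8. */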
+ nCell = ofst2byte(p, 3);
+ for(i=0; i<nCell; i++){
+ ofst = ofst2byte(p, 12+i*2);
+ iChild = ofst4byte(p, ofst);
+ ofstWalkPage(p, iChild);
+ if( p->zErr ) return;
+ }
+ ofstWalkPage(p, ofst4byte(p, 8));
+}
+
+/* Walk a leaf btree page */
+static void ofstWalkLeafPage(GState *p){
+ int nCell;
+ int i;
+ int ofst;
+ int nPayload;
+ sqlite3_int64 rowid;
+ int nHdr;
+ int j;
+ int scode;
+ int sz;
+ int dataOfst;
+ char zMsg[200];
+
+ nCell = ofst2byte(p, 3);
+ for(i=0; i<nCell; i++){
+ ofst = ofst2byte(p, 8+i*2);
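+    /* A table-leaf cell is: payload-length varint, rowid varint, then the
+    ** record itself (header-length varint, column serial types, data). */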
+ nPayload = ofstVarint(p, &ofst);
+ rowid = ofstVarint(p, &ofst);
+ if( nPayload > p->szPg-35 ){
+ sqlite3_snprintf(sizeof(zMsg), zMsg,
+ "# overflow rowid %lld", rowid);
+ printf("%s\n", zMsg);
+ continue;
+ }
+ dataOfst = ofst;
+ nHdr = ofstVarint(p, &ofst);
+ dataOfst += nHdr;
+ for(j=0; j<p->iCol; j++){
+ scode = ofstVarint(p, &ofst);
+ dataOfst += ofstSerialSize(scode);
+ }
+ scode = ofstVarint(p, &ofst);
+ sz = ofstSerialSize(scode);
+ sqlite3_snprintf(sizeof(zMsg), zMsg,
+ "rowid %12lld size %5d offset %8d",
+ rowid, sz, ofstInFile(p, dataOfst));
+ printf("%s\n", zMsg);
+ }
+}
+
+/*
+** Output results from a single page.
+*/
+static void ofstWalkPage(GState *p, int pgno){
+ if( p->zErr ) return;
+ ofstPushPage(p, pgno);
+ if( p->zErr ) return;
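+  /* The first byte of a btree page gives its type: 5 is an interior
+  ** table page, 13 is a table leaf page.  Index pages (types 2 and 10)
+  ** never appear under a table root. */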
+ if( p->aPage[0]==5 ){
+ ofstWalkInteriorPage(p);
+ }else if( p->aPage[0]==13 ){
+ ofstWalkLeafPage(p);
+ }else{
+ ofstError(p, "page %d has a faulty type byte: %d", pgno, p->aPage[0]);
+ }
+ ofstPopPage(p);
+}
+
+int main(int argc, char **argv){
+ GState g;
+ memset(&g, 0, sizeof(g));
+ if( argc>2 && strcmp(argv[1],"--trace")==0 ){
+ g.bTrace = 1;
+ argc--;
+ argv++;
+ }
+ if( argc!=4 ){
+ fprintf(stderr, "Usage: %s DATABASE TABLE COLUMN\n", *argv);
+ exit(1);
+ }
+ ofstRootAndColumn(&g, argv[1], argv[2], argv[3]);
+ if( g.zErr ){
+ fprintf(stderr, "%s\n", g.zErr);
+ exit(1);
+ }
+ ofstTrace(&g, "# szPg = %d\n", g.szPg);
+ ofstTrace(&g, "# iRoot = %d\n", g.iRoot);
+ ofstTrace(&g, "# iCol = %d\n", g.iCol);
+ g.f = fopen(argv[1], "rb");
+ if( g.f==0 ){
+ fprintf(stderr, "cannot open \"%s\"\n", argv[1]);
+ exit(1);
+ }
+ ofstWalkPage(&g, g.iRoot);
+ if( g.zErr ){
+ fprintf(stderr, "%s\n", g.zErr);
+ exit(1);
+ }
+ return 0;
+}
diff --git a/lang/sql/sqlite/tool/omittest.tcl b/lang/sql/sqlite/tool/omittest.tcl
index f1963ff1..5437f2eb 100644
--- a/lang/sql/sqlite/tool/omittest.tcl
+++ b/lang/sql/sqlite/tool/omittest.tcl
@@ -31,8 +31,8 @@ should work. The following properties are required:
More precisely, the following two invocations must be supported:
- make -f $::MAKEFILE testfixture OPTS="-DSQLITE_OMIT_ALTERTABLE=1"
- make -f $::MAKEFILE test
+ $::MAKEBIN -f $::MAKEFILE testfixture OPTS="-DSQLITE_OMIT_ALTERTABLE=1"
+ $::MAKEBIN -f $::MAKEFILE test
Makefiles generated by the sqlite configure program cannot be used as
they do not respect the OPTS variable.
@@ -48,19 +48,16 @@ they do not respect the OPTS variable.
#
#
proc run_quick_test {dir omit_symbol_list} {
- set target "testfixture"
# Compile the value of the OPTS Makefile variable.
- set opts "-DSQLITE_MEMDEBUG -DSQLITE_DEBUG -DSQLITE_NO_SYNC"
+ set opts ""
if {$::tcl_platform(platform)=="windows"} {
- append opts " -DSQLITE_OS_WIN=1"
+ append opts "OPTS += -DSQLITE_OS_WIN=1\n"
set target "testfixture.exe"
- } elseif {$::tcl_platform(platform)=="os2"} {
- append opts " -DSQLITE_OS_OS2=1"
} else {
- append opts " -DSQLITE_OS_UNIX=1"
+ append opts "OPTS += -DSQLITE_OS_UNIX=1\n"
}
foreach sym $omit_symbol_list {
- append opts " -D${sym}=1"
+ append opts "OPTS += -D${sym}=1\n"
}
# Create the directory and do the build. If an error occurs return
@@ -68,12 +65,20 @@ proc run_quick_test {dir omit_symbol_list} {
file mkdir $dir
puts -nonewline "Building $dir..."
flush stdout
-catch {
- file copy -force ./config.h $dir
- file copy -force ./libtool $dir
-}
+ catch {
+ file copy -force ./config.h $dir
+ file copy -force ./libtool $dir
+ }
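+
+  # Copy $::MAKEFILE into the build directory, splicing the OPTS lines
+  # assembled above in front of its first "include" line, so that the
+  # build below picks up the OMIT/ENABLE symbols without editing the
+  # original makefile.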
+ set fd [open $::MAKEFILE]
+ set mkfile [read $fd]
+ close $fd
+ regsub {\ninclude} $mkfile "\n$opts\ninclude" mkfile
+ set fd [open $dir/makefile w]
+ puts $fd $mkfile
+ close $fd
+
set rc [catch {
- exec make -C $dir -f $::MAKEFILE $target OPTS=$opts >& $dir/build.log
+ exec $::MAKEBIN -C $dir -f makefile clean $::TARGET >& $dir/build.log
}]
if {$rc} {
puts "No good. See $dir/build.log."
@@ -86,7 +91,7 @@ catch {
# of trying to build the sqlite shell. The sqlite shell won't build
# with some of the OMIT options (i.e OMIT_COMPLETE).
set sqlite3_dummy $dir/sqlite3
- if {$::tcl_platform(platform)=="windows" || $::tcl_platform(platform)=="os2"} {
+ if {$::tcl_platform(platform)=="windows"} {
append sqlite3_dummy ".exe"
}
if {![file exists $sqlite3_dummy]} {
@@ -102,7 +107,7 @@ catch {
puts -nonewline "Testing $dir..."
flush stdout
set rc [catch {
- exec make -C $dir -f $::MAKEFILE test OPTS=$opts >& $dir/test.log
+ exec $::MAKEBIN -C $dir -f makefile test >& $dir/test.log
}]
if {$rc} {
puts "No good. See $dir/test.log."
@@ -119,12 +124,14 @@ catch {
# option.
#
proc process_options {argv} {
- if {$::tcl_platform(platform)=="windows" || $::tcl_platform(platform)=="os2"} {
- set ::MAKEFILE ./Makefile ;# Default value
+ set ::MAKEBIN make ;# Default value
+ if {$::tcl_platform(platform)=="windows"} {
+ set ::MAKEFILE ./Makefile ;# Default value on Windows
} else {
set ::MAKEFILE ./Makefile.linux-gcc ;# Default value
}
set ::SKIP_RUN 0 ;# Default to attempt test
+ set ::TARGET testfixture ;# Default thing to build
for {set i 0} {$i < [llength $argv]} {incr i} {
switch -- [lindex $argv $i] {
@@ -133,6 +140,16 @@ proc process_options {argv} {
set ::MAKEFILE [lindex $argv $i]
}
+ -nmake {
+ set ::MAKEBIN nmake
+ set ::MAKEFILE ./Makefile.msc
+ }
+
+ -target {
+ incr i
+ set ::TARGET [lindex $argv $i]
+ }
+
-skip_run {
set ::SKIP_RUN 1
}
@@ -173,10 +190,10 @@ proc main {argv} {
SQLITE_OMIT_COMPILEOPTION_DIAGS \
SQLITE_OMIT_COMPLETE \
SQLITE_OMIT_COMPOUND_SELECT \
+ SQLITE_OMIT_CTE \
SQLITE_OMIT_DATETIME_FUNCS \
SQLITE_OMIT_DECLTYPE \
SQLITE_OMIT_DEPRECATED \
- xxxSQLITE_OMIT_DISKIO \
SQLITE_OMIT_EXPLAIN \
SQLITE_OMIT_FLAG_PRAGMAS \
SQLITE_OMIT_FLOATING_POINT \
@@ -218,15 +235,11 @@ proc main {argv} {
SQLITE_DISABLE_DIRSYNC \
SQLITE_DISABLE_LFS \
SQLITE_ENABLE_ATOMIC_WRITE \
- xxxSQLITE_ENABLE_CEROD \
SQLITE_ENABLE_COLUMN_METADATA \
SQLITE_ENABLE_EXPENSIVE_ASSERT \
- xxxSQLITE_ENABLE_FTS1 \
- xxxSQLITE_ENABLE_FTS2 \
SQLITE_ENABLE_FTS3 \
SQLITE_ENABLE_FTS3_PARENTHESIS \
SQLITE_ENABLE_FTS4 \
- xxxSQLITE_ENABLE_ICU \
SQLITE_ENABLE_IOTRACE \
SQLITE_ENABLE_LOAD_EXTENSION \
SQLITE_ENABLE_LOCKING_STYLE \
@@ -235,7 +248,7 @@ proc main {argv} {
SQLITE_ENABLE_MEMSYS5 \
SQLITE_ENABLE_OVERSIZE_CELL_CHECK \
SQLITE_ENABLE_RTREE \
- SQLITE_ENABLE_STAT2 \
+ SQLITE_ENABLE_STAT3 \
SQLITE_ENABLE_UNLOCK_NOTIFY \
SQLITE_ENABLE_UPDATE_DELETE_LIMIT \
]
@@ -251,7 +264,7 @@ proc main {argv} {
exit -1
}
- set dirname "test_[string range $sym 7 end]"
+ set dirname "test_[regsub -nocase {^x*SQLITE_} $sym {}]"
run_quick_test $dirname $sym
} else {
# First try a test with all OMIT symbols except SQLITE_OMIT_FLOATING_POINT
@@ -270,14 +283,14 @@ proc main {argv} {
# are the OMIT_FLOATING_POINT and OMIT_PRAGMA symbols, even though we
# know they will fail. It's good to be reminded of this from time to time.
foreach sym $::OMIT_SYMBOLS {
- set dirname "test_[string range $sym 7 end]"
+ set dirname "test_[regsub -nocase {^x*SQLITE_} $sym {}]"
run_quick_test $dirname $sym
}
# Try the ENABLE/DISABLE symbols one at a time.
# We don't do them all at once since some are conflicting.
foreach sym $::ENABLE_SYMBOLS {
- set dirname "test_[string range $sym 7 end]"
+ set dirname "test_[regsub -nocase {^x*SQLITE_} $sym {}]"
run_quick_test $dirname $sym
}
}
diff --git a/lang/sql/sqlite/tool/pagesig.c b/lang/sql/sqlite/tool/pagesig.c
new file mode 100644
index 00000000..540c9d72
--- /dev/null
+++ b/lang/sql/sqlite/tool/pagesig.c
@@ -0,0 +1,92 @@
+/*
+** 2013-10-01
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Compute hash signatures for every page of a database file. This utility
+** program is useful for analyzing the output logs generated by the
+** ext/misc/vfslog.c extension.
+*/
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+
+/*
+** Compute signature for a block of content.
+**
+** For blocks of 16 or fewer bytes, the signature is just a hex dump of
+** the entire block.
+**
+** For blocks of more than 16 bytes, the signature is a hex dump of the
+** first 8 bytes followed by a 64-bit hash of the entire block.
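+**
+** For example (values illustrative only), a page might yield
+**
+**     53514c6974652066-8c3a91b24f07d6e1
+**
+** i.e. sixteen hex digits for the first 8 bytes, a dash, and the two
+** 32-bit running sums printed as the 16-digit hash.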
+*/
+static void vlogSignature(unsigned char *p, int n, char *zCksum){
+ unsigned int s0 = 0, s1 = 0;
+ unsigned int *pI;
+ int i;
+ if( n<=16 ){
+ for(i=0; i<n; i++) sprintf(zCksum+i*2, "%02x", p[i]);
+ }else{
+ pI = (unsigned int*)p;
+ for(i=0; i<n-7; i+=8){
+ s0 += pI[0] + s1;
+ s1 += pI[1] + s0;
+ pI += 2;
+ }
+ for(i=0; i<8; i++) sprintf(zCksum+i*2, "%02x", p[i]);
+ sprintf(zCksum+i*2, "-%08x%08x", s0, s1);
+ }
+}
+
+/*
+** Open a file. Find its page size. Read each page, and compute and
+** display the page signature.
+*/
+static void computeSigs(const char *zFilename){
+ FILE *in = fopen(zFilename, "rb");
+ unsigned pgsz;
+ size_t got;
+ unsigned n;
+ unsigned char aBuf[50];
+ unsigned char aPage[65536];
+
+ if( in==0 ){
+ fprintf(stderr, "cannot open \"%s\"\n", zFilename);
+ return;
+ }
+ got = fread(aBuf, 1, sizeof(aBuf), in);
+ if( got!=sizeof(aBuf) ){
+ goto endComputeSigs;
+ }
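+  /* Bytes 16 and 17 of the database header hold the page size as a
+  ** big-endian value; the special value 1 means 65536. */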
+ pgsz = aBuf[16]*256 + aBuf[17];
+ if( pgsz==1 ) pgsz = 65536;
+ if( (pgsz & (pgsz-1))!=0 ){
+ fprintf(stderr, "invalid page size: %02x%02x\n", aBuf[16], aBuf[17]);
+ goto endComputeSigs;
+ }
+ rewind(in);
+ for(n=1; (got=fread(aPage, 1, pgsz, in))==pgsz; n++){
+    vlogSignature(aPage, pgsz, (char*)aBuf);
+ printf("%4d: %s\n", n, aBuf);
+ }
+
+endComputeSigs:
+ fclose(in);
+}
+
+/*
+** Find page signatures for all named files.
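+**
+** Example (illustrative): "pagesig test.db" writes one "PGNO: SIGNATURE"
+** line per page of test.db to standard output.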
+*/
+int main(int argc, char **argv){
+ int i;
+ for(i=1; i<argc; i++) computeSigs(argv[i]);
+ return 0;
+}
diff --git a/lang/sql/sqlite/tool/shell1.test b/lang/sql/sqlite/tool/shell1.test
deleted file mode 100644
index a344ee62..00000000
--- a/lang/sql/sqlite/tool/shell1.test
+++ /dev/null
@@ -1,714 +0,0 @@
-# 2009 Nov 11
-#
-# The author disclaims copyright to this source code. In place of
-# a legal notice, here is a blessing:
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-#
-# The focus of this file is testing the CLI shell tool.
-#
-# $Id: shell1.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
-#
-
-# Test plan:
-#
-# shell1-1.*: Basic command line option handling.
-# shell1-2.*: Basic "dot" command token parsing.
-# shell1-3.*: Basic test that "dot" command can be called.
-#
-
-package require sqlite3
-
-set CLI "./sqlite3"
-
-proc do_test {name cmd expected} {
- puts -nonewline "$name ..."
- set res [uplevel $cmd]
- if {$res eq $expected} {
- puts Ok
- } else {
- puts Error
- puts " Got: $res"
- puts " Expected: $expected"
- exit
- }
-}
-
-proc execsql {sql} {
- uplevel [list db eval $sql]
-}
-
-proc catchsql {sql} {
- set rc [catch {uplevel [list db eval $sql]} msg]
- list $rc $msg
-}
-
-proc catchcmd {db {cmd ""}} {
- global CLI
- set out [open cmds.txt w]
- puts $out $cmd
- close $out
- set line "exec $CLI $db < cmds.txt"
- set rc [catch { eval $line } msg]
- list $rc $msg
-}
-
-file delete -force test.db test.db.journal
-sqlite3 db test.db
-
-#----------------------------------------------------------------------------
-# Test cases shell1-1.*: Basic command line option handling.
-#
-
-# invalid option
-do_test shell1-1.1.1 {
- set res [catchcmd "-bad test.db" ""]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Error: unknown option: -bad} $res]
-} {1 1}
-# error on extra options
-do_test shell1-1.1.2 {
- set res [catchcmd "-bad test.db \"select 3\" \"select 4\"" ""]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Error: too many options: "select 4"} $res]
-} {1 1}
-# error on extra options
-do_test shell1-1.1.3 {
- set res [catchcmd "-bad FOO test.db BAD" ".quit"]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Error: too many options: "BAD"} $res]
-} {1 1}
-
-# -help
-do_test shell1-1.2.1 {
- set res [catchcmd "-help test.db" ""]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Usage} $res] \
- [regexp {\-init} $res] \
- [regexp {\-version} $res]
-} {1 1 1 1}
-
-# -init filename read/process named file
-do_test shell1-1.3.1 {
- catchcmd "-init FOO test.db" ""
-} {0 {}}
-do_test shell1-1.3.2 {
- set res [catchcmd "-init FOO test.db .quit BAD" ""]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Error: too many options: "BAD"} $res]
-} {1 1}
-
-# -echo print commands before execution
-do_test shell1-1.4.1 {
- catchcmd "-echo test.db" ""
-} {0 {}}
-
-# -[no]header turn headers on or off
-do_test shell1-1.5.1 {
- catchcmd "-header test.db" ""
-} {0 {}}
-do_test shell1-1.5.2 {
- catchcmd "-noheader test.db" ""
-} {0 {}}
-
-# -bail stop after hitting an error
-do_test shell1-1.6.1 {
- catchcmd "-bail test.db" ""
-} {0 {}}
-
-# -interactive force interactive I/O
-do_test shell1-1.7.1 {
- set res [catchcmd "-interactive test.db" ".quit"]
- set rc [lindex $res 0]
- list $rc \
- [regexp {SQLite version} $res] \
- [regexp {Enter SQL statements} $res]
-} {0 1 1}
-
-# -batch force batch I/O
-do_test shell1-1.8.1 {
- catchcmd "-batch test.db" ""
-} {0 {}}
-
-# -column set output mode to 'column'
-do_test shell1-1.9.1 {
- catchcmd "-column test.db" ""
-} {0 {}}
-
-# -csv set output mode to 'csv'
-do_test shell1-1.10.1 {
- catchcmd "-csv test.db" ""
-} {0 {}}
-
-# -html set output mode to HTML
-do_test shell1-1.11.1 {
- catchcmd "-html test.db" ""
-} {0 {}}
-
-# -line set output mode to 'line'
-do_test shell1-1.12.1 {
- catchcmd "-line test.db" ""
-} {0 {}}
-
-# -list set output mode to 'list'
-do_test shell1-1.13.1 {
- catchcmd "-list test.db" ""
-} {0 {}}
-
-# -separator 'x' set output field separator (|)
-do_test shell1-1.14.1 {
- catchcmd "-separator 'x' test.db" ""
-} {0 {}}
-do_test shell1-1.14.2 {
- catchcmd "-separator x test.db" ""
-} {0 {}}
-do_test shell1-1.14.3 {
- set res [catchcmd "-separator" ""]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Error: missing argument for option: -separator} $res]
-} {1 1}
-
-# -stats print memory stats before each finalize
-do_test shell1-1.14b.1 {
- catchcmd "-stats test.db" ""
-} {0 {}}
-
-# -nullvalue 'text' set text string for NULL values
-do_test shell1-1.15.1 {
- catchcmd "-nullvalue 'x' test.db" ""
-} {0 {}}
-do_test shell1-1.15.2 {
- catchcmd "-nullvalue x test.db" ""
-} {0 {}}
-do_test shell1-1.15.3 {
- set res [catchcmd "-nullvalue" ""]
- set rc [lindex $res 0]
- list $rc \
- [regexp {Error: missing argument for option: -nullvalue} $res]
-} {1 1}
-
-# -version show SQLite version
-do_test shell1-1.16.1 {
- catchcmd "-version test.db" ""
-} {0 3.7.6.2}
-
-#----------------------------------------------------------------------------
-# Test cases shell1-2.*: Basic "dot" command token parsing.
-#
-
-# check first token handling
-do_test shell1-2.1.1 {
- catchcmd "test.db" ".foo"
-} {1 {Error: unknown command or invalid arguments: "foo". Enter ".help" for help}}
-do_test shell1-2.1.2 {
- catchcmd "test.db" ".\"foo OFF\""
-} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
-do_test shell1-2.1.3 {
- catchcmd "test.db" ".\'foo OFF\'"
-} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
-
-# unbalanced quotes
-do_test shell1-2.2.1 {
- catchcmd "test.db" ".\"foo OFF"
-} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
-do_test shell1-2.2.2 {
- catchcmd "test.db" ".\'foo OFF"
-} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
-do_test shell1-2.2.3 {
- catchcmd "test.db" ".explain \"OFF"
-} {0 {}}
-do_test shell1-2.2.4 {
- catchcmd "test.db" ".explain \'OFF"
-} {0 {}}
-do_test shell1-2.2.5 {
- catchcmd "test.db" ".mode \"insert FOO"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-do_test shell1-2.2.6 {
- catchcmd "test.db" ".mode \'insert FOO"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-
-# check multiple tokens, and quoted tokens
-do_test shell1-2.3.1 {
- catchcmd "test.db" ".explain 1"
-} {0 {}}
-do_test shell1-2.3.2 {
- catchcmd "test.db" ".explain on"
-} {0 {}}
-do_test shell1-2.3.3 {
- catchcmd "test.db" ".explain \"1 2 3\""
-} {0 {}}
-do_test shell1-2.3.4 {
- catchcmd "test.db" ".explain \"OFF\""
-} {0 {}}
-do_test shell1-2.3.5 {
- catchcmd "test.db" ".\'explain\' \'OFF\'"
-} {0 {}}
-do_test shell1-2.3.6 {
- catchcmd "test.db" ".explain \'OFF\'"
-} {0 {}}
-do_test shell1-2.3.7 {
- catchcmd "test.db" ".\'explain\' \'OFF\'"
-} {0 {}}
-
-# check quoted args are unquoted
-do_test shell1-2.4.1 {
- catchcmd "test.db" ".mode FOO"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-do_test shell1-2.4.2 {
- catchcmd "test.db" ".mode csv"
-} {0 {}}
-do_test shell1-2.4.2 {
- catchcmd "test.db" ".mode \"csv\""
-} {0 {}}
-
-
-#----------------------------------------------------------------------------
-# Test cases shell1-3.*: Basic test that "dot" command can be called.
-#
-
-# .backup ?DB? FILE Backup DB (default "main") to FILE
-do_test shell1-3.1.1 {
- catchcmd "test.db" ".backup"
-} {1 {Error: unknown command or invalid arguments: "backup". Enter ".help" for help}}
-do_test shell1-3.1.2 {
- catchcmd "test.db" ".backup FOO"
-} {0 {}}
-do_test shell1-3.1.3 {
- catchcmd "test.db" ".backup FOO BAR"
-} {1 {Error: unknown database FOO}}
-do_test shell1-3.1.4 {
- # too many arguments
- catchcmd "test.db" ".backup FOO BAR BAD"
-} {1 {Error: unknown command or invalid arguments: "backup". Enter ".help" for help}}
-
-# .bail ON|OFF Stop after hitting an error. Default OFF
-do_test shell1-3.2.1 {
- catchcmd "test.db" ".bail"
-} {1 {Error: unknown command or invalid arguments: "bail". Enter ".help" for help}}
-do_test shell1-3.2.2 {
- catchcmd "test.db" ".bail ON"
-} {0 {}}
-do_test shell1-3.2.3 {
- catchcmd "test.db" ".bail OFF"
-} {0 {}}
-do_test shell1-3.2.4 {
- # too many arguments
- catchcmd "test.db" ".bail OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "bail". Enter ".help" for help}}
-
-# .databases List names and files of attached databases
-do_test shell1-3.3.1 {
- set res [catchcmd "test.db" ".databases"]
- regexp {0.*main.*test\.db} $res
-} {1}
-do_test shell1-3.3.2 {
- # too many arguments
- catchcmd "test.db" ".databases BAD"
-} {1 {Error: unknown command or invalid arguments: "databases". Enter ".help" for help}}
-
-# .dump ?TABLE? ... Dump the database in an SQL text format
-# If TABLE specified, only dump tables matching
-# LIKE pattern TABLE.
-do_test shell1-3.4.1 {
- set res [catchcmd "test.db" ".dump"]
- list [regexp {BEGIN TRANSACTION;} $res] \
- [regexp {COMMIT;} $res]
-} {1 1}
-do_test shell1-3.4.2 {
- set res [catchcmd "test.db" ".dump FOO"]
- list [regexp {BEGIN TRANSACTION;} $res] \
- [regexp {COMMIT;} $res]
-} {1 1}
-do_test shell1-3.4.3 {
- # too many arguments
- catchcmd "test.db" ".dump FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "dump". Enter ".help" for help}}
-
-# .echo ON|OFF Turn command echo on or off
-do_test shell1-3.5.1 {
- catchcmd "test.db" ".echo"
-} {1 {Error: unknown command or invalid arguments: "echo". Enter ".help" for help}}
-do_test shell1-3.5.2 {
- catchcmd "test.db" ".echo ON"
-} {0 {}}
-do_test shell1-3.5.3 {
- catchcmd "test.db" ".echo OFF"
-} {0 {}}
-do_test shell1-3.5.4 {
- # too many arguments
- catchcmd "test.db" ".echo OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "echo". Enter ".help" for help}}
-
-# .exit Exit this program
-do_test shell1-3.6.1 {
- catchcmd "test.db" ".exit"
-} {0 {}}
-do_test shell1-3.6.2 {
- # too many arguments
- catchcmd "test.db" ".exit BAD"
-} {1 {Error: unknown command or invalid arguments: "exit". Enter ".help" for help}}
-
-# .explain ON|OFF Turn output mode suitable for EXPLAIN on or off.
-do_test shell1-3.7.1 {
- catchcmd "test.db" ".explain"
- # explain is the exception to the booleans. without an option, it turns it on.
-} {0 {}}
-do_test shell1-3.7.2 {
- catchcmd "test.db" ".explain ON"
-} {0 {}}
-do_test shell1-3.7.3 {
- catchcmd "test.db" ".explain OFF"
-} {0 {}}
-do_test shell1-3.7.4 {
- # too many arguments
- catchcmd "test.db" ".explain OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "explain". Enter ".help" for help}}
-
-
-# .header(s) ON|OFF Turn display of headers on or off
-do_test shell1-3.9.1 {
- catchcmd "test.db" ".header"
-} {1 {Error: unknown command or invalid arguments: "header". Enter ".help" for help}}
-do_test shell1-3.9.2 {
- catchcmd "test.db" ".header ON"
-} {0 {}}
-do_test shell1-3.9.3 {
- catchcmd "test.db" ".header OFF"
-} {0 {}}
-do_test shell1-3.9.4 {
- # too many arguments
- catchcmd "test.db" ".header OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "header". Enter ".help" for help}}
-
-do_test shell1-3.9.5 {
- catchcmd "test.db" ".headers"
-} {1 {Error: unknown command or invalid arguments: "headers". Enter ".help" for help}}
-do_test shell1-3.9.6 {
- catchcmd "test.db" ".headers ON"
-} {0 {}}
-do_test shell1-3.9.7 {
- catchcmd "test.db" ".headers OFF"
-} {0 {}}
-do_test shell1-3.9.8 {
- # too many arguments
- catchcmd "test.db" ".headers OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "headers". Enter ".help" for help}}
-
-# .help Show this message
-do_test shell1-3.10.1 {
- set res [catchcmd "test.db" ".help"]
- # look for a few of the possible help commands
- list [regexp {.help} $res] \
- [regexp {.quit} $res] \
- [regexp {.show} $res]
-} {1 1 1}
-do_test shell1-3.10.2 {
- # we allow .help to take extra args (it is help after all)
- set res [catchcmd "test.db" ".help BAD"]
- # look for a few of the possible help commands
- list [regexp {.help} $res] \
- [regexp {.quit} $res] \
- [regexp {.show} $res]
-} {1 1 1}
-
-# .import FILE TABLE Import data from FILE into TABLE
-do_test shell1-3.11.1 {
- catchcmd "test.db" ".import"
-} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
-do_test shell1-3.11.2 {
- catchcmd "test.db" ".import FOO"
-} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
-do_test shell1-3.11.2 {
- catchcmd "test.db" ".import FOO BAR"
-} {1 {Error: no such table: BAR}}
-do_test shell1-3.11.3 {
- # too many arguments
- catchcmd "test.db" ".import FOO BAR BAD"
-} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
-
-# .indices ?TABLE? Show names of all indices
-# If TABLE specified, only show indices for tables
-# matching LIKE pattern TABLE.
-do_test shell1-3.12.1 {
- catchcmd "test.db" ".indices"
-} {0 {}}
-do_test shell1-3.12.2 {
- catchcmd "test.db" ".indices FOO"
-} {0 {}}
-do_test shell1-3.12.3 {
- # too many arguments
- catchcmd "test.db" ".indices FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "indices". Enter ".help" for help}}
-
-# .mode MODE ?TABLE? Set output mode where MODE is one of:
-# csv Comma-separated values
-# column Left-aligned columns. (See .width)
-# html HTML <table> code
-# insert SQL insert statements for TABLE
-# line One value per line
-# list Values delimited by .separator string
-# tabs Tab-separated values
-# tcl TCL list elements
-do_test shell1-3.13.1 {
- catchcmd "test.db" ".mode"
-} {1 {Error: unknown command or invalid arguments: "mode". Enter ".help" for help}}
-do_test shell1-3.13.2 {
- catchcmd "test.db" ".mode FOO"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-do_test shell1-3.13.3 {
- catchcmd "test.db" ".mode csv"
-} {0 {}}
-do_test shell1-3.13.4 {
- catchcmd "test.db" ".mode column"
-} {0 {}}
-do_test shell1-3.13.5 {
- catchcmd "test.db" ".mode html"
-} {0 {}}
-do_test shell1-3.13.6 {
- catchcmd "test.db" ".mode insert"
-} {0 {}}
-do_test shell1-3.13.7 {
- catchcmd "test.db" ".mode line"
-} {0 {}}
-do_test shell1-3.13.8 {
- catchcmd "test.db" ".mode list"
-} {0 {}}
-do_test shell1-3.13.9 {
- catchcmd "test.db" ".mode tabs"
-} {0 {}}
-do_test shell1-3.13.10 {
- catchcmd "test.db" ".mode tcl"
-} {0 {}}
-do_test shell1-3.13.11 {
- # too many arguments
- catchcmd "test.db" ".mode tcl BAD"
-} {1 {Error: invalid arguments: "BAD". Enter ".help" for help}}
-
-# don't allow partial mode type matches
-do_test shell1-3.13.12 {
- catchcmd "test.db" ".mode l"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-do_test shell1-3.13.13 {
- catchcmd "test.db" ".mode li"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-do_test shell1-3.13.14 {
- catchcmd "test.db" ".mode lin"
-} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
-
-# .nullvalue STRING Print STRING in place of NULL values
-do_test shell1-3.14.1 {
- catchcmd "test.db" ".nullvalue"
-} {1 {Error: unknown command or invalid arguments: "nullvalue". Enter ".help" for help}}
-do_test shell1-3.14.2 {
- catchcmd "test.db" ".nullvalue FOO"
-} {0 {}}
-do_test shell1-3.14.3 {
- # too many arguments
- catchcmd "test.db" ".nullvalue FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "nullvalue". Enter ".help" for help}}
-
-# .output FILENAME Send output to FILENAME
-do_test shell1-3.15.1 {
- catchcmd "test.db" ".output"
-} {1 {Error: unknown command or invalid arguments: "output". Enter ".help" for help}}
-do_test shell1-3.15.2 {
- catchcmd "test.db" ".output FOO"
-} {0 {}}
-do_test shell1-3.15.3 {
- # too many arguments
- catchcmd "test.db" ".output FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "output". Enter ".help" for help}}
-
-# .output stdout Send output to the screen
-do_test shell1-3.16.1 {
- catchcmd "test.db" ".output stdout"
-} {0 {}}
-do_test shell1-3.16.2 {
- # too many arguments
- catchcmd "test.db" ".output stdout BAD"
-} {1 {Error: unknown command or invalid arguments: "output". Enter ".help" for help}}
-
-# .prompt MAIN CONTINUE Replace the standard prompts
-do_test shell1-3.17.1 {
- catchcmd "test.db" ".prompt"
-} {1 {Error: unknown command or invalid arguments: "prompt". Enter ".help" for help}}
-do_test shell1-3.17.2 {
- catchcmd "test.db" ".prompt FOO"
-} {0 {}}
-do_test shell1-3.17.3 {
- catchcmd "test.db" ".prompt FOO BAR"
-} {0 {}}
-do_test shell1-3.17.4 {
- # too many arguments
- catchcmd "test.db" ".prompt FOO BAR BAD"
-} {1 {Error: unknown command or invalid arguments: "prompt". Enter ".help" for help}}
-
-# .quit Exit this program
-do_test shell1-3.18.1 {
- catchcmd "test.db" ".quit"
-} {0 {}}
-do_test shell1-3.18.2 {
- # too many arguments
- catchcmd "test.db" ".quit BAD"
-} {1 {Error: unknown command or invalid arguments: "quit". Enter ".help" for help}}
-
-# .read FILENAME Execute SQL in FILENAME
-do_test shell1-3.19.1 {
- catchcmd "test.db" ".read"
-} {1 {Error: unknown command or invalid arguments: "read". Enter ".help" for help}}
-do_test shell1-3.19.2 {
- file delete -force FOO
- catchcmd "test.db" ".read FOO"
-} {1 {Error: cannot open "FOO"}}
-do_test shell1-3.19.3 {
- # too many arguments
- catchcmd "test.db" ".read FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "read". Enter ".help" for help}}
-
-# .restore ?DB? FILE Restore content of DB (default "main") from FILE
-do_test shell1-3.20.1 {
- catchcmd "test.db" ".restore"
-} {1 {Error: unknown command or invalid arguments: "restore". Enter ".help" for help}}
-do_test shell1-3.20.2 {
- catchcmd "test.db" ".restore FOO"
-} {0 {}}
-do_test shell1-3.20.3 {
- catchcmd "test.db" ".restore FOO BAR"
-} {1 {Error: unknown database FOO}}
-do_test shell1-3.20.4 {
- # too many arguments
- catchcmd "test.db" ".restore FOO BAR BAD"
-} {1 {Error: unknown command or invalid arguments: "restore". Enter ".help" for help}}
-
-# .schema ?TABLE? Show the CREATE statements
-# If TABLE specified, only show tables matching
-# LIKE pattern TABLE.
-do_test shell1-3.21.1 {
- catchcmd "test.db" ".schema"
-} {0 {}}
-do_test shell1-3.21.2 {
- catchcmd "test.db" ".schema FOO"
-} {0 {}}
-do_test shell1-3.21.3 {
- # too many arguments
- catchcmd "test.db" ".schema FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "schema". Enter ".help" for help}}
-
-# .separator STRING Change separator used by output mode and .import
-do_test shell1-3.22.1 {
- catchcmd "test.db" ".separator"
-} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
-do_test shell1-3.22.2 {
- catchcmd "test.db" ".separator FOO"
-} {0 {}}
-do_test shell1-3.22.3 {
- # too many arguments
- catchcmd "test.db" ".separator FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
-
-# .show Show the current values for various settings
-do_test shell1-3.23.1 {
- set res [catchcmd "test.db" ".show"]
- list [regexp {echo:} $res] \
- [regexp {explain:} $res] \
- [regexp {headers:} $res] \
- [regexp {mode:} $res] \
- [regexp {nullvalue:} $res] \
- [regexp {output:} $res] \
- [regexp {separator:} $res] \
- [regexp {stats:} $res] \
- [regexp {width:} $res]
-} {1 1 1 1 1 1 1 1 1}
-do_test shell1-3.23.2 {
- # too many arguments
- catchcmd "test.db" ".show BAD"
-} {1 {Error: unknown command or invalid arguments: "show". Enter ".help" for help}}
-
-# .stats ON|OFF Turn stats on or off
-do_test shell1-3.23b.1 {
- catchcmd "test.db" ".stats"
-} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
-do_test shell1-3.23b.2 {
- catchcmd "test.db" ".stats ON"
-} {0 {}}
-do_test shell1-3.23b.3 {
- catchcmd "test.db" ".stats OFF"
-} {0 {}}
-do_test shell1-3.23b.4 {
- # too many arguments
- catchcmd "test.db" ".stats OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
-
-# .tables ?TABLE? List names of tables
-# If TABLE specified, only list tables matching
-# LIKE pattern TABLE.
-do_test shell1-3.24.1 {
- catchcmd "test.db" ".tables"
-} {0 {}}
-do_test shell1-3.24.2 {
- catchcmd "test.db" ".tables FOO"
-} {0 {}}
-do_test shell1-3.24.3 {
- # too many arguments
- catchcmd "test.db" ".tables FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "tables". Enter ".help" for help}}
-
-# .timeout MS Try opening locked tables for MS milliseconds
-do_test shell1-3.25.1 {
- catchcmd "test.db" ".timeout"
-} {1 {Error: unknown command or invalid arguments: "timeout". Enter ".help" for help}}
-do_test shell1-3.25.2 {
- catchcmd "test.db" ".timeout zzz"
- # this should be treated the same as a '0' timeout
-} {0 {}}
-do_test shell1-3.25.3 {
- catchcmd "test.db" ".timeout 1"
-} {0 {}}
-do_test shell1-3.25.4 {
- # too many arguments
- catchcmd "test.db" ".timeout 1 BAD"
-} {1 {Error: unknown command or invalid arguments: "timeout". Enter ".help" for help}}
-
-# .width NUM NUM ... Set column widths for "column" mode
-do_test shell1-3.26.1 {
- catchcmd "test.db" ".width"
-} {1 {Error: unknown command or invalid arguments: "width". Enter ".help" for help}}
-do_test shell1-3.26.2 {
- catchcmd "test.db" ".width xxx"
- # this should be treated the same as a '0' width for col 1
-} {0 {}}
-do_test shell1-3.26.3 {
- catchcmd "test.db" ".width xxx yyy"
- # this should be treated the same as a '0' width for col 1 and 2
-} {0 {}}
-do_test shell1-3.26.4 {
- catchcmd "test.db" ".width 1 1"
- # this should be treated the same as a '1' width for col 1 and 2
-} {0 {}}
-
-# .timer ON|OFF Turn the CPU timer measurement on or off
-do_test shell1-3.27.1 {
- catchcmd "test.db" ".timer"
-} {1 {Error: unknown command or invalid arguments: "timer". Enter ".help" for help}}
-do_test shell1-3.27.2 {
- catchcmd "test.db" ".timer ON"
-} {0 {}}
-do_test shell1-3.27.3 {
- catchcmd "test.db" ".timer OFF"
-} {0 {}}
-do_test shell1-3.27.4 {
- # too many arguments
- catchcmd "test.db" ".timer OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "timer". Enter ".help" for help}}
-
-puts "CLI tests completed successfully"
diff --git a/lang/sql/sqlite/tool/shell2.test b/lang/sql/sqlite/tool/shell2.test
deleted file mode 100644
index b63fafc3..00000000
--- a/lang/sql/sqlite/tool/shell2.test
+++ /dev/null
@@ -1,222 +0,0 @@
-# 2009 Nov 11
-#
-# The author disclaims copyright to this source code. In place of
-# a legal notice, here is a blessing:
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-#
-# The focus of this file is testing the CLI shell tool.
-#
-# $Id: shell2.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
-#
-
-# Test plan:
-#
-# shell2-1.*: Misc. test of various tickets and reported errors.
-#
-
-package require sqlite3
-
-set CLI "./sqlite3"
-
-proc do_test {name cmd expected} {
- puts -nonewline "$name ..."
- set res [uplevel $cmd]
- if {$res eq $expected} {
- puts Ok
- } else {
- puts Error
- puts " Got: $res"
- puts " Expected: $expected"
- exit
- }
-}
-
-proc execsql {sql} {
- uplevel [list db eval $sql]
-}
-
-proc catchsql {sql} {
- set rc [catch {uplevel [list db eval $sql]} msg]
- list $rc $msg
-}
-
-proc catchcmd {db {cmd ""}} {
- global CLI
- set out [open cmds.txt w]
- puts $out $cmd
- close $out
- set line "exec $CLI $db < cmds.txt"
- set rc [catch { eval $line } msg]
- list $rc $msg
-}
-
-file delete -force test.db test.db.journal
-sqlite3 db test.db
-
-
-#----------------------------------------------------------------------------
-# shell2-1.*: Misc. test of various tickets and reported errors.
-#
-
-# Batch mode not creating databases.
-# Reported on mailing list by Ken Zalewski.
-# Ticket [aeff892c57].
-do_test shell2-1.1.1 {
- file delete -force foo.db
- set rc [ catchcmd "-batch foo.db" "CREATE TABLE t1(a);" ]
- set fexist [file exist foo.db]
- list $rc $fexist
-} {{0 {}} 1}
-
-# Shell silently ignores extra parameters.
-# Ticket [f5cb008a65].
-do_test shell2-1.2.1 {
- set rc [catch { eval exec $CLI \":memory:\" \"select 3\" \"select 4\" } msg]
- list $rc \
- [regexp {Error: too many options: "select 4"} $msg]
-} {1 1}
-
-# Test a problem reported on the mailing list. The shell was at one point
-# returning the generic SQLITE_ERROR message ("SQL error or missing database")
-# instead of the "too many levels..." message in the test below.
-#
-do_test shell2-1.3 {
- catchcmd "-batch test.db" {
- PRAGMA recursive_triggers = ON;
- CREATE TABLE t5(a PRIMARY KEY, b, c);
- INSERT INTO t5 VALUES(1, 2, 3);
- CREATE TRIGGER au_tble AFTER UPDATE ON t5 BEGIN
- UPDATE OR IGNORE t5 SET a = new.a, c = 10;
- END;
-
- UPDATE OR REPLACE t5 SET a = 4 WHERE a = 1;
- }
-} {1 {Error: near line 9: too many levels of trigger recursion}}
-
-
-
-# Shell not echoing all commands with echo on.
-# Ticket [eb620916be].
-
-# Test with echo off
-# NB. whitespace is important
-do_test shell2-1.4.1 {
- file delete -force foo.db
- catchcmd "foo.db" {CREATE TABLE foo(a);
-INSERT INTO foo(a) VALUES(1);
-SELECT * FROM foo;}
-} {0 1}
-
-# Test with echo on using command line option
-# NB. whitespace is important
-do_test shell2-1.4.2 {
- file delete -force foo.db
- catchcmd "-echo foo.db" {CREATE TABLE foo(a);
-INSERT INTO foo(a) VALUES(1);
-SELECT * FROM foo;}
-} {0 {CREATE TABLE foo(a);
-INSERT INTO foo(a) VALUES(1);
-SELECT * FROM foo;
-1}}
-
-# Test with echo on using dot command
-# NB. whitespace is important
-do_test shell2-1.4.3 {
- file delete -force foo.db
- catchcmd "foo.db" {.echo ON
-CREATE TABLE foo(a);
-INSERT INTO foo(a) VALUES(1);
-SELECT * FROM foo;}
-} {0 {CREATE TABLE foo(a);
-INSERT INTO foo(a) VALUES(1);
-SELECT * FROM foo;
-1}}
-
-# Test with echo on using dot command and
-# turning off mid- processing.
-# NB. whitespace is important
-do_test shell2-1.4.4 {
- file delete -force foo.db
- catchcmd "foo.db" {.echo ON
-CREATE TABLE foo(a);
-.echo OFF
-INSERT INTO foo(a) VALUES(1);
-SELECT * FROM foo;}
-} {0 {CREATE TABLE foo(a);
-.echo OFF
-1}}
-
-# Test with echo on using dot command and
-# multiple commands per line.
-# NB. whitespace is important
-do_test shell2-1.4.5 {
- file delete -force foo.db
- catchcmd "foo.db" {.echo ON
-CREATE TABLE foo1(a);
-INSERT INTO foo1(a) VALUES(1);
-CREATE TABLE foo2(b);
-INSERT INTO foo2(b) VALUES(1);
-SELECT * FROM foo1; SELECT * FROM foo2;
-INSERT INTO foo1(a) VALUES(2); INSERT INTO foo2(b) VALUES(2);
-SELECT * FROM foo1; SELECT * FROM foo2;
-}
-} {0 {CREATE TABLE foo1(a);
-INSERT INTO foo1(a) VALUES(1);
-CREATE TABLE foo2(b);
-INSERT INTO foo2(b) VALUES(1);
-SELECT * FROM foo1;
-1
-SELECT * FROM foo2;
-1
-INSERT INTO foo1(a) VALUES(2);
-INSERT INTO foo2(b) VALUES(2);
-SELECT * FROM foo1;
-1
-2
-SELECT * FROM foo2;
-1
-2}}
-
-# Test with echo on and headers on using dot command and
-# multiple commands per line.
-# NB. whitespace is important
-do_test shell2-1.4.6 {
- file delete -force foo.db
- catchcmd "foo.db" {.echo ON
-.headers ON
-CREATE TABLE foo1(a);
-INSERT INTO foo1(a) VALUES(1);
-CREATE TABLE foo2(b);
-INSERT INTO foo2(b) VALUES(1);
-SELECT * FROM foo1; SELECT * FROM foo2;
-INSERT INTO foo1(a) VALUES(2); INSERT INTO foo2(b) VALUES(2);
-SELECT * FROM foo1; SELECT * FROM foo2;
-}
-} {0 {.headers ON
-CREATE TABLE foo1(a);
-INSERT INTO foo1(a) VALUES(1);
-CREATE TABLE foo2(b);
-INSERT INTO foo2(b) VALUES(1);
-SELECT * FROM foo1;
-a
-1
-SELECT * FROM foo2;
-b
-1
-INSERT INTO foo1(a) VALUES(2);
-INSERT INTO foo2(b) VALUES(2);
-SELECT * FROM foo1;
-a
-1
-2
-SELECT * FROM foo2;
-b
-1
-2}}
-
-puts "CLI tests completed successfully"
diff --git a/lang/sql/sqlite/tool/shell3.test b/lang/sql/sqlite/tool/shell3.test
deleted file mode 100644
index d37adff2..00000000
--- a/lang/sql/sqlite/tool/shell3.test
+++ /dev/null
@@ -1,124 +0,0 @@
-# 2009 Dec 16
-#
-# The author disclaims copyright to this source code. In place of
-# a legal notice, here is a blessing:
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-#
-# The focus of this file is testing the CLI shell tool.
-#
-# $Id: shell2.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
-#
-
-# Test plan:
-#
-# shell3-1.*: Basic tests for running SQL statments from command line.
-# shell3-2.*: Basic tests for running SQL file from command line.
-#
-
-package require sqlite3
-
-set CLI "./sqlite3"
-
-proc do_test {name cmd expected} {
- puts -nonewline "$name ..."
- set res [uplevel $cmd]
- if {$res eq $expected} {
- puts Ok
- } else {
- puts Error
- puts " Got: $res"
- puts " Expected: $expected"
- exit
- }
-}
-
-proc execsql {sql} {
- uplevel [list db eval $sql]
-}
-
-proc catchsql {sql} {
- set rc [catch {uplevel [list db eval $sql]} msg]
- list $rc $msg
-}
-
-proc catchcmd {db {cmd ""}} {
- global CLI
- set out [open cmds.txt w]
- puts $out $cmd
- close $out
- set line "exec $CLI $db < cmds.txt"
- set rc [catch { eval $line } msg]
- list $rc $msg
-}
-
-file delete -force test.db test.db.journal
-sqlite3 db test.db
-
-
-#----------------------------------------------------------------------------
-# shell3-1.*: Basic tests for running SQL statments from command line.
-#
-
-# Run SQL statement from command line
-do_test shell3-1.1 {
- file delete -force foo.db
- set rc [ catchcmd "foo.db \"CREATE TABLE t1(a);\"" ]
- set fexist [file exist foo.db]
- list $rc $fexist
-} {{0 {}} 1}
-do_test shell3-1.2 {
- catchcmd "foo.db" ".tables"
-} {0 t1}
-do_test shell3-1.3 {
- catchcmd "foo.db \"DROP TABLE t1;\""
-} {0 {}}
-do_test shell3-1.4 {
- catchcmd "foo.db" ".tables"
-} {0 {}}
-do_test shell3-1.5 {
- catchcmd "foo.db \"CREATE TABLE t1(a); DROP TABLE t1;\""
-} {0 {}}
-do_test shell3-1.6 {
- catchcmd "foo.db" ".tables"
-} {0 {}}
-do_test shell3-1.7 {
- catchcmd "foo.db \"CREATE TABLE\""
-} {1 {Error: near "TABLE": syntax error}}
-
-#----------------------------------------------------------------------------
-# shell3-2.*: Basic tests for running SQL file from command line.
-#
-
-# Run SQL file from command line
-do_test shell3-2.1 {
- file delete -force foo.db
- set rc [ catchcmd "foo.db" "CREATE TABLE t1(a);" ]
- set fexist [file exist foo.db]
- list $rc $fexist
-} {{0 {}} 1}
-do_test shell3-2.2 {
- catchcmd "foo.db" ".tables"
-} {0 t1}
-do_test shell3-2.3 {
- catchcmd "foo.db" "DROP TABLE t1;"
-} {0 {}}
-do_test shell3-2.4 {
- catchcmd "foo.db" ".tables"
-} {0 {}}
-do_test shell3-2.5 {
- catchcmd "foo.db" "CREATE TABLE t1(a); DROP TABLE t1;"
-} {0 {}}
-do_test shell3-2.6 {
- catchcmd "foo.db" ".tables"
-} {0 {}}
-do_test shell3-2.7 {
- catchcmd "foo.db" "CREATE TABLE"
-} {1 {Error: incomplete SQL: CREATE TABLE}}
-
-
-puts "CLI tests completed successfully"
diff --git a/lang/sql/sqlite/tool/shell4.test b/lang/sql/sqlite/tool/shell4.test
deleted file mode 100644
index 085c279b..00000000
--- a/lang/sql/sqlite/tool/shell4.test
+++ /dev/null
@@ -1,129 +0,0 @@
-# 2010 July 28
-#
-# The author disclaims copyright to this source code. In place of
-# a legal notice, here is a blessing:
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-#
-# The focus of this file is testing the CLI shell tool.
-# These tests are specific to the .stats command.
-#
-# $Id: shell4.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
-#
-
-# Test plan:
-#
-# shell4-1.*: Basic tests specific to the "stats" command.
-#
-
-set CLI "./sqlite3"
-
-proc do_test {name cmd expected} {
- puts -nonewline "$name ..."
- set res [uplevel $cmd]
- if {$res eq $expected} {
- puts Ok
- } else {
- puts Error
- puts " Got: $res"
- puts " Expected: $expected"
- exit
- }
-}
-
-proc catchcmd {db {cmd ""}} {
- global CLI
- set out [open cmds.txt w]
- puts $out $cmd
- close $out
- set line "exec $CLI $db < cmds.txt"
- set rc [catch { eval $line } msg]
- list $rc $msg
-}
-
-file delete -force test.db test.db.journal
-
-#----------------------------------------------------------------------------
-# Test cases shell4-1.*: Tests specific to the "stats" command.
-#
-
-# should default to off
-do_test shell4-1.1.1 {
- set res [catchcmd "test.db" ".show"]
- list [regexp {stats: off} $res]
-} {1}
-
-do_test shell4-1.1.2 {
- set res [catchcmd "test.db" ".show"]
- list [regexp {stats: on} $res]
-} {0}
-
-# -stats should turn it on
-do_test shell4-1.2.1 {
- set res [catchcmd "-stats test.db" ".show"]
- list [regexp {stats: on} $res]
-} {1}
-
-do_test shell4-1.2.2 {
- set res [catchcmd "-stats test.db" ".show"]
- list [regexp {stats: off} $res]
-} {0}
-
-# .stats ON|OFF Turn stats on or off
-do_test shell4-1.3.1 {
- catchcmd "test.db" ".stats"
-} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
-do_test shell4-1.3.2 {
- catchcmd "test.db" ".stats ON"
-} {0 {}}
-do_test shell4-1.3.3 {
- catchcmd "test.db" ".stats OFF"
-} {0 {}}
-do_test shell4-1.3.4 {
- # too many arguments
- catchcmd "test.db" ".stats OFF BAD"
-} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
-
-# NB. whitespace is important
-do_test shell4-1.4.1 {
- set res [catchcmd "test.db" {.show}]
- list [regexp {stats: off} $res]
-} {1}
-
-do_test shell4-1.4.2 {
- set res [catchcmd "test.db" {.stats ON
-.show
-}]
- list [regexp {stats: on} $res]
-} {1}
-
-do_test shell4-1.4.3 {
- set res [catchcmd "test.db" {.stats OFF
-.show
-}]
- list [regexp {stats: off} $res]
-} {1}
-
-# make sure stats not present when off
-do_test shell4-1.5.1 {
- set res [catchcmd "test.db" {SELECT 1;}]
- list [regexp {Memory Used} $res] \
- [regexp {Heap Usage} $res] \
- [regexp {Autoindex Inserts} $res]
-} {0 0 0}
-
-# make sure stats are present when on
-do_test shell4-1.5.2 {
- set res [catchcmd "test.db" {.stats ON
-SELECT 1;
-}]
- list [regexp {Memory Used} $res] \
- [regexp {Heap Usage} $res] \
- [regexp {Autoindex Inserts} $res]
-} {1 1 1}
-
-puts "CLI tests completed successfully"
diff --git a/lang/sql/sqlite/tool/shell5.test b/lang/sql/sqlite/tool/shell5.test
deleted file mode 100644
index a82f9797..00000000
--- a/lang/sql/sqlite/tool/shell5.test
+++ /dev/null
@@ -1,243 +0,0 @@
-# 2010 August 4
-#
-# The author disclaims copyright to this source code. In place of
-# a legal notice, here is a blessing:
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-#
-# The focus of this file is testing the CLI shell tool.
-# These tests are specific to the .import command.
-#
-# $Id: shell5.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
-#
-
-# Test plan:
-#
-# shell5-1.*: Basic tests specific to the ".import" command.
-#
-
-set CLI "./sqlite3"
-
-proc do_test {name cmd expected} {
- puts -nonewline "$name ..."
- set res [uplevel $cmd]
- if {$res eq $expected} {
- puts Ok
- } else {
- puts Error
- puts " Got: $res"
- puts " Expected: $expected"
- exit
- }
-}
-
-proc catchcmd {db {cmd ""}} {
- global CLI
- set out [open cmds.txt w]
- puts $out $cmd
- close $out
- set line "exec $CLI $db < cmds.txt"
- set rc [catch { eval $line } msg]
- list $rc $msg
-}
-
-file delete -force test.db test.db.journal
-
-#----------------------------------------------------------------------------
-# Test cases shell5-1.*: Basic handling of the .import and .separator commands.
-#
-
-# .import FILE TABLE Import data from FILE into TABLE
-do_test shell5-1.1.1 {
- catchcmd "test.db" ".import"
-} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
-do_test shell5-1.1.2 {
- catchcmd "test.db" ".import FOO"
-} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
-do_test shell5-1.1.2 {
- catchcmd "test.db" ".import FOO BAR"
-} {1 {Error: no such table: BAR}}
-do_test shell5-1.1.3 {
- # too many arguments
- catchcmd "test.db" ".import FOO BAR BAD"
-} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
-
-# .separator STRING Change separator used by output mode and .import
-do_test shell1-1.2.1 {
- catchcmd "test.db" ".separator"
-} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
-do_test shell1-1.2.2 {
- catchcmd "test.db" ".separator FOO"
-} {0 {}}
-do_test shell1-1.2.3 {
- # too many arguments
- catchcmd "test.db" ".separator FOO BAD"
-} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
-
-# separator should default to "|"
-do_test shell5-1.3.1 {
- set res [catchcmd "test.db" ".show"]
- list [regexp {separator: \"\|\"} $res]
-} {1}
-
-# set separator to different value.
-# check that .show reports new value
-do_test shell5-1.3.2 {
- set res [catchcmd "test.db" {.separator ,
-.show}]
- list [regexp {separator: \",\"} $res]
-} {1}
-
-# import file doesn't exist
-do_test shell5-1.4.1 {
- file delete -force FOO
- set res [catchcmd "test.db" {CREATE TABLE t1(a, b);
-.import FOO t1}]
-} {1 {Error: cannot open "FOO"}}
-
-# empty import file
-do_test shell5-1.4.2 {
- file delete -force shell5.csv
- set in [open shell5.csv w]
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 0}
-
-# import file with 1 row, 1 column (expecting 2 cols)
-do_test shell5-1.4.3 {
- set in [open shell5.csv w]
- puts $in "1"
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1}]
-} {1 {Error: shell5.csv line 1: expected 2 columns of data but found 1}}
-
-# import file with 1 row, 3 columns (expecting 2 cols)
-do_test shell5-1.4.4 {
- set in [open shell5.csv w]
- puts $in "1|2|3"
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1}]
-} {1 {Error: shell5.csv line 1: expected 2 columns of data but found 3}}
-
-# import file with 1 row, 2 columns
-do_test shell5-1.4.5 {
- set in [open shell5.csv w]
- puts $in "1|2"
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 1}
-
-# import file with 2 rows, 2 columns
-# note we end up with 3 rows because of the 1 row
-# imported above.
-do_test shell5-1.4.6 {
- set in [open shell5.csv w]
- puts $in "2|3"
- puts $in "3|4"
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 3}
-
-# import file with 1 row, 2 columns, using a comma
-do_test shell5-1.4.7 {
- set in [open shell5.csv w]
- puts $in "4,5"
- close $in
- set res [catchcmd "test.db" {.separator ,
-.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 4}
-
-# import file with 1 row, 2 columns, text data
-do_test shell5-1.4.8.1 {
- set in [open shell5.csv w]
- puts $in "5|Now is the time for all good men to come to the aid of their country."
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 5}
-
-do_test shell5-1.4.8.2 {
- catchcmd "test.db" {SELECT b FROM t1 WHERE a='5';}
-} {0 {Now is the time for all good men to come to the aid of their country.}}
-
-# import file with 1 row, 2 columns, quoted text data
-# note that currently sqlite doesn't support quoted fields, and
-# imports the entire field, quotes and all.
-do_test shell5-1.4.9.1 {
- set in [open shell5.csv w]
- puts $in "6|'Now is the time for all good men to come to the aid of their country.'"
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 6}
-
-do_test shell5-1.4.9.2 {
- catchcmd "test.db" {SELECT b FROM t1 WHERE a='6';}
-} {0 {'Now is the time for all good men to come to the aid of their country.'}}
-
-# import file with 1 row, 2 columns, quoted text data
-do_test shell5-1.4.10.1 {
- set in [open shell5.csv w]
- puts $in "7|\"Now is the time for all good men to come to the aid of their country.\""
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT COUNT(*) FROM t1;}]
-} {0 7}
-
-do_test shell5-1.4.10.2 {
- catchcmd "test.db" {SELECT b FROM t1 WHERE a='7';}
-} {0 {"Now is the time for all good men to come to the aid of their country."}}
-
-# check importing very long field
-do_test shell5-1.5.1 {
- set str [string repeat X 999]
- set in [open shell5.csv w]
- puts $in "8|$str"
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t1
-SELECT length(b) FROM t1 WHERE a='8';}]
-} {0 999}
-
-# try importing into a table with a large number of columns.
-# This is limited by SQLITE_MAX_VARIABLE_NUMBER, which defaults to 999.
-set cols 999
-do_test shell5-1.6.1 {
- set sql {CREATE TABLE t2(}
- set data {}
- for {set i 1} {$i<$cols} {incr i} {
- append sql "c$i,"
- append data "$i|"
- }
- append sql "c$cols);"
- append data "$cols"
- catchcmd "test.db" $sql
- set in [open shell5.csv w]
- puts $in $data
- close $in
- set res [catchcmd "test.db" {.import shell5.csv t2
-SELECT COUNT(*) FROM t2;}]
-} {0 1}
-
-# try importing a large number of rows
-set rows 999999
-do_test shell5-1.7.1 {
- set in [open shell5.csv w]
- for {set i 1} {$i<=$rows} {incr i} {
- puts $in $i
- }
- close $in
- set res [catchcmd "test.db" {CREATE TABLE t3(a);
-.import shell5.csv t3
-SELECT COUNT(*) FROM t3;}]
-} [list 0 $rows]
-
-
-puts "CLI tests completed successfully"
diff --git a/lang/sql/sqlite/tool/showdb.c b/lang/sql/sqlite/tool/showdb.c
index c954153c..4d274a7a 100644
--- a/lang/sql/sqlite/tool/showdb.c
+++ b/lang/sql/sqlite/tool/showdb.c
@@ -6,9 +6,14 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+
+#if !defined(_MSC_VER)
#include <unistd.h>
+#endif
+
#include <stdlib.h>
#include <string.h>
+#include "sqlite3.h"
static int pagesize = 1024; /* Size of a database page */
@@ -114,7 +119,7 @@ static unsigned char *print_byte_range(
/*
** Print an entire page of content as hex
*/
-static print_page(int iPg){
+static void print_page(int iPg){
int iStart;
unsigned char *aData;
iStart = (iPg-1)*pagesize;
@@ -126,7 +131,7 @@ static print_page(int iPg){
/* Print a line of decode output showing a 4-byte integer.
*/
-static print_decode_line(
+static void print_decode_line(
unsigned char *aData, /* Content being decoded */
int ofst, int nByte, /* Start and size of decode */
const char *zMsg /* Message to append */
@@ -171,7 +176,7 @@ static void print_db_header(void){
print_decode_line(aData, 56, 4, "Text encoding");
print_decode_line(aData, 60, 4, "User version");
print_decode_line(aData, 64, 4, "Incremental-vacuum mode");
- print_decode_line(aData, 68, 4, "meta[7]");
+ print_decode_line(aData, 68, 4, "Application ID");
print_decode_line(aData, 72, 4, "meta[8]");
print_decode_line(aData, 76, 4, "meta[9]");
print_decode_line(aData, 80, 4, "meta[10]");
@@ -423,7 +428,7 @@ static void decode_trunk_page(
int detail, /* Show leaf pages if true */
int recursive /* Follow the trunk change if true */
){
- int n, i, k;
+ int n, i;
unsigned char *a;
while( pgno>0 ){
a = getContent((pgno-1)*pagesize, pagesize);
@@ -451,6 +456,293 @@ static void decode_trunk_page(
}
/*
+** A short text comment on the use of each page.
+*/
+static char **zPageUse;
+
+/*
+** Add a comment on the use of a page.
+*/
+static void page_usage_msg(int pgno, const char *zFormat, ...){
+ va_list ap;
+ char *zMsg;
+
+ va_start(ap, zFormat);
+ zMsg = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+ if( pgno<=0 || pgno>mxPage ){
+ printf("ERROR: page %d out of range 1..%d: %s\n",
+ pgno, mxPage, zMsg);
+ sqlite3_free(zMsg);
+ return;
+ }
+ if( zPageUse[pgno]!=0 ){
+ printf("ERROR: page %d used multiple times:\n", pgno);
+ printf("ERROR: previous: %s\n", zPageUse[pgno]);
+ printf("ERROR: current: %s\n", zMsg);
+ sqlite3_free(zPageUse[pgno]);
+ }
+ zPageUse[pgno] = zMsg;
+}
+
+/*
+** Find overflow pages of a cell and describe their usage.
+*/
+static void page_usage_cell(
+ unsigned char cType, /* Page type */
+ unsigned char *a, /* Cell content */
+ int pgno, /* page containing the cell */
+ int cellno /* Index of the cell on the page */
+){
+ int i;
+ int n = 0;
+ i64 nPayload;
+ i64 rowid;
+ int nLocal;
+ i = 0;
+ if( cType<=5 ){
+ a += 4;
+ n += 4;
+ }
+ if( cType!=5 ){
+ i = decodeVarint(a, &nPayload);
+ a += i;
+ n += i;
+ nLocal = localPayload(nPayload, cType);
+ }else{
+ nPayload = nLocal = 0;
+ }
+ if( cType==5 || cType==13 ){
+ i = decodeVarint(a, &rowid);
+ a += i;
+ n += i;
+ }
+ if( nLocal<nPayload ){
+ int ovfl = decodeInt32(a+nLocal);
+ int cnt = 0;
+ while( ovfl && (cnt++)<mxPage ){
+ page_usage_msg(ovfl, "overflow %d from cell %d of page %d",
+ cnt, cellno, pgno);
+ a = getContent((ovfl-1)*pagesize, 4);
+ ovfl = decodeInt32(a);
+ free(a);
+ }
+ }
+}
+
+
+/*
+** Describe the usage of a b-tree page
+*/
+static void page_usage_btree(
+ int pgno, /* Page to describe */
+ int parent, /* Parent of this page. 0 for root pages */
+ int idx, /* Which child of the parent */
+ const char *zName /* Name of the table */
+){
+ unsigned char *a;
+ const char *zType = "corrupt node";
+ int nCell;
+ int i;
+ int hdr = pgno==1 ? 100 : 0;
+
+ if( pgno<=0 || pgno>mxPage ) return;
+ a = getContent((pgno-1)*pagesize, pagesize);
+ switch( a[hdr] ){
+ case 2: zType = "interior node of index"; break;
+ case 5: zType = "interior node of table"; break;
+ case 10: zType = "leaf of index"; break;
+ case 13: zType = "leaf of table"; break;
+ }
+ if( parent ){
+ page_usage_msg(pgno, "%s [%s], child %d of page %d",
+ zType, zName, idx, parent);
+ }else{
+ page_usage_msg(pgno, "root %s [%s]", zType, zName);
+ }
+ nCell = a[hdr+3]*256 + a[hdr+4];
+ if( a[hdr]==2 || a[hdr]==5 ){
+ int cellstart = hdr+12;
+ unsigned int child;
+ for(i=0; i<nCell; i++){
+ int ofst;
+
+ ofst = cellstart + i*2;
+ ofst = a[ofst]*256 + a[ofst+1];
+ child = decodeInt32(a+ofst);
+ page_usage_btree(child, pgno, i, zName);
+ }
+ child = decodeInt32(a+cellstart-4);
+ page_usage_btree(child, pgno, i, zName);
+ }
+ if( a[hdr]==2 || a[hdr]==10 || a[hdr]==13 ){
+ int cellstart = hdr + 8 + 4*(a[hdr]<=5);
+ for(i=0; i<nCell; i++){
+ int ofst;
+ ofst = cellstart + i*2;
+ ofst = a[ofst]*256 + a[ofst+1];
+ page_usage_cell(a[hdr], a+ofst, pgno, i);
+ }
+ }
+ free(a);
+}
+
+/*
+** Determine page usage by the freelist
+*/
+static void page_usage_freelist(int pgno){
+ unsigned char *a;
+ int cnt = 0;
+ int i;
+ int n;
+ int iNext;
+ int parent = 1;
+
+ while( pgno>0 && pgno<=mxPage && (cnt++)<mxPage ){
+ page_usage_msg(pgno, "freelist trunk #%d child of %d", cnt, parent);
+ a = getContent((pgno-1)*pagesize, pagesize);
+ iNext = decodeInt32(a);
+ n = decodeInt32(a+4);
+ for(i=0; i<n; i++){
+ int child = decodeInt32(a + (i*4+8));
+ page_usage_msg(child, "freelist leaf, child %d of trunk page %d",
+ i, pgno);
+ }
+ free(a);
+ parent = pgno;
+ pgno = iNext;
+ }
+}
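
The loop above relies on the standard freelist trunk-page layout: the first four bytes hold the page number of the next trunk (zero for the last one), the next four hold the count of leaf page numbers, and each following 4-byte slot holds one leaf page number. A minimal standalone sketch of that decoding (the helper name be32 and the sample buffer are illustrative only, not part of the patch):

    #include <stdio.h>

    /* Big-endian 32-bit read, mirroring decodeInt32() used above */
    static unsigned int be32(const unsigned char *a){
      return ((unsigned)a[0]<<24) | (a[1]<<16) | (a[2]<<8) | a[3];
    }

    int main(void){
      /* Made-up trunk page: no next trunk, two leaf pages (9 and 12) */
      unsigned char trunk[16] = {
        0,0,0,0,      /* next trunk page number (0 = end of list) */
        0,0,0,2,      /* number of leaf page numbers that follow  */
        0,0,0,9,
        0,0,0,12
      };
      unsigned int iNext = be32(trunk);
      unsigned int n = be32(trunk+4);
      unsigned int i;
      printf("next trunk: %u, %u leaves:", iNext, n);
      for(i=0; i<n; i++) printf(" %u", be32(trunk + 8 + 4*i));
      printf("\n");
      return 0;
    }
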
+
+/*
+** Determine pages used as PTRMAP pages
+*/
+static void page_usage_ptrmap(unsigned char *a){
+ if( a[55] ){
+ int usable = pagesize - a[20];
+ int pgno = 2;
+ int perPage = usable/5;
+ while( pgno<=mxPage ){
+ page_usage_msg(pgno, "PTRMAP page covering %d..%d",
+ pgno+1, pgno+perPage);
+ pgno += perPage + 1;
+ }
+ }
+}
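
For concreteness, with a 1024-byte page and no reserved space the loop above uses usable/5 = 204 entries per PTRMAP page, so page 2 maps pages 3..206, the next PTRMAP page is 207, and so on. A minimal sketch of that enumeration (the page size, reserved-byte count, and mxPage value are illustrative, not taken from the patch):

    #include <stdio.h>

    int main(void){
      int pagesize = 1024;   /* assumed page size */
      int reserved = 0;      /* assumed reserved bytes per page (header offset 20) */
      int mxPage   = 1000;   /* assumed size of the database in pages */
      int usable   = pagesize - reserved;
      int perPage  = usable/5;        /* 5 bytes per pointer-map entry */
      int pgno;
      for(pgno=2; pgno<=mxPage; pgno += perPage+1){
        printf("page %4d is a PTRMAP covering %d..%d\n",
               pgno, pgno+1, pgno+perPage);
      }
      return 0;
    }
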
+
+/*
+** Try to figure out how every page in the database file is being used.
+*/
+static void page_usage_report(const char *zDbName){
+ int i, j;
+ int rc;
+ sqlite3 *db;
+ sqlite3_stmt *pStmt;
+ unsigned char *a;
+ char zQuery[200];
+
+ /* Avoid the pathological case */
+ if( mxPage<1 ){
+ printf("empty database\n");
+ return;
+ }
+
+ /* Open the database file */
+ rc = sqlite3_open(zDbName, &db);
+ if( rc ){
+ printf("cannot open database: %s\n", sqlite3_errmsg(db));
+ sqlite3_close(db);
+ return;
+ }
+
+ /* Set up the global zPageUse[] array to record how each page
+ ** of the database file is used */
+ zPageUse = sqlite3_malloc( sizeof(zPageUse[0])*(mxPage+1) );
+ if( zPageUse==0 ) out_of_memory();
+ memset(zPageUse, 0, sizeof(zPageUse[0])*(mxPage+1));
+
+ /* Discover the usage of each page */
+ a = getContent(0, 100);
+ page_usage_freelist(decodeInt32(a+32));
+ page_usage_ptrmap(a);
+ free(a);
+ page_usage_btree(1, 0, 0, "sqlite_master");
+ sqlite3_exec(db, "PRAGMA writable_schema=ON", 0, 0, 0);
+ for(j=0; j<2; j++){
+ sqlite3_snprintf(sizeof(zQuery), zQuery,
+ "SELECT type, name, rootpage FROM SQLITE_MASTER WHERE rootpage"
+ " ORDER BY rowid %s", j?"DESC":"");
+ rc = sqlite3_prepare_v2(db, zQuery, -1, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ while( sqlite3_step(pStmt)==SQLITE_ROW ){
+ int pgno = sqlite3_column_int(pStmt, 2);
+ page_usage_btree(pgno, 0, 0, (const char*)sqlite3_column_text(pStmt,1));
+ }
+ }else{
+ printf("ERROR: cannot query database: %s\n", sqlite3_errmsg(db));
+ }
+ rc = sqlite3_finalize(pStmt);
+ if( rc==SQLITE_OK ) break;
+ }
+ sqlite3_close(db);
+
+ /* Print the report and free memory used */
+ for(i=1; i<=mxPage; i++){
+ printf("%5d: %s\n", i, zPageUse[i] ? zPageUse[i] : "???");
+ sqlite3_free(zPageUse[i]);
+ }
+ sqlite3_free(zPageUse);
+ zPageUse = 0;
+}
+
+/*
+** Show the PTRMAP pages of the database file and the content of each.
+*/
+static void ptrmap_coverage_report(const char *zDbName){
+ unsigned int pgno;
+ unsigned char *aHdr;
+ unsigned char *a;
+ int usable;
+ int perPage;
+ unsigned int i;
+
+ /* Avoid the pathological case */
+ if( mxPage<1 ){
+ printf("empty database\n");
+ return;
+ }
+
+ /* Make sure PTRMAPs are used in this database */
+ aHdr = getContent(0, 100);
+ if( aHdr[55]==0 ){
+ printf("database does not use PTRMAP pages\n");
+ return;
+ }
+ usable = pagesize - aHdr[20];
+ perPage = usable/5;
+ free(aHdr);
+ printf("%5d: root of sqlite_master\n", 1);
+ for(pgno=2; pgno<=mxPage; pgno += perPage+1){
+ printf("%5d: PTRMAP page covering %d..%d\n", pgno,
+ pgno+1, pgno+perPage);
+ a = getContent((pgno-1)*pagesize, usable);
+ for(i=0; i+5<=usable && pgno+1+i/5<=mxPage; i+=5){
+ const char *zType = "???";
+ unsigned int iFrom = decodeInt32(&a[i+1]);
+ switch( a[i] ){
+ case 1: zType = "b-tree root page"; break;
+ case 2: zType = "freelist page"; break;
+ case 3: zType = "first page of overflow"; break;
+ case 4: zType = "later page of overflow"; break;
+ case 5: zType = "b-tree non-root page"; break;
+ }
+ printf("%5d: %s, parent=%u\n", pgno+1+i/5, zType, iFrom);
+ }
+ free(a);
+ }
+}
+
+/*
** Print a usage comment
*/
static void usage(const char *argv0){
@@ -458,13 +750,15 @@ static void usage(const char *argv0){
fprintf(stderr,
"args:\n"
" dbheader Show database header\n"
+ " pgidx Index of how each page is used\n"
+ " ptrmap Show all PTRMAP page content\n"
" NNN..MMM Show hex of pages NNN through MMM\n"
" NNN..end Show hex of pages NNN through end of file\n"
" NNNb Decode btree page NNN\n"
" NNNbc Decode btree page NNN and show content\n"
" NNNbm Decode btree page NNN and show a layout map\n"
" NNNt Decode freelist trunk page NNN\n"
- " NNNtd Show leave freelist pages on the decode\n"
+ " NNNtd Show leaf freelist pages on the decode\n"
" NNNtr Recurisvely decode freelist starting at NNN\n"
);
}
@@ -503,6 +797,18 @@ int main(int argc, char **argv){
print_db_header();
continue;
}
+ if( strcmp(argv[i], "pgidx")==0 ){
+ page_usage_report(argv[1]);
+ continue;
+ }
+ if( strcmp(argv[i], "ptrmap")==0 ){
+ ptrmap_coverage_report(argv[1]);
+ continue;
+ }
+ if( strcmp(argv[i], "help")==0 ){
+ usage(argv[0]);
+ continue;
+ }
if( !isdigit(argv[i][0]) ){
fprintf(stderr, "%s: unknown option: [%s]\n", argv[0], argv[i]);
continue;
@@ -528,7 +834,6 @@ int main(int argc, char **argv){
free(a);
continue;
}else if( zLeft && zLeft[0]=='t' ){
- unsigned char *a;
int detail = 0;
int recursive = 0;
int i;
@@ -554,4 +859,5 @@ int main(int argc, char **argv){
}
}
close(db);
+ return 0;
}
diff --git a/lang/sql/sqlite/tool/showwal.c b/lang/sql/sqlite/tool/showwal.c
index ae25a59f..2888c10a 100644
--- a/lang/sql/sqlite/tool/showwal.c
+++ b/lang/sql/sqlite/tool/showwal.c
@@ -18,6 +18,65 @@ static int perLine = 16; /* HEX elements to print per line */
typedef long long int i64; /* Datatype for 64-bit integers */
+/* Information for computing the checksum */
+typedef struct Cksum Cksum;
+struct Cksum {
+ int bSwap; /* True to do byte swapping on 32-bit words */
+ unsigned s0, s1; /* Current checksum value */
+};
+
+/*
+** extract a 32-bit big-endian integer
+*/
+static unsigned int getInt32(const unsigned char *a){
+ unsigned int x = (a[0]<<24) + (a[1]<<16) + (a[2]<<8) + a[3];
+ return x;
+}
+
+/*
+** Swap bytes on a 32-bit unsigned integer
+*/
+static unsigned int swab32(unsigned int x){
+ return (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8)
+ + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24);
+}
+
+/* Extend the checksum. Reinitialize the checksum if bInit is true.
+*/
+static void extendCksum(
+ Cksum *pCksum,
+ unsigned char *aData,
+ unsigned int nByte,
+ int bInit
+){
+ unsigned int *a32;
+ if( bInit ){
+ int a = 0;
+ *((char*)&a) = 1;
+ if( a==1 ){
+ /* Host is little-endian */
+ pCksum->bSwap = getInt32(aData)!=0x377f0682;
+ }else{
+ /* Host is big-endian */
+ pCksum->bSwap = getInt32(aData)!=0x377f0683;
+ }
+ pCksum->s0 = 0;
+ pCksum->s1 = 0;
+ }
+ a32 = (unsigned int*)aData;
+ while( nByte>0 ){
+ unsigned int x0 = a32[0];
+ unsigned int x1 = a32[1];
+ if( pCksum->bSwap ){
+ x0 = swab32(x0);
+ x1 = swab32(x1);
+ }
+ pCksum->s0 += x0 + pCksum->s1;
+ pCksum->s1 += x1 + pCksum->s0;
+ nByte -= 8;
+ a32 += 2;
+ }
+}
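
The routine above implements the WAL's cumulative two-word checksum: content is consumed eight bytes at a time, with s0 and s1 feeding back into each other, and the words are byte-swapped when the WAL magic shows the file was written with the opposite byte order from the host. A minimal native-order sketch of the same accumulation (the function name walCksum and the sample words are illustrative only, not part of the patch):

    #include <stdio.h>

    /* Accumulate s0/s1 over an 8-byte-aligned buffer, as extendCksum()
    ** does when no byte swapping is required. */
    static void walCksum(const unsigned int *a32, unsigned int nByte,
                         unsigned int *s0, unsigned int *s1){
      while( nByte>0 ){
        *s0 += a32[0] + *s1;
        *s1 += a32[1] + *s0;
        a32 += 2;
        nByte -= 8;
      }
    }

    int main(void){
      /* Six made-up 32-bit words standing in for the first 24 header bytes */
      unsigned int hdr[6] = {0x377f0682, 3007000, 4096, 1, 0, 0};
      unsigned int s0 = 0, s1 = 0;
      walCksum(hdr, sizeof(hdr), &s0, &s1);
      printf("s0=0x%08x s1=0x%08x\n", s0, s1);
      return 0;
    }
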
/*
** Convert the var-int format into i64. Return the number of bytes
@@ -152,39 +211,46 @@ static void print_frame(int iFrame){
}
/*
-** extract a 32-bit big-endian integer
-*/
-static unsigned int getInt32(const unsigned char *a){
- unsigned int x = (a[0]<<24) + (a[1]<<16) + (a[2]<<8) + a[3];
- return x;
-}
-
-/*
-** Print an entire page of content as hex
+** Summarize a single frame on a single line.
*/
-static void print_oneline_frame(int iFrame){
+static void print_oneline_frame(int iFrame, Cksum *pCksum){
int iStart;
unsigned char *aData;
+ unsigned int s0, s1;
iStart = 32 + (iFrame-1)*(pagesize+24);
aData = getContent(iStart, 24);
- fprintf(stdout, "Frame %4d: %6d %6d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ extendCksum(pCksum, aData, 8, 0);
+ extendCksum(pCksum, getContent(iStart+24, pagesize), pagesize, 0);
+ s0 = getInt32(aData+16);
+ s1 = getInt32(aData+20);
+ fprintf(stdout, "Frame %4d: %6d %6d 0x%08x,%08x 0x%08x,%08x %s\n",
iFrame,
getInt32(aData),
getInt32(aData+4),
getInt32(aData+8),
getInt32(aData+12),
- getInt32(aData+16),
- getInt32(aData+20)
+ s0,
+ s1,
+ (s0==pCksum->s0 && s1==pCksum->s1) ? "" : "cksum-fail"
);
+
+ /* Reset the checksum so that a single frame checksum failure will not
+ ** cause all subsequent frames to also show a failure. */
+ pCksum->s0 = s0;
+ pCksum->s1 = s1;
free(aData);
}
/*
** Decode the WAL header.
*/
-static void print_wal_header(void){
+static void print_wal_header(Cksum *pCksum){
unsigned char *aData;
aData = getContent(0, 32);
+ if( pCksum ){
+ extendCksum(pCksum, aData, 24, 1);
+ printf("Checksum byte order: %s\n", pCksum->bSwap ? "swapped" : "native");
+ }
printf("WAL Header:\n");
print_decode_line(aData, 0, 4,1,"Magic. 0x377f0682 (le) or 0x377f0683 (be)");
print_decode_line(aData, 4, 4, 0, "File format");
@@ -194,60 +260,199 @@ static void print_wal_header(void){
print_decode_line(aData, 20,4, 1, "Salt-2");
print_decode_line(aData, 24,4, 1, "Checksum-1");
print_decode_line(aData, 28,4, 1, "Checksum-2");
+ if( pCksum ){
+ if( pCksum->s0!=getInt32(aData+24) ){
+ printf("**** cksum-1 mismatch: 0x%08x\n", pCksum->s0);
+ }
+ if( pCksum->s1!=getInt32(aData+28) ){
+ printf("**** cksum-2 mismatch: 0x%08x\n", pCksum->s1);
+ }
+ }
free(aData);
}
+/*
+** Describe cell content.
+*/
+static int describeContent(
+ unsigned char *a, /* Cell content */
+ int nLocal, /* Bytes in a[] */
+ char *zDesc /* Write description here */
+){
+ int nDesc = 0;
+ int n, i, j;
+ i64 x, v;
+ const unsigned char *pData;
+ const unsigned char *pLimit;
+ char sep = ' ';
+
+ pLimit = &a[nLocal];
+ n = decodeVarint(a, &x);
+ pData = &a[x];
+ a += n;
+ i = x - n;
+ while( i>0 && pData<=pLimit ){
+ n = decodeVarint(a, &x);
+ a += n;
+ i -= n;
+ nLocal -= n;
+ zDesc[0] = sep;
+ sep = ',';
+ nDesc++;
+ zDesc++;
+ if( x==0 ){
+ sprintf(zDesc, "*"); /* NULL is a "*" */
+ }else if( x>=1 && x<=6 ){
+ v = (signed char)pData[0];
+ pData++;
+ switch( x ){
+ case 6: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2;
+ case 5: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2;
+ case 4: v = (v<<8) + pData[0]; pData++;
+ case 3: v = (v<<8) + pData[0]; pData++;
+ case 2: v = (v<<8) + pData[0]; pData++;
+ }
+ sprintf(zDesc, "%lld", v);
+ }else if( x==7 ){
+ sprintf(zDesc, "real");
+ pData += 8;
+ }else if( x==8 ){
+ sprintf(zDesc, "0");
+ }else if( x==9 ){
+ sprintf(zDesc, "1");
+ }else if( x>=12 ){
+ int size = (x-12)/2;
+ if( (x&1)==0 ){
+ sprintf(zDesc, "blob(%d)", size);
+ }else{
+ sprintf(zDesc, "txt(%d)", size);
+ }
+ pData += size;
+ }
+ j = strlen(zDesc);
+ zDesc += j;
+ nDesc += j;
+ }
+ return nDesc;
+}
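
As a worked example of what describeContent() renders: a record holding the two values (1, 'hi') has a 3-byte header {0x03, 0x01, 0x11}, where 0x01 is the serial type for an 8-bit integer and 0x11 (= 13 + 2*2) is the serial type for 2 bytes of text, followed by the body {0x01, 'h', 'i'}; the routine above would summarize that cell roughly as "1,txt(2)". A tiny sketch that decodes the same record by hand (the byte values are made up for illustration):

    #include <stdio.h>

    int main(void){
      /* Made-up record for (1, 'hi'): 3-byte header, then the body */
      unsigned char rec[] = {0x03, 0x01, 0x11, 0x01, 'h', 'i'};
      int hdrSize = rec[0];                 /* header length varint = 3    */
      int v = (signed char)rec[hdrSize];    /* serial type 1: 8-bit int    */
      int nText = (rec[2]-13)/2;            /* serial type 17: 2-byte text */
      printf("header=%d bytes, first value=%d, second value=txt(%d)\n",
             hdrSize, v, nText);
      return 0;
    }
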
+
+/*
+** Compute the local payload size given the total payload size and
+** the page size.
+*/
+static int localPayload(i64 nPayload, char cType){
+ int maxLocal;
+ int minLocal;
+ int surplus;
+ int nLocal;
+ if( cType==13 ){
+ /* Table leaf */
+ maxLocal = pagesize-35;
+ minLocal = (pagesize-12)*32/255-23;
+ }else{
+ maxLocal = (pagesize-12)*64/255-23;
+ minLocal = (pagesize-12)*32/255-23;
+ }
+ if( nPayload>maxLocal ){
+ surplus = minLocal + (nPayload-minLocal)%(pagesize-4);
+ if( surplus<=maxLocal ){
+ nLocal = surplus;
+ }else{
+ nLocal = minLocal;
+ }
+ }else{
+ nLocal = nPayload;
+ }
+ return nLocal;
+}
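
As a worked example of the split above (illustrative numbers, not from the patch): for a table-leaf cell (cType 13) on a 1024-byte page, maxLocal is 989 and minLocal is 103, so a 3000-byte payload keeps 103 + (3000-103) % 1020 = 960 bytes in the cell and spills the remaining 2040 bytes onto two overflow pages of 1020 payload bytes each. A small sketch reusing the same arithmetic:

    #include <stdio.h>

    /* Same arithmetic as localPayload() above for a table leaf (cType 13);
    ** pagesize and nPayload are illustrative values. */
    int main(void){
      int pagesize = 1024;
      long long nPayload = 3000;
      int maxLocal = pagesize - 35;               /* 989 */
      int minLocal = (pagesize-12)*32/255 - 23;   /* 103 */
      int nLocal;
      if( nPayload > maxLocal ){
        int surplus = minLocal + (int)((nPayload - minLocal) % (pagesize - 4));
        nLocal = surplus<=maxLocal ? surplus : minLocal;
      }else{
        nLocal = (int)nPayload;
      }
      printf("local=%d  overflow=%lld bytes (%lld overflow pages)\n",
             nLocal, nPayload - nLocal,
             (nPayload - nLocal + pagesize - 5)/(pagesize - 4));
      return 0;
    }
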
/*
** Create a description for a single cell.
+**
+** The return value is the local cell size.
*/
-static int describeCell(unsigned char cType, unsigned char *a, char **pzDesc){
+static int describeCell(
+ unsigned char cType, /* Page type */
+ unsigned char *a, /* Cell content */
+ int showCellContent, /* Show cell content if true */
+ char **pzDesc /* Store description here */
+){
int i;
int nDesc = 0;
int n = 0;
int leftChild;
i64 nPayload;
i64 rowid;
- static char zDesc[100];
+ int nLocal;
+ static char zDesc[1000];
i = 0;
if( cType<=5 ){
leftChild = ((a[0]*256 + a[1])*256 + a[2])*256 + a[3];
a += 4;
n += 4;
- sprintf(zDesc, "left-child: %d ", leftChild);
+ sprintf(zDesc, "lx: %d ", leftChild);
nDesc = strlen(zDesc);
}
if( cType!=5 ){
i = decodeVarint(a, &nPayload);
a += i;
n += i;
- sprintf(&zDesc[nDesc], "sz: %lld ", nPayload);
+ sprintf(&zDesc[nDesc], "n: %lld ", nPayload);
nDesc += strlen(&zDesc[nDesc]);
+ nLocal = localPayload(nPayload, cType);
+ }else{
+ nPayload = nLocal = 0;
}
if( cType==5 || cType==13 ){
i = decodeVarint(a, &rowid);
a += i;
n += i;
- sprintf(&zDesc[nDesc], "rowid: %lld ", rowid);
+ sprintf(&zDesc[nDesc], "r: %lld ", rowid);
nDesc += strlen(&zDesc[nDesc]);
}
+ if( nLocal<nPayload ){
+ int ovfl;
+ unsigned char *b = &a[nLocal];
+ ovfl = ((b[0]*256 + b[1])*256 + b[2])*256 + b[3];
+ sprintf(&zDesc[nDesc], "ov: %d ", ovfl);
+ nDesc += strlen(&zDesc[nDesc]);
+ n += 4;
+ }
+ if( showCellContent && cType!=5 ){
+ nDesc += describeContent(a, nLocal, &zDesc[nDesc-1]);
+ }
*pzDesc = zDesc;
- return n;
+ return nLocal+n;
}
/*
** Decode a btree page
*/
-static void decode_btree_page(unsigned char *a, int pgno, int hdrSize){
+static void decode_btree_page(
+ unsigned char *a, /* Content of the btree page to be decoded */
+ int pgno, /* Page number */
+ int hdrSize, /* Size of the page1-header in bytes */
+ const char *zArgs /* Flags to control formatting */
+){
const char *zType = "unknown";
int nCell;
- int i;
+ int i, j;
int iCellPtr;
+ int showCellContent = 0;
+ int showMap = 0;
+ char *zMap = 0;
switch( a[0] ){
case 2: zType = "index interior node"; break;
case 5: zType = "table interior node"; break;
case 10: zType = "index leaf"; break;
case 13: zType = "table leaf"; break;
}
+ while( zArgs[0] ){
+ switch( zArgs[0] ){
+ case 'c': showCellContent = 1; break;
+ case 'm': showMap = 1; break;
+ }
+ zArgs++;
+ }
printf("Decode of btree page %d:\n", pgno);
print_decode_line(a, 0, 1, 0, zType);
print_decode_line(a, 1, 2, 0, "Offset to first freeblock");
@@ -261,13 +466,40 @@ static void decode_btree_page(unsigned char *a, int pgno, int hdrSize){
}else{
iCellPtr = 8;
}
+ if( nCell>0 ){
+ printf(" key: lx=left-child n=payload-size r=rowid\n");
+ }
+ if( showMap ){
+ zMap = malloc(pagesize);
+ memset(zMap, '.', pagesize);
+ memset(zMap, '1', hdrSize);
+ memset(&zMap[hdrSize], 'H', iCellPtr);
+ memset(&zMap[hdrSize+iCellPtr], 'P', 2*nCell);
+ }
for(i=0; i<nCell; i++){
int cofst = iCellPtr + i*2;
char *zDesc;
+ int n;
+
cofst = a[cofst]*256 + a[cofst+1];
- describeCell(a[0], &a[cofst-hdrSize], &zDesc);
+ n = describeCell(a[0], &a[cofst-hdrSize], showCellContent, &zDesc);
+ if( showMap ){
+ char zBuf[30];
+ memset(&zMap[cofst], '*', n);
+ zMap[cofst] = '[';
+ zMap[cofst+n-1] = ']';
+ sprintf(zBuf, "%d", i);
+ j = strlen(zBuf);
+ if( j<=n-2 ) memcpy(&zMap[cofst+1], zBuf, j);
+ }
printf(" %03x: cell[%d] %s\n", cofst, i, zDesc);
}
+ if( showMap ){
+ for(i=0; i<pagesize; i+=64){
+ printf(" %03x: %.64s\n", i, &zMap[i]);
+ }
+ free(zMap);
+ }
}
int main(int argc, char **argv){
@@ -298,15 +530,18 @@ int main(int argc, char **argv){
printf("Available pages: 1..%d\n", mxFrame);
if( argc==2 ){
int i;
- print_wal_header();
- for(i=1; i<=mxFrame; i++) print_oneline_frame(i);
+ Cksum x;
+ print_wal_header(&x);
+ for(i=1; i<=mxFrame; i++){
+ print_oneline_frame(i, &x);
+ }
}else{
int i;
for(i=2; i<argc; i++){
int iStart, iEnd;
char *zLeft;
if( strcmp(argv[i], "header")==0 ){
- print_wal_header();
+ print_wal_header(0);
continue;
}
if( !isdigit(argv[i][0]) ){
@@ -318,11 +553,11 @@ int main(int argc, char **argv){
iEnd = mxFrame;
}else if( zLeft && zLeft[0]=='.' && zLeft[1]=='.' ){
iEnd = strtol(&zLeft[2], 0, 0);
-#if 0
}else if( zLeft && zLeft[0]=='b' ){
int ofst, nByte, hdrSize;
unsigned char *a;
if( iStart==1 ){
+ hdrSize = 100;
ofst = hdrSize = 100;
nByte = pagesize-100;
}else{
@@ -330,11 +565,11 @@ int main(int argc, char **argv){
ofst = (iStart-1)*pagesize;
nByte = pagesize;
}
+ ofst = 32 + hdrSize + (iStart-1)*(pagesize+24) + 24;
a = getContent(ofst, nByte);
- decode_btree_page(a, iStart, hdrSize);
+ decode_btree_page(a, iStart, hdrSize, zLeft+1);
free(a);
continue;
-#endif
}else{
iEnd = iStart;
}
diff --git a/lang/sql/sqlite/tool/spaceanal.tcl b/lang/sql/sqlite/tool/spaceanal.tcl
index bf6244e8..a227b852 100644
--- a/lang/sql/sqlite/tool/spaceanal.tcl
+++ b/lang/sql/sqlite/tool/spaceanal.tcl
@@ -4,36 +4,114 @@
#
if {[catch {
-
# Get the name of the database to analyze
#
-#set argv $argv0
-if {[llength $argv]!=1} {
+proc usage {} {
+ set argv0 [file rootname [file tail [info nameofexecutable]]]
puts stderr "Usage: $argv0 database-name"
exit 1
}
-set file_to_analyze [lindex $argv 0]
-if {![file exists $file_to_analyze]} {
- puts stderr "No such file: $file_to_analyze"
+set file_to_analyze {}
+set flags(-pageinfo) 0
+set flags(-stats) 0
+append argv {}
+foreach arg $argv {
+ if {[regexp {^-+pageinfo$} $arg]} {
+ set flags(-pageinfo) 1
+ } elseif {[regexp {^-+stats$} $arg]} {
+ set flags(-stats) 1
+ } elseif {[regexp {^-} $arg]} {
+ puts stderr "Unknown option: $arg"
+ usage
+ } elseif {$file_to_analyze!=""} {
+ usage
+ } else {
+ set file_to_analyze $arg
+ }
+}
+if {$file_to_analyze==""} usage
+set root_filename $file_to_analyze
+regexp {^file:(//)?([^?]*)} $file_to_analyze all x1 root_filename
+if {![file exists $root_filename]} {
+ puts stderr "No such file: $root_filename"
exit 1
}
-if {![file readable $file_to_analyze]} {
- puts stderr "File is not readable: $file_to_analyze"
+if {![file readable $root_filename]} {
+ puts stderr "File is not readable: $root_filename"
exit 1
}
-if {[file size $file_to_analyze]<512} {
- puts stderr "Empty or malformed database: $file_to_analyze"
+set true_file_size [file size $root_filename]
+if {$true_file_size<512} {
+ puts stderr "Empty or malformed database: $root_filename"
exit 1
}
+# Compute the total file size assuming test_multiplexor is being used.
+# Assume that SQLITE_ENABLE_8_3_NAMES might be enabled
+#
+set extension [file extension $root_filename]
+set pattern $root_filename
+append pattern {[0-3][0-9][0-9]}
+foreach f [glob -nocomplain $pattern] {
+ incr true_file_size [file size $f]
+ set extension {}
+}
+if {[string length $extension]>=2 && [string length $extension]<=4} {
+ set pattern [file rootname $root_filename]
+ append pattern {.[0-3][0-9][0-9]}
+ foreach f [glob -nocomplain $pattern] {
+ incr true_file_size [file size $f]
+ }
+}
+
# Open the database
#
-sqlite3 db [lindex $argv 0]
+if {[catch {sqlite3 db $file_to_analyze -uri 1} msg]} {
+ puts stderr "error trying to open $file_to_analyze: $msg"
+ exit 1
+}
register_dbstat_vtab db
-set pageSize [db one {PRAGMA page_size}]
+db eval {SELECT count(*) FROM sqlite_master}
+set pageSize [expr {wide([db one {PRAGMA page_size}])}]
-#set DB [btree_open [lindex $argv 0] 1000 0]
+if {$flags(-pageinfo)} {
+ db eval {CREATE VIRTUAL TABLE temp.stat USING dbstat}
+ db eval {SELECT name, path, pageno FROM temp.stat ORDER BY pageno} {
+ puts "$pageno $name $path"
+ }
+ exit 0
+}
+if {$flags(-stats)} {
+ db eval {CREATE VIRTUAL TABLE temp.stat USING dbstat}
+ puts "BEGIN;"
+ puts "CREATE TABLE stats("
+ puts " name STRING, /* Name of table or index */"
+ puts " path INTEGER, /* Path to page from root */"
+ puts " pageno INTEGER, /* Page number */"
+ puts " pagetype STRING, /* 'internal', 'leaf' or 'overflow' */"
+ puts " ncell INTEGER, /* Cells on page (0 for overflow) */"
+ puts " payload INTEGER, /* Bytes of payload on this page */"
+ puts " unused INTEGER, /* Bytes of unused space on this page */"
+ puts " mx_payload INTEGER, /* Largest payload size of all cells */"
+ puts " pgoffset INTEGER, /* Offset of page in file */"
+ puts " pgsize INTEGER /* Size of the page */"
+ puts ");"
+ db eval {SELECT quote(name) || ',' ||
+ quote(path) || ',' ||
+ quote(pageno) || ',' ||
+ quote(pagetype) || ',' ||
+ quote(ncell) || ',' ||
+ quote(payload) || ',' ||
+ quote(unused) || ',' ||
+ quote(mx_payload) || ',' ||
+ quote(pgoffset) || ',' ||
+ quote(pgsize) AS x FROM stat} {
+ puts "INSERT INTO stats VALUES($x);"
+ }
+ puts "COMMIT;"
+ exit 0
+}
# In-memory database for collecting statistics. This script loops through
# the tables and indices in the database being analyzed, adding a row for each
@@ -41,8 +119,7 @@ set pageSize [db one {PRAGMA page_size}]
# queries the in-memory db to produce the space-analysis report.
#
sqlite3 mem :memory:
-set tabledef\
-{CREATE TABLE space_used(
+set tabledef {CREATE TABLE space_used(
name clob, -- Name of a table or index in the database file
tblname clob, -- Name of associated table
is_index boolean, -- TRUE if it is an index, false for a table
@@ -58,17 +135,17 @@ set tabledef\
int_unused int, -- Number of unused bytes on interior pages
leaf_unused int, -- Number of unused bytes on primary pages
ovfl_unused int, -- Number of unused bytes on overflow pages
- gap_cnt int -- Number of gaps in the page layout
+ gap_cnt int, -- Number of gaps in the page layout
+ compressed_size int -- Total bytes stored on disk
);}
mem eval $tabledef
# Create a temporary "dbstat" virtual table.
#
-db eval {
- CREATE VIRTUAL TABLE temp.stat USING dbstat;
- CREATE TEMP TABLE dbstat AS SELECT * FROM temp.stat ORDER BY name, path;
- DROP TABLE temp.stat;
-}
+db eval {CREATE VIRTUAL TABLE temp.stat USING dbstat}
+db eval {CREATE TEMP TABLE dbstat AS SELECT * FROM temp.stat
+ ORDER BY name, path}
+db eval {DROP TABLE temp.stat}
proc isleaf {pagetype is_index} {
return [expr {$pagetype == "leaf" || ($pagetype == "internal" && $is_index)}]
@@ -84,6 +161,8 @@ db func isleaf isleaf
db func isinternal isinternal
db func isoverflow isoverflow
+set isCompressed 0
+set compressOverhead 0
set sql { SELECT name, tbl_name FROM sqlite_master WHERE rootpage>0 }
foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
@@ -101,10 +180,18 @@ foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
sum(isoverflow(pagetype, $is_index)) AS ovfl_pages,
sum(isinternal(pagetype, $is_index) * unused) AS int_unused,
sum(isleaf(pagetype, $is_index) * unused) AS leaf_unused,
- sum(isoverflow(pagetype, $is_index) * unused) AS ovfl_unused
+ sum(isoverflow(pagetype, $is_index) * unused) AS ovfl_unused,
+ sum(pgsize) AS compressed_size
FROM temp.dbstat WHERE name = $name
} break
+ set total_pages [expr {$leaf_pages+$int_pages+$ovfl_pages}]
+ set storage [expr {$total_pages*$pageSize}]
+ if {!$isCompressed && $storage>$compressed_size} {
+ set isCompressed 1
+ set compressOverhead 14
+ }
+
# Column 'gap_cnt' is set to the number of non-contiguous entries in the
# list of pages visited if the b-tree structure is traversed in a top-down
# fashion (each node visited before its child-tree is passed). Any overflow
@@ -112,15 +199,17 @@ foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
# is.
#
set gap_cnt 0
- set pglist [db eval {
- SELECT pageno FROM temp.dbstat WHERE name = $name ORDER BY rowid
- }]
- set prev [lindex $pglist 0]
- foreach pgno [lrange $pglist 1 end] {
- if {$pgno != $prev+1} {incr gap_cnt}
- set prev $pgno
+ set prev 0
+ db eval {
+ SELECT pageno, pagetype FROM temp.dbstat
+ WHERE name=$name
+ ORDER BY pageno
+ } {
+ if {$prev>0 && $pagetype=="leaf" && $pageno!=$prev+1} {
+ incr gap_cnt
+ }
+ set prev $pageno
}
-
mem eval {
INSERT INTO space_used VALUES(
$name,
@@ -138,14 +227,15 @@ foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
$int_unused,
$leaf_unused,
$ovfl_unused,
- $gap_cnt
+ $gap_cnt,
+ $compressed_size
);
}
}
proc integerify {real} {
if {[string is double -strict $real]} {
- return [expr {int($real)}]
+ return [expr {wide($real)}]
} else {
return 0
}
@@ -158,8 +248,19 @@ mem function int integerify
# [quote {hello world's}] == {'hello world''s'}
#
proc quote {txt} {
- regsub -all ' $txt '' q
- return '$q'
+ return [string map {' ''} $txt]
+}
+
+# Output a title line
+#
+proc titleline {title} {
+ if {$title==""} {
+ puts [string repeat * 79]
+ } else {
+ set len [string length $title]
+ set stars [string repeat * [expr 79-$len-5]]
+ puts "*** $title $stars"
+ }
}
# Generate a single line of output in the statistics section of the
@@ -167,7 +268,7 @@ proc quote {txt} {
#
proc statline {title value {extra {}}} {
set len [string length $title]
- set dots [string range {......................................} $len end]
+ set dots [string repeat . [expr 50-$len]]
set len [string length $value]
set sp2 [string range { } $len end]
if {$extra ne ""} {
@@ -199,8 +300,8 @@ proc divide {num denom} {
# Generate a subreport that covers some subset of the database.
# the $where clause determines which subset to analyze.
#
-proc subreport {title where} {
- global pageSize file_pgcnt
+proc subreport {title where showFrag} {
+ global pageSize file_pgcnt compressOverhead
# Query the in-memory database for the sum of various statistics
# for the subset of tables/indices identified by the WHERE clause in
@@ -224,15 +325,14 @@ proc subreport {title where} {
int(sum(leaf_unused)) AS leaf_unused,
int(sum(int_unused)) AS int_unused,
int(sum(ovfl_unused)) AS ovfl_unused,
- int(sum(gap_cnt)) AS gap_cnt
+ int(sum(gap_cnt)) AS gap_cnt,
+ int(sum(compressed_size)) AS compressed_size
FROM space_used WHERE $where" {} {}
# Output the sub-report title, nicely decorated with * characters.
#
puts ""
- set len [string length $title]
- set stars [string repeat * [expr 65-$len]]
- puts "*** $title $stars"
+ titleline $title
puts ""
# Calculate statistics and store the results in TCL variables, as follows:
@@ -274,15 +374,21 @@ proc subreport {title where} {
statline {Percentage of total database} $total_pages_percent
statline {Number of entries} $nleaf
statline {Bytes of storage consumed} $storage
+ if {$compressed_size!=$storage} {
+ set compressed_size [expr {$compressed_size+$compressOverhead*$total_pages}]
+ set pct [expr {$compressed_size*100.0/$storage}]
+ set pct [format {%5.1f%%} $pct]
+ statline {Bytes used after compression} $compressed_size $pct
+ }
statline {Bytes of payload} $payload $payload_percent
statline {Average payload per entry} $avg_payload
statline {Average unused bytes per entry} $avg_unused
if {[info exists avg_fanout]} {
statline {Average fanout} $avg_fanout
}
- if {$total_pages>1} {
- set fragmentation [percent $gap_cnt [expr {$total_pages-1}] {fragmentation}]
- statline {Fragmentation} $fragmentation
+ if {$showFrag && $total_pages>1} {
+ set fragmentation [percent $gap_cnt [expr {$total_pages-1}]]
+ statline {Non-sequential pages} $gap_cnt $fragmentation
}
statline {Maximum payload per entry} $mx_payload
statline {Entries that use overflow} $ovfl_cnt $ovfl_cnt_percent
@@ -293,16 +399,16 @@ proc subreport {title where} {
statline {Overflow pages used} $ovfl_pages
statline {Total pages used} $total_pages
if {$int_unused>0} {
- set int_unused_percent \
- [percent $int_unused [expr {$int_pages*$pageSize}] {of index space}]
+ set int_unused_percent [
+ percent $int_unused [expr {$int_pages*$pageSize}] {of index space}]
statline "Unused bytes on index pages" $int_unused $int_unused_percent
}
- statline "Unused bytes on primary pages" $leaf_unused \
- [percent $leaf_unused [expr {$leaf_pages*$pageSize}] {of primary space}]
- statline "Unused bytes on overflow pages" $ovfl_unused \
- [percent $ovfl_unused [expr {$ovfl_pages*$pageSize}] {of overflow space}]
- statline "Unused bytes on all pages" $total_unused \
- [percent $total_unused $storage {of all space}]
+ statline "Unused bytes on primary pages" $leaf_unused [
+ percent $leaf_unused [expr {$leaf_pages*$pageSize}] {of primary space}]
+ statline "Unused bytes on overflow pages" $ovfl_unused [
+ percent $ovfl_unused [expr {$ovfl_pages*$pageSize}] {of overflow space}]
+ statline "Unused bytes on all pages" $total_unused [
+ percent $total_unused $storage {of all space}]
return 1
}
@@ -330,7 +436,7 @@ proc autovacuum_overhead {filePages pageSize} {
set ptrsPerPage [expr double($pageSize/5)]
# Return the number of pointer map pages in the database.
- return [expr int(ceil( ($filePages-1.0)/($ptrsPerPage+1.0) ))]
+ return [expr wide(ceil( ($filePages-1.0)/($ptrsPerPage+1.0) ))]
}
@@ -357,17 +463,24 @@ proc autovacuum_overhead {filePages pageSize} {
# (not including sqlite_master)
# user_percent: $user_payload as a percentage of total file size.
-set file_bytes [file size $file_to_analyze]
-set file_pgcnt [expr {$file_bytes/$pageSize}]
+### The following, setting $file_bytes based on the actual size of the file
+### on disk, causes this tool to choke on zipvfs databases. So set it based
+### on the return of [PRAGMA page_count] instead.
+if 0 {
+ set file_bytes [file size $file_to_analyze]
+ set file_pgcnt [expr {$file_bytes/$pageSize}]
+}
+set file_pgcnt [db one {PRAGMA page_count}]
+set file_bytes [expr {$file_pgcnt * $pageSize}]
set av_pgcnt [autovacuum_overhead $file_pgcnt $pageSize]
set av_percent [percent $av_pgcnt $file_pgcnt]
set sql {SELECT sum(leaf_pages+int_pages+ovfl_pages) FROM space_used}
-set inuse_pgcnt [expr int([mem eval $sql])]
+set inuse_pgcnt [expr wide([mem eval $sql])]
set inuse_percent [percent $inuse_pgcnt $file_pgcnt]
-set free_pgcnt [expr $file_pgcnt-$inuse_pgcnt-$av_pgcnt]
+set free_pgcnt [expr {$file_pgcnt-$inuse_pgcnt-$av_pgcnt}]
set free_percent [percent $free_pgcnt $file_pgcnt]
set free_pgcnt2 [db one {PRAGMA freelist_count}]
set free_percent2 [percent $free_pgcnt2 $file_pgcnt]
@@ -387,10 +500,7 @@ set user_percent [percent $user_payload $file_bytes]
# Output the summary statistics calculated above.
#
-puts "/** Disk-Space Utilization Report For $file_to_analyze"
-catch {
- puts "*** As of [clock format [clock seconds] -format {%Y-%b-%d %H:%M:%S}]"
-}
+puts "/** Disk-Space Utilization Report For $root_filename"
puts ""
statline {Page size in bytes} $pageSize
statline {Pages in the whole file (measured)} $file_pgcnt
@@ -401,50 +511,94 @@ statline {Pages on the freelist (calculated)} $free_pgcnt $free_percent
statline {Pages of auto-vacuum overhead} $av_pgcnt $av_percent
statline {Number of tables in the database} $ntable
statline {Number of indices} $nindex
-statline {Number of named indices} $nmanindex
-statline {Automatically generated indices} $nautoindex
-statline {Size of the file in bytes} $file_bytes
+statline {Number of defined indices} $nmanindex
+statline {Number of implied indices} $nautoindex
+if {$isCompressed} {
+ statline {Size of uncompressed content in bytes} $file_bytes
+ set efficiency [percent $true_file_size $file_bytes]
+ statline {Size of compressed file on disk} $true_file_size $efficiency
+} else {
+ statline {Size of the file in bytes} $file_bytes
+}
statline {Bytes of user payload stored} $user_payload $user_percent
# Output table rankings
#
puts ""
-puts "*** Page counts for all tables with their indices ********************"
+titleline "Page counts for all tables with their indices"
puts ""
mem eval {SELECT tblname, count(*) AS cnt,
int(sum(int_pages+leaf_pages+ovfl_pages)) AS size
FROM space_used GROUP BY tblname ORDER BY size+0 DESC, tblname} {} {
statline [string toupper $tblname] $size [percent $size $file_pgcnt]
}
+puts ""
+titleline "Page counts for all tables and indices separately"
+puts ""
+mem eval {
+ SELECT
+ upper(name) AS nm,
+ int(int_pages+leaf_pages+ovfl_pages) AS size
+ FROM space_used
+ ORDER BY size+0 DESC, name} {} {
+ statline $nm $size [percent $size $file_pgcnt]
+}
+if {$isCompressed} {
+ puts ""
+ titleline "Bytes of disk space used after compression"
+ puts ""
+ set csum 0
+ mem eval {SELECT tblname,
+ int(sum(compressed_size)) +
+ $compressOverhead*sum(int_pages+leaf_pages+ovfl_pages)
+ AS csize
+ FROM space_used GROUP BY tblname ORDER BY csize+0 DESC, tblname} {} {
+ incr csum $csize
+ statline [string toupper $tblname] $csize [percent $csize $true_file_size]
+ }
+ set overhead [expr {$true_file_size - $csum}]
+ if {$overhead>0} {
+ statline {Header and free space} $overhead [percent $overhead $true_file_size]
+ }
+}
# Output subreports
#
if {$nindex>0} {
- subreport {All tables and indices} 1
+ subreport {All tables and indices} 1 0
}
-subreport {All tables} {NOT is_index}
+subreport {All tables} {NOT is_index} 0
if {$nindex>0} {
- subreport {All indices} {is_index}
+ subreport {All indices} {is_index} 0
}
-foreach tbl [mem eval {SELECT name FROM space_used WHERE NOT is_index
+foreach tbl [mem eval {SELECT DISTINCT tblname name FROM space_used
ORDER BY name}] {
- regsub ' $tbl '' qn
+ set qn [quote $tbl]
set name [string toupper $tbl]
- set n [mem eval "SELECT count(*) FROM space_used WHERE tblname='$qn'"]
+ set n [mem eval {SELECT count(*) FROM space_used WHERE tblname=$tbl}]
if {$n>1} {
- subreport "Table $name and all its indices" "tblname='$qn'"
- subreport "Table $name w/o any indices" "name='$qn'"
- subreport "Indices of table $name" "tblname='$qn' AND is_index"
+ set idxlist [mem eval "SELECT name FROM space_used
+ WHERE tblname='$qn' AND is_index
+ ORDER BY 1"]
+ subreport "Table $name and all its indices" "tblname='$qn'" 0
+ subreport "Table $name w/o any indices" "name='$qn'" 1
+ if {[llength $idxlist]>1} {
+ subreport "Indices of table $name" "tblname='$qn' AND is_index" 0
+ }
+ foreach idx $idxlist {
+ set qidx [quote $idx]
+ subreport "Index [string toupper $idx] of table $name" "name='$qidx'" 1
+ }
} else {
- subreport "Table $name" "name='$qn'"
+ subreport "Table $name" "name='$qn'" 1
}
}
# Output instructions on what the numbers above mean.
#
+puts ""
+titleline Definitions
puts {
-*** Definitions ******************************************************
-
Page size in bytes
The number of bytes in a single page of the database file.
@@ -452,11 +606,9 @@ Page size in bytes
Number of pages in the whole file
}
-puts \
-" The number of $pageSize-byte pages that go into forming the complete
+puts " The number of $pageSize-byte pages that go into forming the complete
database"
-puts \
-{
+puts {
Pages that store data
The number of pages that store data, either as primary B*Tree pages or
@@ -483,11 +635,11 @@ Number of indices
The total number of indices in the database.
-Number of named indices
+Number of defined indices
The number of indices created using an explicit CREATE INDEX statement.
-Automatically generated indices
+Number of implied indices
The number of indices used to implement PRIMARY KEY or UNIQUE constraints
on tables.
@@ -536,13 +688,16 @@ Average unused bytes per entry
category on a per-entry basis. This is the number of unused bytes on
all pages divided by the number of entries.
-Fragmentation
+Non-sequential pages
- The percentage of pages in the table or index that are not
- consecutive in the disk file. Many filesystems are optimized
- for sequential file access so smaller fragmentation numbers
- sometimes result in faster queries, especially for larger
- database files that do not fit in the disk cache.
+ The number of pages in the table or index that are out of sequence.
+ Many filesystems are optimized for sequential file access so a small
+ number of non-sequential pages might result in faster queries,
+ especially for larger database files that do not fit in the disk cache.
+ Note that after running VACUUM, the root page of each table or index is
+ at the beginning of the database file and all other pages are in a
+ separate part of the database file, resulting in a single non-
+ sequential page.
Maximum payload per entry
@@ -598,7 +753,7 @@ Unused bytes on all pages
# Output a dump of the in-memory database. This can be used for more
# complex offline analysis.
#
-puts "**********************************************************************"
+titleline {}
puts "The entire text of this report can be sourced into any SQL database"
puts "engine for further analysis. All of the text above is an SQL comment."
puts "The data used to generate this report follows:"
@@ -611,7 +766,7 @@ mem eval {SELECT * FROM space_used} x {
set sep (
foreach col $x(*) {
set v $x($col)
- if {$v=="" || ![string is double $v]} {set v [quote $v]}
+ if {$v=="" || ![string is double $v]} {set v '[quote $v]'}
puts -nonewline $sep$v
set sep ,
}
diff --git a/lang/sql/sqlite/tool/stack_usage.tcl b/lang/sql/sqlite/tool/stack_usage.tcl
new file mode 100644
index 00000000..b3574f02
--- /dev/null
+++ b/lang/sql/sqlite/tool/stack_usage.tcl
@@ -0,0 +1,98 @@
+#!/usr/bin/tclsh
+#
+# Parse the output of
+#
+# objdump -d sqlite3.o
+#
+# for x64 and generate a report showing:
+#
+# (1) Stack used by each function
+# (2) Recursion paths and their aggregate stack depth
+#
+set getStack 0
+while {![eof stdin]} {
+ set line [gets stdin]
+ if {[regexp {^[0-9a-f]+ <([^>]+)>:\s*$} $line all procname]} {
+ set curfunc $procname
+ set root($curfunc) 1
+ set calls($curfunc) {}
+ set calledby($curfunc) {}
+ set recursive($curfunc) {}
+ set stkdepth($curfunc) 0
+ set getStack 1
+ continue
+ }
+ if {[regexp {callq? +[0-9a-z]+ <([^>]+)>} $line all other]} {
+ set key [list $curfunc $other]
+ set callpair($key) 1
+ unset -nocomplain root($curfunc)
+ continue
+ }
+ if {[regexp {sub +\$(0x[0-9a-z]+),%[er]sp} $line all xdepth]} {
+ if {$getStack} {
+ scan $xdepth %x depth
+ set stkdepth($curfunc) $depth
+ set getStack 0
+ }
+ continue
+ }
+}
+
+puts "****************** Stack Usage By Function ********************"
+set sdlist {}
+foreach f [array names stkdepth] {
+ lappend sdlist [list $stkdepth($f) $f]
+}
+foreach sd [lsort -integer -decr -index 0 $sdlist] {
+ foreach {depth fname} $sd break
+ puts [format {%6d %s} $depth $fname]
+}
+
+puts "****************** Stack Usage By Recursion *******************"
+foreach key [array names callpair] {
+ foreach {from to} $key break
+ lappend calls($from) $to
+ # lappend calledby($to) $from
+}
+proc all_descendents {root} {
+ global calls recursive
+ set todo($root) $root
+ set go 1
+ while {$go} {
+ set go 0
+ foreach f [array names todo] {
+ set path $todo($f)
+ unset todo($f)
+ if {![info exists calls($f)]} continue
+ foreach x $calls($f) {
+ if {$x==$root} {
+ lappend recursive($root) [concat $path $root]
+ } elseif {![info exists d($x)]} {
+ set go 1
+ set todo($x) [concat $path $x]
+ set d($x) 1
+ }
+ }
+ }
+ }
+ return [array names d]
+}
+set pathlist {}
+foreach f [array names recursive] {
+ all_descendents $f
+ foreach m $recursive($f) {
+ set depth 0
+ foreach b [lrange $m 0 end-1] {
+ set depth [expr {$depth+$stkdepth($b)}]
+ }
+ lappend pathlist [list $depth $m]
+ }
+}
+foreach path [lsort -integer -decr -index 0 $pathlist] {
+ foreach {depth m} $path break
+ set first [lindex $m 0]
+ puts [format {%6d %s %d} $depth $first $stkdepth($first)]
+ foreach b [lrange $m 1 end] {
+ puts " $b $stkdepth($b)"
+ }
+}
diff --git a/lang/sql/sqlite/tool/symbols-mingw.sh b/lang/sql/sqlite/tool/symbols-mingw.sh
new file mode 100644
index 00000000..bf93eec7
--- /dev/null
+++ b/lang/sql/sqlite/tool/symbols-mingw.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Run this script in a directory that contains a valid SQLite makefile in
+# order to check for unintentionally exported symbols.
+#
+make sqlite3.c
+
+echo '****** Exported symbols from a build including RTREE && FTS4 ******'
+gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
+ -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
+ -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
+ sqlite3.c
+nm sqlite3.o | grep " [TD] "
+
+echo '****** Surplus symbols from a build including RTREE & FTS4 ******'
+nm sqlite3.o | grep " [TD] " | grep -v " .*sqlite3_"
+
+echo '****** Dependencies of the core. No extensions. No OS interface *******'
+gcc -c -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
+ -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
+ -DSQLITE_OS_OTHER -DSQLITE_THREADSAFE=0 \
+ sqlite3.c
+nm sqlite3.o | grep " U "
+
+echo '****** Dependencies including RTREE & FTS4 *******'
+gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
+ -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
+ -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
+ sqlite3.c
+nm sqlite3.o | grep " U "
diff --git a/lang/sql/sqlite/tool/symbols.sh b/lang/sql/sqlite/tool/symbols.sh
new file mode 100644
index 00000000..befffce5
--- /dev/null
+++ b/lang/sql/sqlite/tool/symbols.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Run this script in a directory that contains a valid SQLite makefile in
+# order to check for unintentionally exported symbols.
+#
+make sqlite3.c
+
+echo '****** Exported symbols from a build including RTREE, FTS4 & ICU ******'
+gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
+ -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
+ -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
+ -DSQLITE_ENABLE_ICU \
+ sqlite3.c
+nm sqlite3.o | grep ' [TD] ' | sort -k 3
+
+echo '****** Surplus symbols from a build including RTREE, FTS4 & ICU ******'
+nm sqlite3.o | grep ' [TD] ' | grep -v ' .*sqlite3_'
+
+echo '****** Dependencies of the core. No extensions. No OS interface *******'
+gcc -c -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
+ -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
+ -DSQLITE_OS_OTHER -DSQLITE_THREADSAFE=0 \
+ sqlite3.c
+nm sqlite3.o | grep ' U ' | sort -k 3
+
+echo '****** Dependencies including RTREE & FTS4 *******'
+gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
+ -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
+ -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
+ -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
+ sqlite3.c
+nm sqlite3.o | grep ' U ' | sort -k 3
diff --git a/lang/sql/sqlite/tool/tostr.awk b/lang/sql/sqlite/tool/tostr.awk
new file mode 100644
index 00000000..b4f48d3d
--- /dev/null
+++ b/lang/sql/sqlite/tool/tostr.awk
@@ -0,0 +1,8 @@
+#!/usr/bin/awk
+#
+# Convert input text into a C string
+#
+{
+ gsub(/\"/,"\\\"");
+ print "\"" $0 "\\n\"";
+}
diff --git a/lang/sql/sqlite/tool/vdbe-compress.tcl b/lang/sql/sqlite/tool/vdbe-compress.tcl
index 3bcff9e5..a349830b 100644
--- a/lang/sql/sqlite/tool/vdbe-compress.tcl
+++ b/lang/sql/sqlite/tool/vdbe-compress.tcl
@@ -13,7 +13,7 @@
# Script usage:
#
# mv vdbe.c vdbe.c.template
-# tclsh vdbe-compress.tcl <vdbe.c.template >vdbe.c
+# tclsh vdbe-compress.tcl $CFLAGS <vdbe.c.template >vdbe.c
#
# Modifications made:
#
@@ -42,6 +42,16 @@ set unionDef {} ;# C code of the union
set afterUnion {} ;# C code after the union
set sCtr 0 ;# Context counter
+# If the SQLITE_SMALL_STACK compile-time option is missing, then
+# this transformation becomes a no-op.
+#
+if {![regexp {SQLITE_SMALL_STACK} $argv]} {
+ while {![eof stdin]} {
+ puts [gets stdin]
+ }
+ exit
+}
+
# Read program text up to the spot where the union should be
# inserted.
#
@@ -79,6 +89,9 @@ while {![eof stdin]} {
append unionDef " $line\n"
append afterUnion $line\n
lappend vlist $vname
+ } elseif {[regexp {^#(if|endif)} $line] && [llength $vlist]>0} {
+ append unionDef "$line\n"
+ append afterUnion $line\n
} else {
break
}
diff --git a/lang/sql/sqlite/tool/warnings-clang.sh b/lang/sql/sqlite/tool/warnings-clang.sh
new file mode 100644
index 00000000..7a0aa4bc
--- /dev/null
+++ b/lang/sql/sqlite/tool/warnings-clang.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+#
+# Run this script in a directory with a working makefile to check for
+# compiler warnings in SQLite.
+#
+rm -f sqlite3.c
+make sqlite3.c
+echo '************* FTS4 and RTREE ****************'
+scan-build gcc -c -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
+ -DSQLITE_DEBUG -DSQLITE_ENABLE_STAT3 sqlite3.c 2>&1 | grep -v 'ANALYZE:'
+echo '********** ENABLE_STAT3. THREADSAFE=0 *******'
+scan-build gcc -c -I. -DSQLITE_ENABLE_STAT3 -DSQLITE_THREADSAFE=0 \
+ -DSQLITE_DEBUG \
+ sqlite3.c ../sqlite/src/shell.c -ldl 2>&1 | grep -v 'ANALYZE:'
diff --git a/lang/sql/sqlite/tool/warnings.sh b/lang/sql/sqlite/tool/warnings.sh
new file mode 100644
index 00000000..246bccbe
--- /dev/null
+++ b/lang/sql/sqlite/tool/warnings.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# Run this script in a directory with a working makefile to check for
+# compiler warnings in SQLite.
+#
+rm -f sqlite3.c
+make sqlite3.c-debug
+echo '********** No optimizations. Includes FTS4 and RTREE *********'
+gcc -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
+ -ansi -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
+ sqlite3.c
+echo '********** No optimizations. ENABLE_STAT4. THREADSAFE=0 *******'
+gcc -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
+ -ansi -DSQLITE_ENABLE_STAT4 -DSQLITE_THREADSAFE=0 \
+ sqlite3.c
+echo '********** Optimized -O3. Includes FTS4 and RTREE ************'
+gcc -O3 -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
+ -ansi -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
+ sqlite3.c
diff --git a/lang/sql/sqlite/tool/win/sqlite.vsix b/lang/sql/sqlite/tool/win/sqlite.vsix
new file mode 100644
index 00000000..ac4afb3f
--- /dev/null
+++ b/lang/sql/sqlite/tool/win/sqlite.vsix
Binary files differ