drop table if exists t; create virtual table t using fts2; insert into t (content) values ('This is a test'); insert into t (content) values ('This is a string'); insert into t (content) values ('That was a test'); insert into t (content) values ('A random string'); select content from t where t MATCH 'test'; delete from t where content = 'This is a string'; vacuum; select content from t where t MATCH 'test';
The first select outputs 'This is a test' and 'That was a test'. The second select outputs 'This is a test', and 'A random string'. ---- _2007-Jul-17 17:27:21 by anonymous:_ {linebreak} This patch seems to address the FTS2 VACUUM problem and passes all fts2 tests. It adds an INTEGER PRIMARY KEY docid column to the hidden %_content table. Note: this new table format is not backwards compatible with existing FTS2 databases. -Joe Wilson Index: ext/fts2/fts2.c =================================================================== RCS file: /sqlite/sqlite/ext/fts2/fts2.c,v retrieving revision 1.40 diff -u -3 -p -r1.40 fts2.c --- ext/fts2/fts2.c 2 Jul 2007 10:16:50 -0000 1.40 +++ ext/fts2/fts2.c 17 Jul 2007 17:19:49 -0000 @@ -1769,9 +1769,9 @@ typedef enum fulltext_statement { */ static const char *const fulltext_zStatement[MAX_STMT] = { /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ - /* CONTENT_SELECT */ "select * from %_content where rowid = ?", + /* CONTENT_SELECT */ "select * from %_content where docid = ?", /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ - /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + /* CONTENT_DELETE */ "delete from %_content where docid = ?", /* BLOCK_INSERT */ "insert into %_segments values (?)", /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", @@ -1860,14 +1860,14 @@ static struct fulltext_vtab *cursor_vtab static const sqlite3_module fts2Module; /* forward declaration */ /* Return a dynamically generated statement of the form - * insert into %_content (rowid, ...) values (?, ...) + * insert into %_content (docid, ...) values (?, ...) 
*/ static const char *contentInsertStatement(fulltext_vtab *v){ StringBuffer sb; int i; initStringBuffer(&sb); - append(&sb, "insert into %_content (rowid, "); + append(&sb, "insert into %_content (docid, "); appendList(&sb, v->nColumn, v->azContentColumn); append(&sb, ") values (?"); for(i=0; inColumn; ++i) @@ -1878,7 +1878,7 @@ static const char *contentInsertStatemen /* Return a dynamically generated statement of the form * update %_content set [col_0] = ?, [col_1] = ?, ... - * where rowid = ? + * where docid = ? */ static const char *contentUpdateStatement(fulltext_vtab *v){ StringBuffer sb; @@ -1893,7 +1893,7 @@ static const char *contentUpdateStatemen append(&sb, v->azContentColumn[i]); append(&sb, " = ?"); } - append(&sb, " where rowid = ?"); + append(&sb, " where docid = ?"); return stringBufferData(&sb); } @@ -2027,15 +2027,15 @@ static int sql_step_leaf_statement(fullt return rc; } -/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ -static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, +/* insert into %_content (docid, ...) values ([docid], [pValues]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *docid, sqlite3_value **pValues){ sqlite3_stmt *s; int i; int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); if( rc!=SQLITE_OK ) return rc; - rc = sqlite3_bind_value(s, 1, rowid); + rc = sqlite3_bind_value(s, 1, docid); if( rc!=SQLITE_OK ) return rc; for(i=0; inColumn; ++i){ @@ -2047,7 +2047,7 @@ static int content_insert(fulltext_vtab } /* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
- * where rowid = [iRowid] */ + * where docid = [iRowid] */ static int content_update(fulltext_vtab *v, sqlite3_value **pValues, sqlite_int64 iRowid){ sqlite3_stmt *s; @@ -2075,7 +2075,7 @@ static void freeStringArray(int nString, free((void *) pString); } -/* select * from %_content where rowid = [iRow] +/* select * from %_content where docid = [iRow] * The caller must delete the returned array and all strings in it. * null fields will be NULL in the returned array. * @@ -2101,10 +2101,10 @@ static int content_select(fulltext_vtab values = (const char **) malloc(v->nColumn * sizeof(const char *)); for(i=0; inColumn; ++i){ - if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + if( sqlite3_column_type(s, i+1)==SQLITE_NULL ){ values[i] = NULL; }else{ - values[i] = string_dup((char*)sqlite3_column_text(s, i)); + values[i] = string_dup((char*)sqlite3_column_text(s, i+1)); } } @@ -2120,7 +2120,7 @@ static int content_select(fulltext_vtab return rc; } -/* delete from %_content where rowid = [iRow ] */ +/* delete from %_content where docid = [iRow ] */ static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ sqlite3_stmt *s; int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); @@ -2870,7 +2870,7 @@ static int fulltextCreate(sqlite3 *db, v if( rc!=SQLITE_OK ) return rc; initStringBuffer(&schema); - append(&schema, "CREATE TABLE %_content("); + append(&schema, "CREATE TABLE %_content(docid INTEGER PRIMARY KEY, "); appendList(&schema, spec.nColumn, spec.azContentColumn); append(&schema, ")"); rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); @@ -3731,8 +3731,8 @@ static int fulltextFilter( TRACE(("FTS2 Filter %p\n",pCursor)); - zSql = sqlite3_mprintf("select rowid, * from %%_content %s", - idxNum==QUERY_GENERIC ? "" : "where rowid=?"); + zSql = sqlite3_mprintf("select * from %%_content %s", + idxNum==QUERY_GENERIC ? "" : "where docid=?"); sqlite3_finalize(c->pStmt); rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); sqlite3_free(zSql);
---- _2007-Jul-18 00:13:56 by shess:_ {linebreak} BTW, AFAICT this only happens for sqlite3.4. Older versions don't seem to have the problem. ---- _2007-Jul-18 01:31:49 by anonymous:_ {linebreak} The rowid changing after VACUUM predates 3.4.0... SQLite version 3.3.7 Enter ".help" for instructions sqlite> CREATE TABLE t(a); sqlite> INSERT INTO "t" VALUES('one'); sqlite> INSERT INTO "t" VALUES('two'); sqlite> INSERT INTO "t" VALUES('three'); sqlite> select rowid, * from t; 1|one 2|two 3|three sqlite> delete from t where a = 'one'; sqlite> select rowid, * from t; 2|two 3|three sqlite> vacuum; sqlite> select rowid, * from t; 1|two 2|three SQLite version 3.2.0 Enter ".help" for instructions sqlite> CREATE TABLE t(a); sqlite> INSERT INTO "t" VALUES('one'); sqlite> INSERT INTO "t" VALUES('two'); sqlite> INSERT INTO "t" VALUES('three'); sqlite> select rowid, * from t; 1|one 2|two 3|three sqlite> delete from t where a = 'one'; sqlite> select rowid, * from t; 2|two 3|three sqlite> vacuum; sqlite> select rowid, * from t; 1|two 2|three
---- _2007-Jul-18 15:59:24 by anonymous:_ {linebreak} As you may know, INTEGER PRIMARY KEY indexes are the ROWID, so I must suspect they would change after a VACUUM. The best workaround is to put docid as INTEGER, and then add a PRIMARY KEY index for the docid column.
#f2dcdc 2503 code active 2007 Jul anonymous 2007 Jul 3 4 sqlite3PagerReleaseMemory does not decrement page count When cached pages are released by sqlite3PagerReleaseMemory the number of pages (pPager->nPage) is not decremented. This also subsequently affects the maximum value at pPager->nMaxPage. This does not affect the operation of sqlite (but does upset my statistic gathering). Although only tested with 3.3.8, the problem does not appear to have been corrected in version 3.4.0 Actually this will affect the handling of cache_size as the pager will think there are more pages cached than is the case, and may unnecessarily release some. ---- _2007-Jul-18 15:54:29 by anonymous:_ {linebreak} Do you have a simple test case that demonstrates this? Put a print statement in pager.c if necessary.
#f2dcdc 2512 code active 2007 Jul shess 2007 Jul 1 1 FTS virtual table name quoting problem All table names should be quoted in the FTS module code. with TRACE enabled in ext/fts2/fts2.c: sqlite> create virtual table "a b c" using fts2 (t); FTS2 Create FTS2 sql: CREATE TABLE main.a b c_content(c0t) SQL error: vtable constructor failed: a b c
_2007-Jul-18 06:44:21 by anonymous:_ {linebreak} A similar problem shows if a FTS column has the same name as the FTS table: CREATE VIRTUAL TABLE a USING fts2 (a); Returns "vtable constructor failed: a.".
#f2dcdc 2511 code active 2007 Jul anonymous 2007 Jul drh 3 2 Inconsistent Pragma output Pragma output is inconsistent when setting the value. Most do not generate any output and silently set the value, while others generate a singleton row with the set value. Here is a list of pragmas that generate output while setting the values: sqlite> PRAGMA locking_mode = NORMAL; normal sqlite> PRAGMA max_page_count = 100000; 100000 The following do not generate any output upon query: PRAGMA case_sensitive_like; PRAGMA incremental_vacuum; Sqlite was built from almagamation using the following configuration flags: --enable-threadsafe --disable-tcl --enable-tempstore
#f2dcdc 2509 code active 2007 Jul anonymous 2007 Jul 1 1 SQLITE_DATE SELECT CAST(MyDate AS DATE), CAST(MyTime AS TIME) FROM MyData I hope, it will result/return DATE, TIME. Please support to SQLITE_DATE and SQLITE_TIME. Thanks.
#f2dcdc 2498 code active 2007 Jul anonymous 2007 Jul 3 2 sqlite memory org on linux (related ticket #2473)... he sample programme that I run(wrote) in tty1 and there I operate the command of ps at tty2, there seems two items from the programme of ps command. This error was not at the version 3.3.13 but now it is happening at sqlite versions although i change nothing from the programme, If I turn to old versions, there is seen only one item again. When I upgrade to version 3.3.13 or later, there is seen two items again Is it normal or there is any mistake? (excuse my poor english) _2007-Jul-11 16:44:22 by anonymous:_ {linebreak} So you are seeing 2 processes instead of 1 on Linux? Linux 2.4 and earlier kernels show threads as seperate processes with unique process IDs. Is your program creating any threads? The only place where SQLite creates threads is the function below - but it joins with the thread right away. /* ** This procedure attempts to determine whether or not threads ** can override each others locks then sets the ** threadsOverrideEachOthersLocks variable appropriately. */ static void testThreadLockingBehavior(int fd_orig){ int fd; struct threadTestData d[2]; pthread_t t[2]; fd = dup(fd_orig); if( fd<0 ) return; memset(d, 0, sizeof(d)); d[0].fd = fd; d[0].lock.l_type = F_RDLCK; d[0].lock.l_len = 1; d[0].lock.l_start = 0; d[0].lock.l_whence = SEEK_SET; d[1] = d[0]; d[1].lock.l_type = F_WRLCK; pthread_create(&t[0], 0, threadLockingTest, &d[0]); pthread_create(&t[1], 0, threadLockingTest, &d[1]); pthread_join(t[0], 0); pthread_join(t[1], 0); close(fd); threadsOverrideEachOthersLocks = d[0].result==0 && d[1].result==0; }
If you post a small C program demonstrating what you're seeing, someone may be able to offer a suggestion. ---- _2007-Jul-11 16:47:10 by anonymous:_ {linebreak} I suppose it's not inconceivable that the join failed. Perhaps these pthread_join calls' return codes should be examined for errors. ---- _2007-Jul-11 18:53:36 by anonymous:_ {linebreak} If you're playing games with tty's and you've got an early Linux 2.6 kernel, it's possible that processes are dying because of http://lkml.org/lkml/2004/10/21/119. It was, last I checked, fixed in 2.6.10. The SIGHUP being generated might also interfer with a =pthread_join()=, although =pthread_join()= doesn't say anything about ever generating =EINTR=... c. ---- _2007-Jul-12 06:12:28 by anonymous:_ {linebreak} my example program is very simple, i not use threading-multithreading structure... If I turn to old versions of sqlite, there is seen only one item again, when I upgrade to version 3.3.13 or later, there is seen two items again Is it. note: /lib/libpthread.so.0 linked to /lib/libpthread-0.10.so (size 55468 byte) ---- _2007-Jul-12 11:36:37 by anonymous:_ {linebreak} Your description of the problem isn't clear enough, so the answers you're getting are just guesses. You may have more luck by describing the problem (with as much detail as possible) in your native language and hoping someone in the SQLite community can add a translation. I know you're doing your best with the english you speak, but it's not working well enough for someone to help with your problem. Adding code samples and command-line output would also help considerably, since that sort of this is mostly language independent.
#f2dcdc 2496 code active 2007 Jul anonymous 2007 Jul 5 4 "No such column" error should include table information It'd be nice if the "no such column" error included the table/view that SQLite was searching for the column. no such column: ChecklistID Thanks, Sam
#f2dcdc 2491 code active 2007 Jul anonymous 2007 Jul 1 1 Mingw Warnings w/ 3.4.0 Amalgamation When compiling the 3.4.0 amalgamation sqlite3.c file w/ no defines, you get the following warnings: sqlite3/sqlite3.c: In function `sqlite3BtreeFindCell':{linebreak} sqlite3/sqlite3.c:23249: warning: unused variable `data'{linebreak} sqlite3/sqlite3.c: In function `vxprintf':{linebreak} sqlite3/sqlite3.c:8488: warning: 'xtype' might be used uninitialized in this function{linebreak} sqlite3/sqlite3.c: In function `sqlite3BtreeOpen':{linebreak} sqlite3/sqlite3.c:19488: warning: 'nameLen' might be used uninitialized in this function{linebreak} sqlite3/sqlite3.c: In function `getOverflowPage':{linebreak} sqlite3/sqlite3.c:25386: warning: 'rc' might be used uninitialized in this function{linebreak} sqlite3/sqlite3.c: In function `sqlite3Select':{linebreak} sqlite3/sqlite3.c:56300: warning: 'pEList' might be used uninitialized in this function{linebreak} sqlite3/sqlite3.c:56301: warning: 'pTabList' might be used uninitialized in this function{linebreak} sqlite3/sqlite3.c: At top level:{linebreak} sqlite3/sqlite3.c:16020: warning: 'sqlite3GenericAllocationSize' defined but not used{linebreak} sqlite3/sqlite3.c:6188: warning: 'sqlite3Utf16Substr' declared `static' but never defined{linebreak} sqlite3/sqlite3.c:6307: warning: 'sqlite3Get2byte' declared `static' but never defined{linebreak} sqlite3/sqlite3.c:6309: warning: 'sqlite3Put2byte' declared `static' but never defined{linebreak} sqlite3/sqlite3.c:23248: warning: 'sqlite3BtreeFindCell' defined but not used{linebreak} sqlite3/sqlite3.c:63547: warning: 'sqlite3ParserAlloc' defined but not used{linebreak} sqlite3/sqlite3.c:63673: warning: 'sqlite3ParserFree' defined but not used{linebreak} sqlite3/sqlite3.c:65286: warning: 'sqlite3Parser' defined but not used{linebreak} I know the uninitialized warnings are false warnings but the defined functions that aren't used seem to be an error in building the amalgamation.
#f2dcdc 2487 code active 2007 Jul anonymous 2007 Jul 1 1 SQLite database locked error on NFS mounted home dir I have a c program using the provided API. My home directory is NFS mounted, Im using SQLite 3.3.17. I open a new database using "sqlite3_open", then strcpy () a SQL command to create a table, and run "sqlite3_exec" with this string. I get a return code of 5=database locked. I then tried to manually (command line using sqlite3) create a table within a database in my home dir, that fails too. =========== x@y> sqlite3 db2 SQLite version 3.3.17 Enter ".help" for instructions sqlite> create table test (Lastname varchar); SQL error: database is locked sqlite> ============== If I try this on my local machine (a Mac), it works fine, but I need it to work in my home directory mounted via NFS as that is where the output of our program goes _2007-Jul-06 19:04:15 by anonymous:_ {linebreak} If you're using a Mac, compile sqlite with SQLITE_ENABLE_LOCKING_STYLE in os_unix.c ---- _2007-Jul-07 11:51:10 by drh:_ {linebreak} This is a problem with your NFS implementation - it does not appear to support posix advisory locking. There is nothing much that SQLite can do about this. Anonymous above suggests making use of the dot-locking mechanism contributed by Apple. This might be an effective work-around. But remember that there is performance impact. Also remember that an SQLite database that uses dot-locking is subtly imcompatible with a standard SQLite database. The file format itself is the same, but if two processes try to access the database file at the same time and one uses dot-locks and the other uses posix advisory locks, you will end up with corruption. ---- _2007-Jul-07 12:44:09 by anonymous:_ {linebreak} It's very odd that Apple does not fix their Mac OSX POSIX locks for NFS given their resources.
#e8e8bd 2484 new active 2007 Jul anonymous 2007 Jul 5 4 Support for RETURNING I was recently trying to get HTSQL (http://htsql.org) to work with SQLite, especially since it'd be nice to work out-of-the-box with Python. One of the hiccups was the lack of a RETURNING clause, this is especially important once you have auto-incremented keys. For example.. INSERT INTO TABLE some_table (a_column) values ('value') RETURNING (serial_column); This acts like a SELECT following the INSERT returning the requested columns on the affected rows. It is quite helpful for cases like UPDATE or DELETE when more than one row is affected. While this feature isn't critical for SQLite, it reduces client-side code significantly. Thank you for your kind consideration.
#e8e8bd 2377 build active 2007 May pweilbacher 2007 Jul 4 3 Allow easy DLL build on OS/2 =Makefile.in= contains a target to build a DLL on Windows but unfortunately it doesn't work for OS/2. Current GCC versions use a calling convention that prepends underscores and these need to go into the =.def= file. To make a nice DLL header some extra lines in the =.def= file would be nice, too, that are probably incompatible with Windows linkers. Finally, to make the DLL usable we need to create an import library =.lib=. As a nice-to-have feature I would like the DLL to be named after the VERSION but without the dot, as I expect DLLs from version 3.0.x to be imcompatible with 3.3.x or other future 3.x versions... I don't know a clever way to do that other than introducing a new variable into configure.ac and Makefile.in. _2007-Jul-03 23:42:20 by pweilbacher:_ {linebreak} SQLite releases for OS/2 are now built from the amalgamation, so this is only useful for checks of the CVS code between releases. Not sure if it still makes sense to check this in, but at least the patch can stay attached to the ticket for possible future reference.
#e8e8bd 1126 new active 2005 Feb anonymous Unknown 2007 Jan drh 2 3 sqlite 2.8.16 port to djgpp here is a diff to be applied on sqlite 2.8.16 to make it work with djgpp. some of the fixes are needed for general purpose, such as relative path handling, and bypass of history and readline wherever not functional. dear drh, please apply this patch to mainstream sqlite. waiver of copyright in the patch itself. best regards, alex _2005-Feb-16 14:14:56 by drh:_ {linebreak} I applied these patches. But then the regression tests fail under unix. The patches much have broken something. No time to fix it now.... ---- _2005-Feb-17 11:46:57 by anonymous:_ {linebreak} thanks for your time. i will try to compile on linux and compare results. ---- _2005-Oct-11 14:58:05 by anonymous:_ {linebreak} i have to appologize for the long time it took me to get to it. i have found the flaw in the patch that made the fulltest fail on unix. now, that tests pass, please incorporate the diff in mainstream. i will fix the ports for sqlite3 too. ---- _2007-Jan-31 01:33:52 by anonymous:_ {linebreak} it seems someone has accidentally changed the diff i've provided for an invalid binary file.
#f2dcdc 2203 code active 2007 Jan anonymous 2007 Jan 2 3 table_info pragma "default" column format changed? Beginning with SQLite3 3.3.8, it looks like the format of the 'default' value returned by the table_info pragma has changed. Before, it used to be a bare string: dev:~> sqlite3 SQLite version 3.3.7 Enter ".help" for instructions sqlite> create table testings (a integer primary key, b string default 'Tester', c string default NULL); sqlite> pragma table_info(testings); 0|a|integer|0||1 1|b|string|0|Tester|0 2|c|string|0||0 After 3.3.8, the 'defaults' column is now a SQL-quoted string: dev:~> sqlite3 SQLite version 3.3.11 Enter ".help" for instructions sqlite> create table testings (a integer primary key, b string default 'Tester', c string default NULL); sqlite> pragma table_info(testings); 0|a|integer|0||1 1|b|string|0|'Tester'|0 2|c|string|0|NULL|0 Now, I think I do prefer the latter, where the default is a SQL-quoted string. However, this seems a rather significant change to make mid-stream, in a minor point release. It broke all Ruby on Rails applications that use sqlite3, for instance, because Rails reads that default value to determine how to default the value of each new record. Was this intentional? Or is this a bug? I'd love to see this behavior reverted and saved for a release with a more significant release number. _2007-Jan-29 22:01:54 by anonymous:_ {linebreak} One of your fellow Railers requested this change: Ticket #2078 ---- _2007-Jan-29 22:10:55 by drh:_ {linebreak} See also ticket #1919 which might also be an issue here. ---- _2007-Jan-29 22:33:19 by anonymous:_ {linebreak} Anonymous, you make it sound as if anyone associated with Rails can make a request of the sqlite3 team and have it be automatically assumed to be sanctioned by the Rails core team. Whoever did the original request did not do so under the umbrella of Rails core. 
If that change was the one that resulted in this behavior, it most definitely should not have been recommended, and would not have been blessed by any of the core team. At this point, though, I'm not interested in blame. I just want to see what can be done to make sqlite3 work with Rails again, preferably in a way that is backwards compatible with previous sqlite3 releases. ---- _2007-Jan-30 05:20:56 by anonymous:_ {linebreak} I agree the feature should be fixed due to backwards compatability, but Rails should try to accomodate both pragma variants since they are both "in the wild". You could base your decision on the sqlite version string, for instance. ---- _2007-Jan-30 18:39:44 by anonymous:_ {linebreak} Just FYI, there's another related ticket at the Rails trac at http://dev.rubyonrails.org/ticket/6523, and a Debian bug report with a patch at http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=397531.
#e8e8bd 2206 new active 2007 Jan anonymous 2007 Jan 5 4 Support for foreign key constraints in virtual tables Please consider supporting parsing of foreign key constraints in the _CREATE TABLE_ SQL text passed to _sqlite3_declare_vtab()_. Rationale: when a virtual table is used to implement something like a _JOIN_ or _VIEW_ on table(s) of the master SQLite database, _PRAGMA foreign_key_list()_ on the virtual table(s) can provide information about key relationship between the virtual table(s) and the table(s) in the master SQLite database. I've attached a patch which implements this feature, however I'm unsure about possible side effects.
#e8e8bd 2205 build active 2007 Jan anonymous 2007 Jan anonymous 1 1 Problem while using with tcl on ARM I am using the sqlite-3.3.12. I have compiled this version for ARM and mandrake linux. On PC it is working fine. But on the Hand Held device with tcl, it produce error after creating the database file that "database disk image is malformed" while executing query for creating table. Another problem is that on executing sqlite3 executable on PC it shows version 3.3.11 But on executing sqlite3 executable on hand held it shows version 3.3.12 though these both executables were compiled from same source that is sqlite 3.3.12.
#e8e8bd 2202 new active 2007 Jan anonymous 2007 Jan 5 4 function request: sqlite3_attach() Hello! A request for a new function, if i may: int sqlite3_attach( sqlite3 * host, sqlite3 * parasite, char const * dbname ); In case it's not obvious, this would be functionally identical to calling: ATTACH parasite as dbname; from the host database. Alternately, but probably less useful: int sqlite3_attach( sqlite3 * host, char const * parasite, char const * dbname ); to directly attach databases. This second option isn't so useful because we already have this feature via the ATTACH SQL command.
#e8e8bd 2200 new active 2007 Jan anonymous 2007 Jan 5 4 threadsafe status not reported in .pc file Some application based on sqlite (for exampe Tracker indexing and searching tool) could need the threadsafe. Currently there in no way to know the status of threadsafe of installed sqlite. It could be good add a "threadsafe" variable in sqlite3.pc like libdir=${exec_prefix}/lib includedir=${prefix}/include threadsafe=yes that developers could query using: $ pkg-config --variable=threadsafe sqlite3 yes
#e8e8bd 2199 build active 2007 Jan anonymous 2007 Jan 1 1 configure doesn't find libreadline if it's in an uncommon place configure doesn't find libreadline if it's in an uncommon place. I suggest changing configure to be able to deal with something like this: --with-readline=/path
#e8e8bd 2198 new active 2007 Jan anonymous 2007 Jan 5 4 API: opening only existing databases It would be useful to enhance the sqlite3_open function so it would take 3 parameters -- the third would control whether the database should exist for function success. In other words (the 3rd argument):{linebreak} create_or_access_existing -- works in both cases{linebreak} do_not_create_new_db -- fails if there is no such database file It is extremely useful if the user wants only read from the db. In current API she/he gets an delayed error while trying to read the table in database.
#f2dcdc 2183 code active 2007 Jan anonymous 2007 Jan drh 1 1 OMIT_SHARED_CACHE: AV and crash with FTS2 INSERT Given that SQLite is compiled with -DSQLITE_ENABLE_FTS2=1 -DSQLITE_OMIT_SHARED_CACHE=1 the following code crashes after about 273 insertions with Access violation: Read of address 0x00000014 at btree.c, line 3451: if( pCur->idx>=pPage->nCell ){ Here is the code to reproduce: int main(int argc, char* argv[]) { sqlite3_stmt *pStmt; int i; check( sqlite3_open( "test_fts2.db3", &db) ); check( sqlite3_exec( db, "CREATE VIRTUAL TABLE FTS USING FTS2 (Content);", 0, 0, 0)); check( sqlite3_exec( db, "BEGIN TRANSACTION;", 0, 0, 0)); check( sqlite3_prepare( db, "INSERT INTO FTS (Content) VALUES ('Far out in the uncharted backwaters of the unfashionable end of the Western Spiral arm of the Galaxy lies a small unregarded yellow sun.');", -1, &pStmt, NULL)); for( i = 1; i < 1000; i++) { printf( "%d\n", i); check( sqlite3_step( pStmt) ); check( sqlite3_reset( pStmt) ); } check( sqlite3_exec( db, "COMMIT;", 0, 0, 0)); check( sqlite3_finalize( pStmt )); check( sqlite3_close( db )); printf ("Done"); scanf ("*%s"); return 0; } Could this be related to ticket #2032?
#e8e8bd 2188 doc active 2007 Jan anonymous 2007 Jan 5 4 Doc bug in src/vdbe.c, should s/P1/P2/ in NotFound # diff --- src/vdbe.c 2007-01-10 01:01:14.000000000 +1100 +++ src/vdbe_new.c 2007-01-24 16:34:38.139376872 +1100 @@ -2923,7 +2923,7 @@ ** ** The top of the stack holds a blob constructed by MakeRecord. P1 is ** an index. If no entry exists in P1 that matches the blob then jump -** to P1. If an entry does existing, fall through. The cursor is left +** to P2. If an entry does existing, fall through. The cursor is left ** pointing to the entry that matches. The blob is popped from the stack. ** ** The difference between this operation and Distinct is that
#e8e8bd 2187 todo active 2007 Jan anonymous 2007 Jan 3 4 RAISE in trigger not being caught by CONFLICT clause in calling SQL When an exception is raised within a trigger, the on conflict clause on the calling SQL statement is not being invoked. Note: The CASE statement in example is to demonstrate the error, a CHECK Constraint would have been more appropriate if this were a real world senario. Sample SQL Below: ------------------------------------- CREATE TABLE abc (a);{linebreak} CREATE TRIGGER abc_insert BEFORE INSERT ON abc BEGIN{linebreak} SELECT CASE WHEN (new.a > 2) THEN{linebreak} RAISE(ABORT, 'error here'){linebreak} END;{linebreak} END;{linebreak} BEGIN;{linebreak} INSERT INTO abc VALUES (1);{linebreak} -- This should raise error but not rollback{linebreak} INSERT INTO abc VALUES (4);{linebreak} -- This should raise error and rollback{linebreak} INSERT OR ROLLBACK INTO abc VALUES (4);{linebreak} -- Check to see if ROLLBACK performed (which it hasn't){linebreak} SELECT * FROM abc;{linebreak}
#e8e8bd 2185 new active 2007 Jan anonymous 2007 Jan 5 3 API access to opened database pathname - helpful for virtual tables It would be helpful if there was an api to retrieve the pathname (or :memory:) to the opened database. I am implementing a virtual table and would like to open subsequent virtual table (flat files in the filesystem) in the same location that the DB was opened.
#f2dcdc 2182 code active 2007 Jan anonymous 2007 Jan 4 4 sqlite3BtreeGetMeta does not check the file format =sqlite3BtreeGetMeta= reads database page 1 directly from the pager layer, skipping all of the format checks in =lockBtree=. Thus, if the very first query you do against a database is "=PRAGMA user_version=" and the database isn't valid (in particular, if it is an sqlite 2 database) you will get a garbage result rather than an =SQLITE_NOTADB= or =SQLITE_CORRUPT= error. Demonstration: $ touch bug1.db # empty file $ dd if=/dev/zero of=bug2.db bs=100 count=1 # file header all zeroes $ sqlite2 bug3.db 'create table a(b);' # old-format database $ sqlite3 bug1.db 'pragma user_version' ; echo $? 0 0 $ sqlite3 bug2.db 'pragma user_version' ; echo $? 0 0 $ sqlite3 bug3.db 'pragma user_version' ; echo $? 1795162112 0 Contrast the sensible behavior if you do a =SELECT=: $ sqlite3 bug1.db 'select * from a'; echo $? SQL error: no such table: a 1 $ sqlite3 bug2.db 'select * from a'; echo $? SQL error: file is encrypted or is not a database 1 $ sqlite3 bug3.db 'select * from a'; echo $? SQL error: file is encrypted or is not a database 1 _2007-Jan-21 22:05:14 by anonymous:_ {linebreak} (Submitter:) Is there a reason why the file format check doesn't happen in sqlite3_open?
#e8e8bd 2181 new active 2007 Jan anonymous 2007 Jan 5 4 Generalize ON CONFLICT to failure trapping for all SQL statements (This is a more general alternative to the feature request in #2180.) It would be nice if clauses similar to ON CONFLICT were available for all supported statements, to specify error recovery behavior in all cases. For instance, one would like to be able to write things like SELECT ... FROM foo ON ABSENT IGNORE; with the effect that if there is no table 'foo', the SELECT simply returns zero rows. I am not sure what the complete set of conditions to recover from would be, but I think that ABSENT (table or column missing), EXISTS (something you're trying to create already exists), and NONEMPTY (to avoid deleting data unintentionally, per #2180) should cover most cases.
#e8e8bd 2180 new active 2007 Jan anonymous 2007 Jan 5 4 feature request: DROP TABLE IF [EXISTS AND] EMPTY It would be useful to have a straightforward way to drop a table only if it contains no rows. Currently it is necessary to do this in application logic, by doing a dummy SELECT to find out if there's any data in the table (e.g. "SELECT 1 FROM table LIMIT 1" - this is the most efficient such query I can find). And of course one has to take care to handle errors in that SELECT (e.g. if the table doesn't exist). I suggest DROP TABLE IF EMPTY, by analogy with the existing DROP TABLE IF EXISTS. Naturally, one would like to be able to combine the two, to drop an empty table but do nothing if the table exists or isn't empty. _2007-Jan-26 22:59:50 by anonymous:_ {linebreak} To comment on that last statement: "I suggest DROP TABLE IF EMPTY, by analogy with the existing DROP TABLE IF EXISTS. Naturally, one would like to be able to combine the two, to drop an empty table but do nothing if the table exists or isn't empty." That sounds redundant: DROP TABLE IF EMPTY will not drop a table if it has any data, nor if the table does not exist. The question comes to mind, "when is there an error?" Is DROP TABLE IF EMPTY an error if the table is not empty? Or would we need DROP TABLE IF EXISTS AND IF EMPTY to ensure that we have clear success/failure paths in the case that we do DROP TABLE IF EMPTY for a table which we're not sure is there or not. If sounds like "IF EMPTY" should automatically imply "AND IF EXISTS", because a prerequisite of being empty is that the table has to exist. IMO, a statement like DROP TABLE IF EMPTY is 100% sugar and should not produce any error code, similarly to DROP TABLE IF EXISTS (which does not produce an error if the table DOES exist, though we could rightfully argue that it should raise an error because its condition is not met). ----- sgb
#e8e8bd 2167 new active 2007 Jan anonymous 2007 Jan 1 3 add sqlite3_copy_bindings (parallel to sqlite3_transfer_bindings) sqlite3_transfer_bindings( from, to ) does not leave the 'from' stmt in a usable mode. If I want to create a separate, independent copy of an sqlite3_stmt, I have to replicate the bindings. I have created a modified version of sqlite3_transfer_bindings() which simply replaced sqlite3VdbeMemMove() with sqlite3VdbeMemCopy(), and named the function sqlite3_copy_bindings. I'm not an expert in Sqlite internals so I can't tell if there any issues with this.
#f2dcdc 2165 code active 2007 Jan anonymous 2007 Jan drh 4 3 pager performance and checksum pager.c Embed the 2 byte pager_pagehash() result into the page, near the beginning of the page. Use the entire page to calculate the pager_pagehash exclusive of the two byte page_hash data embedded in the page. That way a simple xor as in CHECK_PAGE of the entire page including the 2 byte pager_pagehash is all that is needed to validate a page. Also you could include the "4 byte" random at the beginning and at the end... But that would be a bit of overkill. The sampling of only every 200 bytes is interesting. Review change the SQLITE_CHECK_PAGES ifdef to a SQLITE_OMIT_PAGE_CHECK.. As on disk page validity is very important. Could this be integrated into a pragma setting? _2007-Jan-12 18:08:49 by anonymous:_ {linebreak} uint16_t pg_chkval(Dpage *pg, uint32_t pg_size) { register int i; register uint16_t val = 0; register uint16_t *bw = (uint16_t *) pg; for(i= 0; i < pg_size;i=i+2 ) val= val^ *bw++ ; return val; } uint16_t pg_calcval(Dpage *pg, uint32_t page_size) { int i; register uint16_t val = 0; register uint16_t *bw = (uint16_t *) pg; /* Scan up to location where chk val is stored */ for(i= 0; i < 8;i=i+2 ) val= val^ *bw++ ; val = val^ 0; bw ++ ; /* Now scan the tail of the block */ for(i=10; i < page_size;i=i+2 ) val= val^ *bw++ ; return val; }
#e8e8bd 1722 new active 2006 Mar anonymous Unknown 2007 Jan 4 2 aggregate sum() of strings I'd like to have something like sum() in aggregate functions but to work with strings. I'd like that function to concatenate strings, similar to summing in sum(), e.g.: SELECT sum(name || ',') FROM names GROUP BY ..... etc... :) I've heard that something like this is in PostgreSQL?
#e8e8bd 2148 doc active 2007 Jan anonymous 2007 Jan 5 5 3.3.9 changes.html addition - Fixed the ".dump" command in the command-line shell to show triggers and views again. + Fixed the ".dump" command in the command-line shell to show indexes, triggers and views again.
#f2dcdc 2140 code active 2007 Jan anonymous 2007 Jan 3 1 sqlite doesn't link to readline sqlite relies on another library to link to libreadline, causing this error with LDFLAGS=-Wl,--as-needed: gcc -O2 -march=i686 -pipe -DOS_UNIX=1 -DHAVE_USLEEP=1 -DHAVE_FDATASYNC=1 -I. -I./src -DNDEBUG -DTHREADSAFE=1 -DSQLITE_THREAD_OVERRIDE_LOCK=-1 -DSQLITE_OMIT_CURSOR -DHAVE_READLINE=1 -I/usr/include/readline -o .libs/sqlite3 ./src/shell.c ./.libs/libsqlite3.so -lpthread -lncurses /tmp/cclOD1M7.o: In function `process_input': shell.c:(.text+0x37a5): undefined reference to `readline' shell.c:(.text+0x37c0): undefined reference to `add_history' /tmp/cclOD1M7.o: In function `main': shell.c:(.text+0x3f01): undefined reference to `read_history' shell.c:(.text+0x3f1a): undefined reference to `stifle_history' shell.c:(.text+0x3f22): undefined reference to `write_history' collect2: ld returned 1 exit status
#e8e8bd 2254 new active 2007 Feb anonymous 2007 Feb 5 4 ATTACH support IF NOT ATTACHED statement It'd be nice if ATTACH supported an IF NOT ATTACHED option as in ATTACH IF NOT ATTACHED 'C:\db\log.dat' AS Logs so there'd be no harm in issuing an attach statement multiple times (and no need to query the database list to see if a database is already attached).
#e8e8bd 2247 doc active 2007 Feb anonymous 2007 Feb 3 3 documentation of DEFAULT clause in CREATE TABLE should be fixed The documentation for the CREATE TABLE statement at http://www.sqlite.org/lang_createtable.html shows the syntax of the DEFAULT clause as DEFAULT value and the description says that value can be NULL, a string constant, a number, or one of three keyword values; CURRENT_DATE, CURRENT_TIME, or CURRENT_TIMESTAMP. This is incorrect since the DEFAULT clause also allows value to be an expression in brackets. The syntax should be changed to something like DEFAULT default_value default_value := value | ( expression ) and the description should say that some expressions are allowed. In particular functions can be used as the expression. It should also be clarified when the DEFAULT clause functions are evaluated, at the time the create statement executes, or at the time a record is added to the table. Note that not all expressions are allowed. In particular the ( select_statement ) form produces an error saying the default value is not constant, even though the apparently non-constant function random() is accepted. sqlite> create table t2(id, b default ((select avg(a) from t1))); SQL error: default value of column [b] is not constant sqlite> create table t2(id, b default (random())); sqlite> Tests using a default function julianday('now') as a default show that the function is evaluated at the time the record is inserted. If that is the case, why can the select expression above not be evaluated each time a record is inserted to generate a default value?
#e8e8bd 2246 doc active 2007 Feb anonymous 2007 Feb 5 4 SQL docs for CREATE TABLE should include FOREIGN KEY syntax The documentation on supported SQL for CREATE TABLE should include syntax for foreign keys since foreign keys are parsed. Of course it should say that the foreign keys are not enforced, but since they are parsed having the syntax in the documentation would be appropriate. http://sqlite.org/lang_createtable.html Thanks, Sam
#f2dcdc 2075 code active 2006 Nov anonymous 2007 Feb 3 3 Improve VACUUM speed and INDEX page locality In testing several 100 Meg - 1 Gig databases (including the Monotone OpenEmbedded database) I found that changing the order of the SQL commands executed by VACUUM to create indexes after table inserts results in 15% faster VACUUM times, and up to 25% faster cold-file-cache queries when indexes are used. This patch effectively makes the pages of each index contiguous in the database file after a VACUUM, as opposed to being scattered throughout the pages of the table related to the index. Your results may vary, but I think this is a very safe change that can potentially boost average database performance. Index: src/vacuum.c =================================================================== RCS file: /sqlite/sqlite/src/vacuum.c,v retrieving revision 1.65 diff -u -3 -p -r1.65 vacuum.c --- src/vacuum.c 18 Nov 2006 20:20:22 -0000 1.65 +++ src/vacuum.c 20 Nov 2006 21:09:27 -0000 @@ -143,14 +143,6 @@ int sqlite3RunVacuum(char **pzErrMsg, sq " AND rootpage>0" ); if( rc!=SQLITE_OK ) goto end_of_vacuum; - rc = execExecSql(db, - "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000)" - " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - rc = execExecSql(db, - "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) " - " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"); - if( rc!=SQLITE_OK ) goto end_of_vacuum; /* Loop through the tables in the main database. For each, do ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy @@ -162,10 +154,22 @@ int sqlite3RunVacuum(char **pzErrMsg, sq "FROM sqlite_master " "WHERE type = 'table' AND name!='sqlite_sequence' " " AND rootpage>0" - ); if( rc!=SQLITE_OK ) goto end_of_vacuum; + /* Create indexes after the table inserts so that their pages + ** will be contiguous resulting in (hopefully) fewer disk seeks. 
+ */ + rc = execExecSql(db, + "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) " + " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"); + if( rc!=SQLITE_OK ) goto end_of_vacuum; + + rc = execExecSql(db, + "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000)" + " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "); + if( rc!=SQLITE_OK ) goto end_of_vacuum; + /* Copy over the sequence table */ rc = execExecSql(db, _2007-Feb-11 00:49:50 by drh:_ {linebreak} My alternative plan is to modify insert.c so that it recognizes the special case of INSERT INTO table1 SELECT * FROM table2; when table1 and table2 have identical schemas, including all the same indices. When this special case is recognized, the generated bytecode will first transfer all table entries from table2 to table1, using a row by row transfer without decoding each row into its constituent columns. Then it will do the same for each index. There will be two benefits here. First, when the above construct occurs during the course of a VACUUM, the table and each index, including intrinsic indices associated with UNIQUE and PRIMARY KEY constraints, will be transferred separately so that all of their pages will be adjacent in the database file. The second benefit will occur when trying to load large quantities of data into an indexed table. Loading indexed data into a very large table is currently slow because the index entries are scattered haphazardly around in the file. But if data is first loaded into a smaller temporary table with the same schema, it can then be transferred to the main table using an INSERT statement such as the above in what amounts to a merge operation. ---- _2007-Feb-11 06:58:36 by anonymous:_ {linebreak} There's no question that your proposal will greatly improve VACUUM speed which relies on the "INSERT INTO table1 SELECT * from table2" construct.
But would it be possible for you to relax the restriction on having identical indexes for table1 and table2? For that matter it would be nice if table2 could be any subselect or view. Then "REPLACE INTO table1 SELECT ...anything..." could also be optimized. Since you can detect that SQLite is doing a bulk insert anyway, it could generate code to make a temporary staging table with automatically generated identical indexes to table1 which could be periodically merged into table1 and truncated every X rows. X could be either set via pragma or be a function of the size of the page cache. The temporary staging table would be dropped after the bulk INSERT INTO ... SELECT. Every user inserting large volumes of data would have to perform this procedure anyway. Manually recreating all the indexes for a given temporary table to match the original table and performing the looping logic is cumbersome and error-prone. It would be very convenient if SQLite were to do it on the user's behalf. This scheme could only work if there are no triggers on table1, of course. ---- _2007-Feb-11 09:16:25 by drh:_ {linebreak} My initial enhancement does nothing to preclude the more aggressive enhancement described by anonymous. In order to avoid subtle bugs, and in view of my limited time available to work on this, I think it best to take the more conservative approach first and defer the more elaborate optimization suggested by anonymous until later. ---- _2007-Feb-11 13:54:34 by anonymous:_ {linebreak} It should be possible to identify contiguous blocks of individual "INSERT INTO table1 VALUES(...)" statements to the same table within a large transaction and perform the same proposed optimization as with "INSERT INTO table1 SELECT ...". This would require higher level coordination by the parser.
Anytime a read operation (SELECT, UPDATE) occurs on such a table marked for bulk INSERT within the large transaction, its temp staging table could be merged into the INSERT destination table and the staging table truncated. The process could be repeated for the remainder of the transaction. Such an optimization would be a huge benefit to SQLite users since they would not need to know the idiosyncrasies of the implementation of "INSERT INTO table1 SELECT ..." in order to have efficient table and index population. Alternatively, if you wish to avoid the complexity of re-assembling and staging individual INSERT statements, it might be a good opportunity for SQLite to support the multi-row variant of the INSERT command: INSERT INTO table1 (a,b,c) VALUES(1,2,3), (4,5,6), (7,8,9); Which is essentially a transform of: CREATE TEMP TABLE table1_staging (a,b,c); INSERT INTO table1_staging VALUES(1,2,3); INSERT INTO table1_staging VALUES(4,5,6); INSERT INTO table1_staging VALUES(7,8,9); INSERT INTO table1 SELECT * FROM table1_staging; -- TRUNCATE OR DROP table1_staging as necessary which could use the same bulk staging optimization. ---- _2007-Feb-13 02:42:41 by anonymous:_ {linebreak} Any harm in checking in the simple patch above for the 3.3.13 release? ---- _2007-Feb-13 12:51:47 by drh:_ {linebreak} I have a much better fix standing by that I intend to check-in as soon as I get 3.3.13 out the door. I don't want this in 3.3.13 for stability reasons. ---- _2007-Feb-18 23:07:08 by anonymous:_ {linebreak} Some related analysis and an .import patch using a :memory: staging table with the "INSERT INTO table1 SELECT FROM table2" construct can be found here: http://www.mail-archive.com/sqlite-users%40sqlite.org/msg22143.html
#e8e8bd 2238 new active 2007 Feb anonymous 2007 Feb 2 3 Streams as database Would it be possible to allow the use of streams as a database source? _2007-Feb-18 03:56:46 by anonymous:_ {linebreak} You'll have to be more precise in what you mean by that. SQLite needs to be able to do random access to the database data (ie seek all over the place according to how it is laid out). It also needs the ability to have a journal file alongside the database which is used when writing to do a rollback, or even for readers to know that a rollback needs to be done. I am not aware of any 'streams' that meet those criteria.
#f2dcdc 2237 code active 2007 Feb anonymous 2007 Feb 4 4 Test suite regressions using Tcl 8.5 Tcl 8.5, SQLite 3.3.13 CVS printf-1.7.6... Expected: [Three integers: (1000000) ( f4240) (3641100)] Got: [Three integers: ( 1000000) ( f4240) (3641100)] printf-1.8.6... Expected: [Three integers: (999999999) (3b9ac9ff) (7346544777)] Got: [Three integers: ( 999999999) (3b9ac9ff) (7346544777)] printf-1.9.7... Expected: [Three integers: ( 0) ( 0x0) ( 0)] Got: [Three integers: ( 0) ( 0) ( 0)] tcl-1.6... Expected: [1 {syntax error in expression "x*"}] Got: [1 {invalid bareword "x" in expression "x*"; should be "$x" or "{x}" or "x(...)" or ...}]
#e8e8bd 2235 new active 2007 Feb anonymous 2007 Feb 4 3 Missing xml support in FTS2 for the snippet function The snippet function _may_ output invalid characters when used for an xml stream (like xhtml). Characters &, < and > need to be escaped (&amp;, &lt; and &gt;) in this context. The modification proposed is to add a boolean parameter to the snippet function to disable/enable the XML processing mode ; for example, given : =insert into poem (name, text) values ('test', 'Xml string with special < > & entities') ;= =select snippet(poem, '', '', '...', 1) from poem where text match 'xml' ;= output should be: =Xml string with special &lt; &gt; &amp; entities= This modification does not affect the default behaviour of the snippet function. Patch included.
#f2dcdc 2060 code active 2006 Nov anonymous 2007 Feb 1 1 Table references enclosed in parenthesis become "invisible" Hi, I'm developing an RDF-based system, which translates queries from SPARQL into SQL. While trying to add support for SQLite (MySQL is already supported) I came across the following problem: when table references in a FROM clause are enclosed in parenthesis, they cannot be referenced from outside the parenthesized expression. For example, given the table definitions CREATE TABLE IF NOT EXISTS t1 (a, b); CREATE TABLE IF NOT EXISTS t2 (c, d); CREATE TABLE IF NOT EXISTS t3 (e, f); The following queries all fail with "no such column" errors: SELECT t1.a, t3.f FROM (t1 CROSS JOIN t2 ON t1.b = t2.c) LEFT JOIN t3 ON t2.d = t3.e; SELECT t1.a, t3.f FROM t1 CROSS JOIN (t2 LEFT JOIN t3 ON t2.d = t3.e) ON t1.b = t2.c; SELECT t1.a, t2.d FROM (t1), (t2) WHERE t1.b = t2.c; I'm not sure if it is always possible to reformulate the queries in such a way that the extra parenthesis aren't necessary, but I suspect that complex expressions involving joins may require them to achieve the intended semantics. In any case, my system would require large changes to be able to get rid of the parenthesized subjoins, so it would be nice if this problem could be fixed. :-) _2006-Nov-10 03:56:46 by anonymous:_ {linebreak} For what it's worth, here's the parse trees of two similar queries ("SELECT t1.a, t2.d FROM t1, t2 WHERE t1.b = t2.c" and "SELECT t1.a, t2.d FROM (t1), (t2) WHERE t1.b = t2.c"), as well as one of the other more complicated join queries previously listed. 
SELECT t1.a, t2.d FROM t1, t2 WHERE t1.b = t2.c; Select { op: TK_SELECT isResolved: 1 pSrc: { a[0]: { zName: t1 iCursor: 0 colUsed: 0x00000003 pTab: t1 jointype: JT_INNER } a[1]: { zName: t2 iCursor: 1 colUsed: 0x00000003 pTab: t2 } } pEList: { a[0]: { pExpr: { op: TK_COLUMN span: {t1.a} affinity: SQLITE_AFF_NONE iTable: 0 iColumn: 0 pTab: t1 } } a[1]: { pExpr: { op: TK_COLUMN span: {t2.d} affinity: SQLITE_AFF_NONE iTable: 1 iColumn: 1 pTab: t2 } } } pWhere: { op: TK_EQ span: {t1.b = t2.c} pLeft: { op: TK_COLUMN span: {t1.b} affinity: SQLITE_AFF_NONE iTable: 0 iColumn: 1 pTab: t1 } pRight: { op: TK_COLUMN span: {t2.c} affinity: SQLITE_AFF_NONE iTable: 1 iColumn: 0 pTab: t2 } } } SELECT t1.a, t2.d FROM (t1), (t2) WHERE t1.b = t2.c; Select { op: TK_SELECT isResolved: 1 pSrc: { a[0]: { zAlias: sqlite_subquery_5C0A10_ iCursor: 0 pTab: sqlite_subquery_5C0A10_ pSelect: { op: TK_SELECT isResolved: 1 pSrc: { a[0]: { zName: t1 iCursor: 1 colUsed: 0x00000003 pTab: t1 } } pEList: { a[0]: { zName: a pExpr: { op: TK_COLUMN token: {a} span: {a} affinity: SQLITE_AFF_NONE iTable: 1 iColumn: 0 pTab: t1 } } a[1]: { zName: b pExpr: { op: TK_COLUMN token: {b} span: {b} affinity: SQLITE_AFF_NONE iTable: 1 iColumn: 1 pTab: t1 } } } } jointype: JT_INNER } a[1]: { zAlias: sqlite_subquery_5BE4F0_ iCursor: 2 pTab: sqlite_subquery_5BE4F0_ pSelect: { op: TK_SELECT isResolved: 1 pSrc: { a[0]: { zName: t2 iCursor: 3 colUsed: 0x00000003 pTab: t2 } } pEList: { a[0]: { zName: c pExpr: { op: TK_COLUMN token: {c} span: {c} affinity: SQLITE_AFF_NONE iTable: 3 iColumn: 0 pTab: t2 } } a[1]: { zName: d pExpr: { op: TK_COLUMN token: {d} span: {d} affinity: SQLITE_AFF_NONE iTable: 3 iColumn: 1 pTab: t2 } } } } } } pEList: { a[0]: { pExpr: { op: TK_COLUMN span: {t1.a} flags: EP_Resolved EP_Error iTable: -1 iColumn: 0 } } a[1]: { pExpr: { op: TK_DOT span: {t2.d} pLeft: { op: TK_ID token: {t2} span: {t2} } pRight: { op: TK_ID token: {d} span: {d} } } } } pWhere: { op: TK_EQ span: {t1.b = t2.c} pLeft: { op: 
TK_DOT span: {t1.b} pLeft: { op: TK_ID token: {t1} span: {t1} } pRight: { op: TK_ID token: {b} span: {b} } } pRight: { op: TK_DOT span: {t2.c} pLeft: { op: TK_ID token: {t2} span: {t2} } pRight: { op: TK_ID token: {c} span: {c} } } } } SQL error: no such column: t1.a SELECT t1.a, t3.f FROM (t1 CROSS JOIN t2 ON t1.b = t2.c) LEFT JOIN t3 ON t2.d = t3.e; Select { op: TK_SELECT isResolved: 1 pSrc: { a[0]: { zAlias: sqlite_subquery_5BFA30_ iCursor: 0 pTab: sqlite_subquery_5BFA30_ pSelect: { op: TK_SELECT isResolved: 1 pSrc: { a[0]: { zName: t1 iCursor: 1 colUsed: 0x00000003 pTab: t1 jointype: JT_INNER JT_CROSS } a[1]: { zName: t2 iCursor: 2 colUsed: 0x00000003 pTab: t2 } } pEList: { a[0]: { zName: a pExpr: { op: TK_COLUMN span: {t1.a} affinity: SQLITE_AFF_NONE iTable: 1 iColumn: 0 pTab: t1 } } a[1]: { zName: b pExpr: { op: TK_COLUMN span: {t1.b} affinity: SQLITE_AFF_NONE iTable: 1 iColumn: 1 pTab: t1 } } a[2]: { zName: c pExpr: { op: TK_COLUMN span: {t2.c} affinity: SQLITE_AFF_NONE iTable: 2 iColumn: 0 pTab: t2 } } a[3]: { zName: d pExpr: { op: TK_COLUMN span: {t2.d} affinity: SQLITE_AFF_NONE iTable: 2 iColumn: 1 pTab: t2 } } } pWhere: { op: TK_EQ span: {t1.b = t2.c} flags: EP_FromJoin EP_Resolved iRightJoinTable: 2 pLeft: { op: TK_COLUMN span: {t1.b} affinity: SQLITE_AFF_NONE flags: EP_FromJoin EP_Resolved iTable: 1 iColumn: 1 iRightJoinTable: 2 pTab: t1 } pRight: { op: TK_COLUMN span: {t2.c} affinity: SQLITE_AFF_NONE flags: EP_FromJoin EP_Resolved iTable: 2 iColumn: 0 iRightJoinTable: 2 pTab: t2 } } } jointype: JT_LEFT JT_OUTER } a[1]: { zName: t3 iCursor: 3 pTab: t3 } } pEList: { a[0]: { pExpr: { op: TK_COLUMN span: {t1.a} flags: EP_Resolved EP_Error iTable: -1 iColumn: 0 } } a[1]: { pExpr: { op: TK_DOT span: {t3.f} pLeft: { op: TK_ID token: {t3} span: {t3} } pRight: { op: TK_ID token: {f} span: {f} } } } } pWhere: { op: TK_EQ span: {t2.d = t3.e} flags: EP_FromJoin iRightJoinTable: 3 pLeft: { op: TK_DOT span: {t2.d} flags: EP_FromJoin iRightJoinTable: 3 pLeft: { op: 
TK_ID token: {t2} span: {t2} flags: EP_FromJoin iRightJoinTable: 3 } pRight: { op: TK_ID token: {d} span: {d} flags: EP_FromJoin iRightJoinTable: 3 } } pRight: { op: TK_DOT span: {t3.e} flags: EP_FromJoin iRightJoinTable: 3 pLeft: { op: TK_ID token: {t3} span: {t3} flags: EP_FromJoin iRightJoinTable: 3 } pRight: { op: TK_ID token: {e} span: {e} flags: EP_FromJoin iRightJoinTable: 3 } } } } SQL error: no such column: t1.a ---- _2006-Nov-11 18:29:33 by anonymous:_ {linebreak} The resolving bug appears to be that unique column names or column aliases are searched across all subqueries, but table names and table aliases are only searched at their current SELECT level only. With this in mind, here are mechanical workarounds without using column aliases (assumes the column names in all joined tables are unique): SELECT a, f FROM (t1 CROSS JOIN t2 ON t1.b = t2.c) LEFT JOIN t3 ON d = e; SELECT t1.a, f FROM t1 CROSS JOIN (t2 LEFT JOIN t3 ON t2.d = t3.e) ON t1.b = c; SELECT a, d FROM (t1), (t2) WHERE b = c; And here are mechanical workarounds using column aliases (assumes the column names are not unique between tables): SELECT t1.a, t3f FROM t1 CROSS JOIN (select t3.f t3f, t2.c t2c from t2 LEFT JOIN t3 ON t2.d = t3.e) ON t1.b = t2c; SELECT t1a, t3.f FROM (select t1.a t1a, t2.d t2d from t1 CROSS JOIN t2 ON t1.b = t2.c) LEFT JOIN t3 ON t2d = t3.e; SELECT t1a, t2d FROM (select t1.a t1a, t1.b t1b from t1), (select t2.c t2c, t2.d t2d from t2) WHERE t1b = t2c; Notice that t3.f in the second query did not require an alias because the table "t3" was part of its immediate SELECT. You could make an alias for every column just in case, I just wanted to highlight the difference. ---- _2007-Feb-13 15:40:31 by anonymous:_ {linebreak} Fixing this issue would slow down SELECT parsing and column resolution for all queries (more specifically all prepared statements) due to the recursion required for column resolution. It would be easier to change your SQL code generator to accomodate SQLite. 
Just make aliases for every table at every subselect level and have the SELECT at any given level only work with the table aliases at that level.
#f2dcdc 2225 code active 2007 Feb scouten 2007 Feb 5 4 Request count of number of inserts and deletes from the free page list As a rough indication of potential fragmentation in the database, we'd like to know how many pages have been added and removed from the free page list. _2007-Feb-10 19:24:13 by drh:_ {linebreak} The issue of database fragmentation is also addressed by ticket #2075 (a ticket which I had overlooked prior to today.)
#e8e8bd 2224 new active 2007 Feb scouten 2007 Feb 4 4 Option to have one-bit "journal should exist" flag Per discussion with DRH: Would it be possible to have a one-bit flag in the header page of the DB file that signals that there _should_ be a journal file present. If you attempt to open a database with that flag set, but the journal file is not present, SQLite should fail to open the DB. _2007-Feb-09 13:47:44 by drh:_ {linebreak} Here is the issue: An application that uses SQLite for persistence is receiving database corruption reports from the field. The developers believe that the corruption occurs after a power failure or other crash leaves a hot journal file and then the user manually deletes the hot journal thinking that it is some garbage file left over from the crash. If there is a "journal should exist" flag in the database file and no journal is seen, that would indicate that the journal has been deleted or renamed and that the database has been corrupted. If the application can detect this, it might be able to locate the deleted journal in the trashcan and recover from the user error. Other ideas for addressing this problem: *: Change the "-journal" extension on the journal files to something like "-do-not-delete". *: Make the journal a hidden file. (The problem here is that if somebody goes to move the database file and the database has a hot journal, they would likely not know to move the journal too since it is not visible.) *: Change permissions on the journal file so that it is read-only. This doesn't prevent the journal from being deleted by a determined user, but it might at least give them a clue that this is not a file to be deleted without at least due consideration.
#f2dcdc 2226 code active 2007 Feb scouten 2007 Feb 5 4 Report on unused indices (and tables?) After exercising a particular database heavily, it would be nice to know which indices (and possibly tables) have not been used at all.
#e8e8bd 2222 build active 2007 Feb anonymous 2007 Feb 4 4 Type mismatch in fts2.c [5091] when compile as fastcall or stdcall I noticed a problem when compiling with Borland TurboC using either the -pr or -ps switches and I have a fix. The error is in reference to termDataCmp being passed to qsort. By changing the declaration of termDataCmp from: static int termDataCmp(const void *av, const void *bv){ to: static int __cdecl termDataCmp(const void *av, const void *bv){ the problem was resolved. I'm not sure this is a bug, but I thought I should report it just in case someone else runs into it. Kind Regards, Tom Olson
#e8e8bd 2221 new active 2007 Feb anonymous 2007 Feb drh 3 4 Store blobs using inode-like lookup of pages rather than linked list In a recent conversation, the matter of how BLOBs are stored came up. Currently, each page of BLOB data is in a linked list. By default each page is 1K so a very large BLOB may have many many pages. The linked list becomes inefficient to find and update BLOBs. DRH mentioned a thought to move to an inode style of page management for BLOBs. This would require updating the file format.
#e8e8bd 2220 new active 2007 Feb anonymous 2007 Feb drh 2 4 fsck for database files The existing recovery strategies for dealing with a corrupted database are entirely manual and could be improved with a reasonable amount of effort. One possible way to mitigate the issue would be the creation of an fsck recovery mechanism. This would be an improved recovery from the current .dump support.
#f2dcdc 2219 code active 2007 Feb shess 2007 Feb shess 2 2 Creating an fts table in an attached database works wrong. ATTACH DATABASE 'test2.db' AS two; CREATE VIRTUAL TABLE two.t2 USING fts2(content); will put t2_content, t2_segments, and t2_segdir in database 'main' rather than database 'two'. In some cases everything will appear to work, because the tables will be defaults for that name.
#f2dcdc 2218 code active 2007 Feb anonymous 2007 Feb 3 4 select columns from views with table prefix I have a table and a view on the table, defined like this: create table mytable (mycolumn varchar); create view myview as select mytable.mycolumn from mytable; Now select "mycolumn" from myview; does work, but select mycolumn from myview; gives "unknown column"!
#f2dcdc 2215 code active 2007 Feb anonymous 2007 Feb 1 2 error messages in virtual table are not propagated I'm trying to return a customized error message in xBestIndex in my virtual table implementation. Rather than copying my implementation here the problem can be reproduced by changing the fulltextBestIndex method from fts1. For example: /* Decide how to handle an SQL query. */ static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ int i; TRACE(("FTS1 BestIndex\n")); pVTab->zErrMsg = sqlite3_mprintf ("THIS IS AN ERROR MESSAGE"); return SQLITE_ERROR; for(i=0; inConstraint; ++i){ const struct sqlite3_index_constraint *pConstraint; If I run the following I do not see the error message reported when using the shell. $ ./sqlite3 SQLite version 3.3.12 Enter ".help" for instructions sqlite> create virtual table foo using fts1(name, address); sqlite> insert into foo (name, address) values ('amanda', '43 elm avenue'); sqlite> select * from foo; SQL error: SQL logic error or missing database
#f2dcdc 2214 code active 2007 Feb anonymous 2007 Feb 5 4 lemon generates bad error messages for %destructor When using incorrect syntax for %destructor, lemon generates a bad error message. When I wanted to use %default_destructor, but used %destructor instead: %destructor { ... } I got this error message: Symbol name missing after 134583560estructor keyword This is trivially fixed by replacing "Symbol name missing after %destructor keyword" with "Symbol name missing after %%destructor keyword" twice in lemon.c
#e8e8bd 2204 new active 2007 Jan anonymous 2007 Feb 5 3 Stable, documented metadata interface In response to #2203, I'd like to suggest that a documented, stable means be added to SQLite3 by which consumers of the API may reliably query column metadata for a table, including the names of the columns, whether they are nullable or not, their types, and what their default values are. Given that, currently, the only way to get this data is via the undocumented table_info pragma, clients who want this data are at your mercy every time that pragma changes. Thanks! _2007-Jan-30 00:07:28 by anonymous:_ {linebreak} How about implementing the sql-standard information_schema? I see something similar at http://www.sqlite.org/cvstrac/wiki?p=InformationSchema The PostgreSQL equivalent: http://www.postgresql.org/docs/current/static/information-schema.html ---- _2007-Jan-31 19:14:48 by anonymous:_ {linebreak} Pragma table_info is documented at http://www.sqlite.org/pragma.html#schema ---- _2007-Jan-31 19:31:48 by anonymous:_ {linebreak} PRAGMAs are deficient because they cannot be used within SELECT statements or as sub-selects. This severely limits their usefulness in an SQL-only context. You have to use SQLite's API to make use of them. ---- _2007-Feb-03 15:10:36 by anonymous:_ {linebreak} Note that I was told by Richard himself that the table_info pragma is not considered a documented interface, and as such is fair game for incompatible changes in point releases (as we saw in 3.3.8). What I'm asking for in this ticket is an interface that is officially sanctioned and documented, and which (barring the occasional bug) can be guaranteed to remain stable (between point releases at the very least).
#e8e8bd 2864 doc active 2007 Dec anonymous 2007 Dec 5 3 ext/fts3/README.txt File ext/fts3/README.txt reads: This folder contains source code to the second full-text search [...] Shouldn't that be: This folder contains source code to the third full-text search [...] _2007-Dec-30 18:27:08 by anonymous:_ {linebreak} Oh, after Googl'ing a little bit, I found that _fts3_ really is _fts2-with-rowid-fixed_. If both _fts2_ and _fts3_ are considered to be the _"second full-text search extension for SQLite"_, the _README_ files could maybe explain the situation.
#f2dcdc 2865 code active 2007 Dec anonymous 2007 Dec 1 2 FTS3 does not build with amalgamation in CVS Grab the latest CVS sources, then run: ./configure make sqlite3.c grep sqlite3Fts3Init sqlite3.c extern int sqlite3Fts3Init(sqlite3*); rc = sqlite3Fts3Init(db); If you compile sqlite3.c with -DSQLITE_ENABLE_FTS3, then sqlite3Fts3Init is unresolved. For some reason, sqlite3Fts3Init and fts3.c was not included in the sqlite3.c amalg. It used to work correctly in 3.5.4. _2007-Dec-30 18:17:57 by anonymous:_ {linebreak} Nevermind, "make sqlite3.c" has never built with the fts3 sources in 3.5.4 or before. You have to run ext/fts3/mkfts3amal.tcl ---- _2007-Dec-30 18:20:56 by anonymous:_ {linebreak} It seems that the sqlite3+fts3 amalg can only be built from main.mk, not Makefile.
#f2dcdc 2863 code active 2007 Dec anonymous 2007 Dec 2 3 test cast-3.14, cast-3.18 and cast-3.24 fail test cast-3.{14,18,24} fail on freebsd-6.3-PRERELEASE2: cast-3.14...^M Expected: [9223372036854774784]^M Got: [9223372036854773760]^M cast-3.18...^M Expected: [-9223372036854774784]^M Got: [-9223372036854773760]^M cast-3.24...^M Expected: [9223372036854774784]^M Got: [9223372036854773760]^M I used tcl8.4 from ports with no threads and here was the config line: ../sqlite-3.5.4/configure --prefix=/home/marc/local --with-tcl=/usr/local/lib/tcl8.4/ This was built on an ibm t30 laptop
#e8e8bd 2860 todo active 2007 Dec anonymous 2007 Dec 3 1 Database file fragmentation Adding data in database file increases file fragmentation. For example my file which size is 1G, consists of 20000 pieces. (NTFS) This happens because truncation of '-journal' file. I see some ways to reduce fragmentation: 1. Increase database file size by greater pieces (not by PAGESIZE). 2. SQLite can save '-journal' file in another folder(logical disc). 3. Preallocation of database file(must increase INSERT speed).
#f2dcdc 2859 code active 2007 Dec anonymous 2007 Dec drh 3 2 Inconsistent column names with DISTINCT Given the following SQL:{linebreak} CREATE TABLE foo(a,b); INSERT INTO foo (a, b) VALUES (1,2); SQLite returns inconsistent column names when using the DISTINCT clause:{linebreak} SELECT DISTINCT foo.A, foo.B FROM foo; foo.A|foo.B 1|2 SELECT DISTINCT a, b FROM foo; a|b 1|2 SELECT DISTINCT * FROM foo; a|b 1|2 SELECT DISTINCT foo.* FROM foo; a|b 1|2 Compared with SELECT without DISTINCT:{linebreak} SELECT foo.A, foo.B FROM foo; a|b 1|2 SELECT a, b FROM foo; a|b 1|2 SELECT * FROM foo; a|b 1|2 SELECT foo.* FROM foo; a|b 1|2
#f2dcdc 2761 code active 2007 Nov anonymous 2007 Dec 3 3 CLI (shell.c) should be bundled with amalgamation The CLI (shell.c) should be bundled with the amalgamation for database administrative purposes without downloading the matching shell.c from the full source tree. I second that! Qt ships with the amalgamated source files, but we also ship shell.c, whch we have to retrieve from the non-amalgamated source files. ---- _2007-Dec-26 15:20:04 by anonymous:_ {linebreak} I also agree. It is inconvenient to retrieve the matching shell.c from the source tree.
#f2dcdc 2857 code active 2007 Dec anonymous 2007 Dec 2 2 GROUP BY cost estimate wrong with WHERE clause There seems to be an issue with the sqlite cost heuristic with an INDEX present on GROUP BY with certain types of WHERE clauses. Given the database formed by running these statements: create table stuff(a,b,c,d); insert into stuff values(1,2,3,4); create temp view v1 as select random()%100, random()%100, random()%1000, random()%10000 from stuff x, stuff y; insert into stuff select * from v1; insert into stuff select * from v1; insert into stuff select * from v1; insert into stuff select * from v1; insert into stuff select * from v1; create index stuff_b on stuff(b); create index stuff_c on stuff(c); create index stuff_d on stuff(d); analyze; Using sqlite.org's sqlite3-3.5.4.bin, this query takes 47 seconds: select c from stuff where a=23 group by c; while this query takes just 2 seconds: select c from stuff where a=23 group by +c; It is more efficient in this case to do a full table scan instead of using the INDEX on column c. _2007-Dec-23 23:14:06 by anonymous:_ {linebreak} The queries above both run in a couple of seconds with this naive patch: --- src/where.c 12 Dec 2007 17:42:53 -0000 1.266 +++ src/where.c 23 Dec 2007 22:48:37 -0000 @@ -1514,6 +1514,12 @@ static double bestIndex( flags = 0; } + if( pWC && pWC->nTerm>0 && pOrderBy ){ + /* Reduce cost if both an ORDER/GROUP BY exists with a WHERE. */ + cost /= 100; /* A very rough guess. */ + WHERETRACE(("... WHERE + ORDER BY decreases cost to: %.9g\n", cost)); + } + /* If the table scan does not satisfy the ORDER BY clause, increase ** the cost by NlogN to cover the expense of sorting. */ if( pOrderBy ){
But it has not been tested on queries with more than one table. Its logic could be flawed. ---- _2007-Dec-24 00:09:00 by drh:_ {linebreak} The complaint is centered around these two queries: /* 1 */ SELECT c FROM stuff WHERE a=23 GROUP BY c; /* 2 */ SELECT c FROM stuff WHERE a=23 GROUP BY +c; Query 1 runs in about 40 seconds and query 2 in about 1.5 seconds on my macbook. But with the patch, both queries run in about 1.5 seconds. Fair enough. But now consider these two queries: /* 3 */ SELECT c FROM stuff WHERE a!=23 GROUP BY c; /* 4 */ SELECT c FROM stuff WHERE a!=23 GROUP BY +c; In this case, query 3 runs in 42 seconds on an unpatched version of 3.5.4 and query 4 runs in about 109 seconds. So in cases where the WHERE clause is not particularly selective, the first version is faster than the second by a good margin. On a patched version of 3.5.4, both queries 3 and 4 run in about 110 seconds. So it seems to me that the patch is robbing Peter to pay Paul. It makes ORDER BY queries with very selective WHERE clauses run faster but at the expense of making queries with unselective WHERE clauses running slower. But notice this: in the current (unpatched) implementation, the programmer at least has the ability to select a different algorithm by the judicious placement of a "+" sign. After the patch, this is no longer possible. The patch forces the second algorithm to be used in all cases, even cases where it is slower. It seems to me to be better to leave things as they are since the current approach at least allows the programmer to override SQLite's algorithm selection if SQLite chooses incorrectly. The only way, it seems to me, to automatically choose the correct algorithm is to devise some test that will determine (in advance) whether or not the WHERE clause weeds out many or few rows from the result set. I'm thinking that determination is going to be very hard (or impossible) to do without first doing a full table scan. 
---- _2007-Dec-24 05:40:47 by anonymous:_ {linebreak} It think it would be surprising to average users that _adding_ an index (on column C in this case) may significantly _decrease_ query performance for some queries. It was surprising to me, at least. In my opinion, a query being 20 times slower in a default bad guess situation is worse than a query only being 2.5 times slower with a default bad guess in a worst case scenario. It's a question of relative magnitude of the difference. This is why I think that the database should err on the side of the WHERE clause having a more selective bias. (Side note: the query timings difference is less pronounced if you use PRAGMA temp_store=memory, in which case query 3 running on an unpatched 3.5.4 takes just 50% more time to run than query 4 on my machine.) But you raise a good point in that if there's a wrong guess in the selectivity bias it would be nice to be able to manually override it. How much do you hate this type of syntax that some other databases use? select c from stuff where a!=23 group by /*+stuff_c*/ c; SQLite does not currently offer a way to pick a specific index. I think it would be quite useful. ---- _2007-Dec-24 17:05:16 by anonymous:_ {linebreak} Another option is to collect WHERE clause statistics in a table like create table sqlite_stat2( where_clause_md5 BLOB primary key, where_clause TEXT, rows_examined INT, rows_true INT ); where the last 2 columns are cumulative for each query. The statistics option could be enabled/disabled via a PRAGMA sqlite_collect_statistics. The where_clause column could be a string generated fairly easily from the walking the parse tree of the resolved Select statement's pWhere. This way the where_clause is normalized and a single query with many subselects could generate more than 1 where_clause, and different queries that happen to use the same normalized where clause would update the same entry in the stat2 table. 
where_clause normalization would strip off aliases and only refer to the original table and column names. For example the 2 queries below: -- CREATE TABLE t1(a, b); -- CREATE TABLE t2(b, c); SELECT t1.a*c as AC, t2.b-a as BA FROM t1, t2 WHERE AC>BA; SELECT *, t1.a Foo FROM t2, t1 WHERE Foo*c > t2.b - t1.a; would generate the same normalized where_clause string "(T1.A*T2.C)>(T2.B-T1.A)". The table information is already encoded within it. The generated VDBE code would have to generate Mem counters that would be incremented by each WHERE test, and lazily updated at the end of transactions or periodically written to the stat2 table to minimize disk use, as this information is not critical. One could also manually set the stat2 table with statistical values they would like their queries to use even if PRAGMA sqlite_collect_statistics=off; Any time the schema is changed, the entire sqlite_stat2 table would be cleared.
#e8e8bd 2856 doc active 2007 Dec anonymous 2007 Dec anonymous 4 3 SQLite Documentation - Tcl API - Link broken On the page "The Tcl Interface to the SQLite library", the link "enable load extension" does not work.
#e8e8bd 1648 new active 2006 Jan anonymous Shell 2007 Dec 4 3 meaningful error message: constraint failed create table emp( id text unique, sex text check( sex in 'm' or sex in 'f' ); insert into emp values( '1','x' ); SQL error: constraint failed This error message could be better. If there are several constraints, which constraint failed? So I named the constraint create table emp( id text unique, sex text constraint chk_sex check( sex in 'm' or sex in 'f' ); insert into emp values( '1','x' ); SQL error: constraint failed Still no joy . . . It would be nice if the error message were more specific. _2006-Jan-30 16:22:58 by anonymous:_ {linebreak} actually my testing was better than my typing, I used: check (sex = 'm' or sex = 'f' ) ---- _2007-Oct-25 09:47:14 by anonymous:_ {linebreak} This is a really big deal for me and for many others I suspect. If this is not a priority, could you at least throw out some hints about implementing it? I browsed through the code but can't seem to find where this would even go. ---- _2007-Oct-25 10:10:36 by anonymous:_ {linebreak} Hm, ok, the check constraints are stored in the table structure as a single expression which is the AND of all of them. This alone suggests that the task at hand is not simple... ---- _2007-Oct-25 10:17:36 by anonymous:_ {linebreak} Perhaps a new Check type could be created which could basically be Expr plus an extra pointer, which could then be used to make a list of them, similar to how the triggers seem to be stored. I'll keep snooping around, but I thought I'd post what I've found thus far in case anyone else looks at this. ---- _2007-Oct-25 19:40:37 by anonymous:_ {linebreak} I have attached a patch that implements this. I've only tested it lightly by hand. (The test suite failed to run and gave me some strange linking errors) ---- _2007-Dec-20 11:38:03 by anonymous:_ {linebreak} Although this is tagged as shell, the error message comes from the sqlite core. 
My single biggest problem besides the lack of detail (some of my tables have 5 constraints) is that it also prevents me from localizing the error messages. If I have the constraint name then at least I can look it up in a translation table and tell non-english speakers something meaningful.
#f2dcdc 2508 code active 2007 Jul anonymous 2007 Dec 1 1 utf8ToUnicode() does not work on some WinCE devices On some WinCE devices first call to =MultiByteToWideChar()= in =utf8ToUnicode()= always fails. Tried calling =GetLastError()= after it fails and it returns error code 87 -- =ERROR_INVALID_PARAMETER=. To fix this had to change code page from =CP_UTF8= to =CP_ACP= -- no idea why this works. Original =utf8ToUnicode()= ---- static WCHAR *utf8ToUnicode(const char *zFilename) { int nChar; WCHAR *zWideFilename; nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); zWideFilename = sqliteMalloc( nChar*sizeof(zWideFilename[0]) ); if( zWideFilename==0 ){ return 0; } nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, nChar); if( nChar==0 ){ sqliteFree(zWideFilename); zWideFilename = 0; } return zWideFilename; } ---- Fixed =utf8ToUnicode()= ---- static WCHAR *utf8ToUnicode(const char *zFilename) { int nChar; WCHAR *zWideFilename; nChar = MultiByteToWideChar(CP_ACP, 0, zFilename, -1, NULL, 0); if( nChar == 0 ) { DWORD dwError = GetLastError(); OSTRACE2("MultiByteToWideChar() failed, last error: %d\n", dwError); return 0; } zWideFilename = sqliteMalloc( nChar*sizeof(zWideFilename[0]) ); if( zWideFilename==0 ){ return 0; } nChar = MultiByteToWideChar(CP_ACP, 0, zFilename, -1, zWideFilename, nChar); if( nChar==0 ){ sqliteFree(zWideFilename); zWideFilename = 0; } return zWideFilename; } ---- _2007-Jul-17 23:56:10 by anonymous:_ {linebreak} =unicodeToUtf8()= needs to be fixed the same way. 
Before: ---- static char *unicodeToUtf8(const WCHAR *zWideFilename){ int nByte; char *zFilename; nByte = WideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, 0, 0, 0, 0); zFilename = sqliteMalloc( nByte ); if( zFilename==0 ){ return 0; } nByte = WideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ sqliteFree(zFilename); zFilename = 0; } return zFilename; } ---- After: ---- static char *unicodeToUtf8(const WCHAR *zWideFilename){ int nByte; char *zFilename; nByte = WideCharToMultiByte(CP_ACP, 0, zWideFilename, -1, NULL, 0, NULL, NULL); if ( nByte == 0 ) { DWORD dwError = GetLastError(); OSTRACE2("WideCharToMultiByte() failed, last error = %d\n", dwError); return 0; } zFilename = sqliteMalloc( nByte ); if( zFilename==0 ){ return 0; } nByte = WideCharToMultiByte(CP_ACP, 0, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ sqliteFree(zFilename); zFilename = 0; } return zFilename; } ---- Note that while original code with =CP_UTF8= works on Windows and SOME WinCE devices, this modified code works well and Windows and all WinCE devices I've tested so far. ---- _2007-Jul-18 16:01:21 by anonymous:_ {linebreak} Why not using the conversions from SQLite internals ? It can change a UTF-16 to UTF-8 and vice-versa. Or using UTF-16 variants in windows ce should be the best case. ---- _2007-Aug-09 20:47:04 by anonymous:_ Why not using the conversions from SQLite internals ? It can change a UTF-16 to UTF-8 and vice-versa. Or using UTF-16 variants in windows ce should be the best case. Not so simple. =unicodeToUtf8()= is used a lot internally regardless of what whether you use UTF-16 or UTF-8 yourself. For example, =unicodeToUtf8()= is used by =sqlite3WinTempFileName()= which is in turn used by =sqlite3PagerOpentemp()= -- I think you get the idea. 
---- _2007-Dec-20 00:29:33 by anonymous:_ {linebreak} We've found that using CP_UTF8 fails on WinCE kernels that don't include SYSGEN_CORELOC (http://msdn2.microsoft.com/en-us/library/ms903883.aspx). To make the code handle any device it should be changed to: static WCHAR *utf8ToUnicode(const char *zFilename) { int nChar; WCHAR *zWideFilename; nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); if( nChar == 0 ) { nChar = MultiByteToWideChar(CP_ACP, 0, zFilename, -1, NULL, 0); if( nChar == 0 ) { DWORD dwError = GetLastError(); OSTRACE2("MultiByteToWideChar() failed, last error: %d\n", dwError); return 0; } } zWideFilename = sqliteMalloc( nChar*sizeof(zWideFilename[0]) ); if( zWideFilename==0 ) { return 0; } nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, nChar); if( nChar==0 ) { nChar = MultiByteToWideChar(CP_ACP, 0, zFilename, -1, zWideFilename, nChar); if( nChar==0 ) { sqliteFree(zWideFilename); zWideFilename = 0; } } return zWideFilename; }
#e8e8bd 558 build active 2004 Jan anonymous 2007 Dec 4 4 Makefile.in should honor libdir and bindir Please support non-standard installation layouts by honoring configure's --libdir and --bindir flags rather than hard-coding $(exec_prefix)/lib and $(exec_prefix)/bin. (For instance, the layout we often use on Solaris has parallel "lib" and "lib64" directories under a common prefix.) _2007-Dec-18 17:29:26 by anonymous:_ {linebreak} Why is this ticket not solved? The patch is trivial and solves a real problem. Thank you. ---- _2007-Dec-18 17:54:46 by drh:_ {linebreak} The patch does not apply to the current makefile. And I do not understand what the -libdir or -bindir options are for or what they are suppose to do so I do not know how to fix it.
#f2dcdc 368 code active 2003 Jun anonymous 2007 Dec 3 4 UPDATE trigger doesn't fire on INSERT OR REPLACE After executing the following SQL, there will be nothing in table T2. I expect to see '1' there: CREATE TABLE T1 ( id, name ); CREATE TABLE T2 ( id ); CREATE TRIGGER T1A AFTER UPDATE ON T1 BEGIN INSERT INTO T2 VALUES( new.id ); END; INSERT INTO T1 VALUES (1, 'Hi'); INSERT INTO T1 VALUES (2, 'There'); INSERT OR REPLACE INTO T1 VALUES (1,'Me'); An INSERT trigger *does* fire on INSERT OR REPLACE if the item exists already -- I would expect an UPDATE trigger. ---- _2004-Sep-21 17:15:37 by anonymous:_ {linebreak} Still repros in 3.0.7 :-( ---- _2007-Dec-17 21:45:03 by anonymous:_ {linebreak} I would say that ON DELETE and ON INSERT better describes what really happens (and not ON UPDATE), because if there would be another columns in the '1' row, their values would not be preserved after INSERT OR REPLACE takes place, as the documentation of the ON REPLACE algorithm states: "When a UNIQUE constraint violation occurs, the pre-existing rows that are causing the constraint violation are removed prior to inserting or updating the current row". However, neither ON UPDATE nor ON DELETE trigger occurs, which still is a bug. Thank you.
#e8e8bd 916 new active 2004 Sep anonymous Unknown 2007 Dec 1 1 No delete notification for INSERT OR REPLACE It would be nice if the "ON DELETE" trigger is called for the row substituted with a new one during REPLACE. Or, even better, one could add the OLD statement for the "ON INSERT" trigger and set it to point to the same row as NEW if a new row is inserted or to the deleting row if replace occurs. Thanks. _2007-Dec-17 21:36:40 by anonymous:_ {linebreak} I have the same problem. My solution would be to stick with the documentation of the ON REPLACE algorithm: "When a UNIQUE constraint violation occurs, the pre-existing rows that are causing the constraint violation are removed prior to inserting or updating the current row". That is, to call ON DELETE trigger whenever rows are removed. Thank you, and keep going, you do wonderful job anyway.
#e8e8bd 2844 build active 2007 Dec anonymous 2007 Dec 4 1 lemon is being built without respecting LDFLAGS lemon is being built without respecting LDFLAGS. I'm attaching a patch which fixes this bug. In other words, why should we fix this? What problem is it causing? _2007-Dec-17 16:22:19 by drh:_ {linebreak} Why is this important? What LDFLAGS settings might a user want to carry through into lemon? ---- _2007-Dec-17 18:00:59 by anonymous:_ {linebreak} > Why is this important? It is considered to be good practice to respect user's LDFLAGS. A user might want to have all executables and libraries built with identical LDFLAGS. > What LDFLAGS settings might a user want to carry through into lemon? A user might have LDFLAGS="-Wl,-O1,--hash-style=gnu,--sort-common" You can read http://lwn.net/Articles/192082/. Users can also use some other flags. > In other words, why should we fix this? What problem is it causing? It slightly increases the size of lemon executable and it slightly decreases performance. ---- _2007-Dec-17 18:04:31 by drh:_ {linebreak} lemon is used as an intermediate build tool in part of the SQLite build process. It is not a deliverable. If it runs a little slower or uses a little more memory, nobody cares. We only care if it gets the wrong answer. Is it ever possible that the lack of LDFLAGS support might result in lemon getting the wrong answer? ---- _2007-Dec-17 18:27:33 by anonymous:_ {linebreak} Can you comment on Lemon bug in #2835? It produces 2 different sqlite3.c files depending on your malloc implementation. ---- _2007-Dec-17 19:19:01 by anonymous:_ {linebreak} > lemon is used as an intermediate build tool in part of the > SQLite build process. It is not a deliverable. If it runs a > little slower or uses a little more memory, nobody cares. CFLAGS are respected when lemon is being built, so for consistency LDFLAGS also should be respected. (The comment above was not created by me.)
#f2dcdc 2842 code active 2007 Dec anonymous 2007 Dec 1 1 .import does not recognise NULL values .import function fails to see NULL values in csv files as NULL values...instead they are treated as the string "NULL". This is with .mode list and separator , But behaves similarly for .mode csv Also if one outputs a table with NULL values to a file, then re-imports that file, again .import does not recognise the values as NULL, but as "NULL". Everything here also applies to empty strings in files, e.g. instead of "NULL" using nothing... This is a showstopper for us since we want to import a large amount of data with many tables containing NULL values. I can't see any valid reason for .import not to recognise the same syntax as the command line. Note that something like: sqlite3 my.db insert into MY_TABLE values (1,"foo","bar",NULL) ..works fine. It is just .import that appears to be broken. _2007-Dec-14 16:39:51 by rdc:_ {linebreak} .import only inserts string values into database tables. If your column has a declared type that changes the columns affinity to numeric or integer, then those strings will be converted to numeric values by the SQLite library. The workaround is to simply insert a unique string where ever you want a NULL value, and then run an update that replaces those strings with real NULL values. If you inserted the string 'NULL' then do this after the .import update t set field = null where field = 'NULL'; You will have to repeat this for each field in your table that might contain the 'NULL' string.
#e8e8bd 2841 todo active 2007 Dec anonymous 2007 Dec 1 1 The sqlite mailing list has become overrun by trolls The sqlite mailing list is very useful. The S/N is at times a little high but nonetheless quite manageable. Recently (see the DeviceSQL thread) it got really bad. Would moderation be unacceptable during these periods of time where people feel the need to protect their ego's? The sqlite mailing list is primarily about sqlite (well, and lemon), not a marketing vector for other products? Surely they have their own lists and resources for that?
#f2dcdc 2721 code active 2007 Oct anonymous 2007 Dec 2 1 if db file is in a folder with non-ansi character some functions fail If database file is located in directory with some non-ANSI characters (in my case with a Russian subdirectory c:\Мои документы\Data_Jobs), or it's name is non-ansi. Some functions fail to execute sql. For example (with defined UNICODE): TCHAR sql[512]; _stprintf(sql, _T("INSERT INTO tab_SurveyedPoints (name, comment, code,") _T("coordinatetype, b, l, h, solutiontype, sigmah, sigmav)") _T(" VALUES ('%s','%s','%s',0,%lf,%lf,%lf,0,%lf,%lf);"), point.m_name.c_str(), point.m_description.c_str(), point.m_code.c_str(), point.m_coordinates.b, point.m_coordinates.l, point.m_coordinates.h, point.m_sigmah, point.m_sigmav); int rc1 = sqlite3_prepare16(m_db, sqlfmt, -1, &stmt, (const void**)&pszTail); rc != SQLITE_OK
But if I move the file to c:\My documents\Data_Jobs this works ok. It's improbable behaviour, but I can't work around yet. Although, prepare() functions work ok as well in both cases. Yuri Noyanov. _2007-Oct-11 19:33:34 by drh:_ {linebreak} All string arguments to SQLite, and especially filename arguments, must be UTF-8 or UTF-16 (depending on the function). If you use string parameters which are not UTF-8 or UTF-16 (as appropriate) then the behavior of SQLite is undefined and probably not what you want. ---- _2007-Oct-12 04:25:56 by anonymous:_ {linebreak} but ALL programs to handle SQLite DBs (SQLIteBrowser, SQLite Control) fail to handle the files as well. Till I move the file to different directory !!! ---- _2007-Oct-12 04:27:54 by anonymous:_ {linebreak} Also I must note, that I CAN open the database, I CAN execute some SQLs with sqlite_prepare function OK. But sqlite_prepare16 FAILS if I just rename my database !!! ---- _2007-Oct-12 04:31:46 by anonymous:_ {linebreak} Also note to make my issue clearer: sqlite_prepare16() with the same code either works OK either doesn't work. depends on database filename or folder path. The database is opened OK in both cases (I used utf8 conversion). sql_prepare() works ok in both cases. ---- _2007-Oct-13 06:37:43 by anonymous:_ {linebreak} That appears to be only with INSERT sql statement. Both SELECT and UPDATE work fine with sqlite_prepare16.
#e8e8bd 2831 new active 2007 Dec anonymous 2007 Dec 3 4 alter view View can't be used after ALTER RENAME TO: SQLite version 3.5.3 Enter ".help" for instructions sqlite> create table t(a); sqlite> create view v1 as select * from t; sqlite> alter table v1 rename to v2; sqlite> select * from v2; SQL error: no such table: v2 sqlite> select * from v1; SQL error: no such table: v1 sqlite> .schema CREATE TABLE t(a); CREATE VIEW v1 as select * from t; sqlite> select * from sqlite_master; table|t|t|2|CREATE TABLE t(a) view|v1|v1|0|CREATE VIEW v1 as select * from t
This is a feature request, not a bug. ---- _2007-Dec-11 18:40:17 by anonymous:_ {linebreak} Notice that alter table doesn't return an error. After the command neither v1 nor v2 can be used. ---- _2007-Dec-13 08:18:16 by danielk1977:_ {linebreak} [4623] improves the situation by returning an error when the user attempts to rename a view. One reason this feature (renaming views) is not a high priority is because a view can be dropped and recreated with a different name efficiently. This was not the case with tables.
#f2dcdc 2825 code active 2007 Dec anonymous 2007 Dec 3 3 FormatMessage (win32) should use extra flag and convert from Unicode The call to FormatMessageA in the win32 source code needs to have the flags changed from: FORMAT_MESSAGE_FROM_SYSTEM to FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS This ensures that any system messages that expect arguments do not try to grab the argument from some random memory location. ref: http://blogs.msdn.com/oldnewthing/archive/2007/11/28/6564257.aspx _2007-Dec-06 14:07:53 by anonymous:_ {linebreak} I also noticed that the result is NOT converted to UTF-8. FormatMessageA returns the text in the local ANSI codepage. FormatMessageW should be used on NT systems, and either result should be converted to the SQLite UTF-8 default. ---- _2007-Dec-11 00:34:37 by anonymous:_ {linebreak} to simplify what is meant even more... http://www.sqlite.org/cvstrac/fileview?f=sqlite/src/os_win.c&v=1.118 Search for FormatMessageA (only 1 instance) - FORMAT_MESSAGE_FROM_SYSTEM, + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, No breakage, ensures that no crashes with some messages (e.g. filesystem errors). The encoding issue should be addressed separately. ---- _2007-Dec-11 01:27:07 by anonymous:_ {linebreak} The function should be changed to the following to correctly handle the conversion from Unicode/MBCS. 
static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ int error = GetLastError(); #if OS_WINCE if( error>0x7FFFFFF ){ sqlite3_snprintf(nBuf, zBufOut, "OsError 0x%x", error); }else{ sqlite3_snprintf(nBuf, zBufOut, "OsError %d", error); } #else if( isNT() ){ LPWSTR zWinTemp = NULL; DWORD dwLen = FormatMessageW( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, error, 0, (LPWSTR) &zWinTemp, 0, 0 ); if (dwLen > 0) { char * zOut = unicodeToUtf8(zWinTemp); LocalFree(zWinTemp); sqlite3_snprintf(nBuf, zBufOut, "%s", zOut); free(zOut); } }else{ LPSTR zWinTemp = NULL; DWORD dwLen = FormatMessageA( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, error, 0, (LPSTR) &zWinTemp, 0, 0 ); if (dwLen > 0) { char * zOut = mbcsToUtf8(zWinTemp); LocalFree(zWinTemp); sqlite3_snprintf(nBuf, zBufOut, "%s", zOut); free(zOut); } } #endif }
#e8e8bd 2821 new active 2007 Dec anonymous 2007 Dec 3 4 hashtable indices It would be nice to implement non btree indices. I.e. CREATE INDEX ON table(rowid) AS HASH. Using a hashtable's O(1) properties, you could use the index for very quick lookups when one result is expected. This does have the tradeoff that a hashtable index has no ordering properties (can not be used for sorts or non-equality searching). However, it would be a *huge* win when you have 250,000 rowids in memory, and you want to go fetch another column in the database for each one of those rowids (SELECT * FROM table WHERE rowid=?). _2007-Dec-03 21:58:01 by anonymous:_ {linebreak} For 250,000 rows I doubt you would see that much of an improvement (try it.) You'll almost certainly find log_n is going to be fairly fast (especially for large n.) I personally would prefer some sort of 'virtual' index though, that could be a hash or actually from a user-supplied function so that I can index large blobs by some function (i.e. a hash). And yes, this would be an incompatible file-format change and it's not clear how to update an index when the function isn't loaded (i.e. db reopened with that function.) Perhaps mark the index as 'stale' and ignore it until the function loads then you can do the updates. Of course this starts to get quite complicated. ---- _2007-Dec-03 22:12:17 by anonymous:_ {linebreak} Everything in sqlite depends on btree indexes. You're talking a major rewrite if you support hash-based or other indexing.
#f2dcdc 2814 code active 2007 Nov anonymous 2007 Dec 3 3 _XOPEN_SOURCE again Ideally setting _XOPEN_SOURCE should be an opt-in detected by configure, rather than a hardcoded opt-out as it is now. I find you create more problems in setting it than just leaving it out on modern platforms. Can you please give users the option of not defining _XOPEN_SOURCE at all? +#ifndef SQLITE_DONT_DEFINE_XOPEN_SOURCE #if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && SQLITE_THREADSAFE # define _XOPEN_SOURCE 500 /* Needed to enable pthread recursive mutexes */ #endif +#endif
_2007-Dec-01 09:23:15 by anonymous:_ {linebreak} Also when using Python, it sets _XOPEN_SOURCE to 600. No idea what the 500 vs 600 difference is about. ---- _2007-Dec-01 15:58:28 by anonymous:_ {linebreak} I've used a couple of different Linux OSes and _XOPEN_SOURCE is not needed. Maybe it's for OSes more than 5 years old. Recursive mutexes are pretty much standard these days since the popularity of Java which uses them extensively. ---- _2007-Dec-01 17:21:05 by drh:_ {linebreak} See also tickets #2673, #2681, and #2741. ---- _2007-Dec-02 02:08:26 by anonymous:_ {linebreak} On Linux, PTHREAD_MUTEX_RECURSIVE is the same as PTHREAD_MUTEX_RECURSIVE_NP: PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP, Since PTHREAD_MUTEX_RECURSIVE_NP is always available, you could avoid defining _XOPEN_SOURCE and use this code instead: - pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutexattr_settype(&recursiveAttr, +#ifdef linux + PTHREAD_MUTEX_RECURSIVE_NP +#else + PTHREAD_MUTEX_RECURSIVE +#endif + );
---- _2007-Dec-02 02:17:22 by anonymous:_ {linebreak} A quick google search reveals how various projects deal with this recursive mutex declaration problem (in no particular order): *: #define _XOPEN_SOURCE 500 and use PTHREAD_MUTEX_RECURSIVE *: #define _XOPEN_SOURCE 600 and use PTHREAD_MUTEX_RECURSIVE *: #define _GNU_SOURCE and use PTHREAD_MUTEX_RECURSIVE *: don't define anything and use PTHREAD_MUTEX_RECURSIVE_NP on linux, and PTHREAD_MUTEX_RECURSIVE elsewhere. Unfortunately, since PTHREAD_MUTEX_RECURSIVE is an enum on Linux, so you can't use the #ifdef PTHREAD_MUTEX_RECURSIVE compile-time technique.
#e8e8bd 2604 new active 2007 Aug anonymous 2007 Aug 4 4 CREATE VIRTUAL TABLE does not allow IF NOT EXISTS CREATE VIRTUAL TABLE vt IF NOT EXISTS; would help with development since creating a virtual table that exists returns error 1 - as do several "Real" errors.
#e8e8bd 2595 doc active 2007 Aug anonymous 2007 Aug 4 4 sqlite3_commit_hook doc typo src/main.c: -** Register a function to be invoked when a transaction comments. +** Register a function to be invoked when a transaction commits.
#f2dcdc 2559 code active 2007 Aug anonymous 2007 Aug 4 4 "make clean" does not delete sqlite3.c and tsrc/ Index: Makefile.in =================================================================== RCS file: /sqlite/sqlite/Makefile.in,v retrieving revision 1.179 diff -u -3 -p -r1.179 Makefile.in --- Makefile.in 27 Aug 2007 23:38:43 -0000 1.179 +++ Makefile.in 28 Aug 2007 01:25:55 -0000 @@ -724,7 +724,7 @@ clean: rm -f testfixture$(TEXE) test.db rm -rf doc rm -f common.tcl - rm -f sqlite3.dll sqlite3.lib sqlite3.def + rm -rf sqlite3.dll sqlite3.lib sqlite3.def sqlite3.c tsrc distclean: clean rm -f config.log config.status libtool Makefile config.h sqlite3.pc
#e8e8bd 2587 build active 2007 Aug anonymous 2007 Aug 3 4 Build problem when using the SQLITE_OMIT_FLOATING_POINT define. I apologize in advance if the values I chose above are not appropriate. If I define SQLITE_OMIT_FLOATING_POINT=1 and try to build a Windows DLL, I get two errors in loadext.c, line 116 and 192. "error C4028: formal parameter 3 different from declaration" I believe you want to change the include order at the top of loadext.c from: #include "sqlite3ext.h" #include "sqliteInt.h" to: #include "sqliteInt.h" #include "sqlite3ext.h" Reversing the order of include fixes my build. Yes, I know there is no real reason to disable floating point for the Windows DLL. I'm actually porting SqLite for use in an NT kernel mode driver and avoiding floating point operations will save a lot of time if I don't really need them and I don't. So I made sure this was a problem with a supported platform like the Windows DLL and griped about that instead of my insanity. ;-) You can email questions to mspiegel@vipmail.com. If you want to discuss this over the phone, shoot me an email and I'll send you phone numbers.
#e8e8bd 2568 new active 2007 Aug anonymous 2007 Aug 3 3 TEMP_STORE is ignored in some cases It seems that sometimes TEMP_STORE is ignored. I've tried to force SQLite to always use memory by setting TEMP_STORE=3, but some etilqs_* temp files are still being created. The call stack that's causing these files to be created is: sqlite3PagerOpentemp(OsFile * *) sqlite3PagerStmtBegin(Pager *) sqlite3BtreeBeginStmt(Btree *) sqlite3VdbeExec(Vdbe *) sqlite3Step(Vdbe *) sqlite3_step(sqlite3_stmt *) It looks like the temp files are being used to store information for undoing earlier parts of a transaction if a later part fails. I'm assuming the fact this part of the code ignores TEMP_STORE is an oversight? _2007-Aug-13 15:03:19 by drh:_ {linebreak} The TEMP_STORE compile-time option only changes the storage for temporary database files. The statement journal is not a database file and thus does not come under the control of TEMP_STORE. There is currently no mechanism to force the statement journal into memory instead of onto disk. I will reclassify this ticket as a "feature request". ---- _2007-Aug-22 10:42:50 by anonymous:_ {linebreak} Okay, thank you.
#f2dcdc 1242 code active 2005 May anonymous Shell 2007 Aug 3 4 EXPLAIN causes segmentation fault on OSX (and linux) Under Mac OS X, EXPLAIN causes a segmentation fault: [jacob@046] ~$ sqlite3 foo.db SQLite version 3.2.1 Enter ".help" for instructions sqlite> CREATE TABLE test (a int, b int); sqlite> EXPLAIN SELECT * FROM test; Segmentation fault The crash dump follows: Host Name: jacobian Date/Time: 2005-05-13 09:17:04.860 -0500 OS Version: 10.4 (Build 8A428) Report Version: 3 Command: sqlite3 Path: /usr/local/bin/sqlite3 Parent: bash [15421] Version: ??? (???) PID: 15544 Thread: 0 Exception: EXC_BAD_ACCESS (0x0001) Codes: KERN_INVALID_ADDRESS (0x0001) at 0x1400fffc Thread 0 Crashed: 0 libSystem.B.dylib 0x90003228 strlen + 8 1 libsqlite3.0.dylib 0x002387c8 sqlite3VdbeList + 284 (vdbeaux.c:609) 2 libsqlite3.0.dylib 0x002376e0 sqlite3_step + 312 (vdbeapi.c:207) 3 libsqlite3.0.dylib 0x0023e5d8 sqlite3_exec + 260 (legacy.c:82) 4 sqlite3 0x00005b64 process_input + 808 (shell.c:1504) 5 sqlite3 0x000062bc main + 1528 (shell.c:1790) 6 sqlite3 0x00001db4 _start + 348 (crt.c:272) 7 sqlite3 0x00001c54 start + 60 Thread 0 crashed with PPC Thread State: srr0: 0x90003228 srr1: 0x0000d030 vrsave: 0x00000000 cr: 0x22444428 xer: 0x00000006 lr: 0x002387c8 ctr: 0x90003220 r0: 0x002387c8 r1: 0xbfffef40 r2: 0x00249a00 r3: 0x1400fffe r4: 0x00000028 r5: 0x00000000 r6: 0x00000001 r7: 0xffffffff r8: 0x00000001 r9: 0x1400fffc r10: 0x00000086 r11: 0x00249180 r12: 0x90003220 r13: 0x00000000 r14: 0x00000000 r15: 0x00000000 r16: 0x00000000 r17: 0xbffff0f8 r18: 0x00000000 r19: 0xbffff17c r20: 0x00000000 r21: 0x000036d0 r22: 0x00303d90 r23: 0x00303d74 r24: 0x01805700 r25: 0x01807e00 r26: 0x00000001 r27: 0x00000004 r28: 0x01805640 r29: 0x01805600 r30: 0x01805200 r31: 0x002386bc Binary Images Description: 0x1000 - 0x7fff sqlite3 /usr/local/bin/sqlite3 0x205000 - 0x248fff libsqlite3.0.dylib /usr/local/lib/libsqlite3.0.dylib 0x8fe00000 - 0x8fe50fff dyld 43 /usr/lib/dyld 0x90000000 - 0x901a6fff 
libSystem.B.dylib /usr/lib/libSystem.B.dylib 0x901fe000 - 0x90202fff libmathCommon.A.dylib /usr/lib/system/libmathCommon.A.dylib 0x91d33000 - 0x91d53fff libmx.A.dylib /usr/lib/libmx.A.dylib 0x9680c000 - 0x9683afff libncurses.5.4.dylib /usr/lib/libncurses.5.4.dylib 0x969a3000 - 0x969b9fff libedit.2.dylib /usr/lib/libedit.2.dylib Happening to me as well on FC6 sqlite3 version 3.3.6 ---- _2007-Aug-21 17:09:34 by anonymous:_ {linebreak} Try to upgrade to 3.4.2.
#e8e8bd 2582 doc active 2007 Aug anonymous 2007 Aug anonymous 5 5 documentation clarification docs for topic `Set A Busy Timeout` int sqlite3_busy_timeout(sqlite3*, int ms); http://sqlite.org/capi3ref.html#sqlite3_busy_timeout the wording "The handler will sleep multiple times until at least "ms" milliseconds of sleeping have been done" implies it will wait the total amount regardless of the lock status, it should perhaps indicate in the same sentence that it will exit early if the lock becomes available.
#f2dcdc 2580 code active 2007 Aug anonymous 2007 Aug anonymous 1 2 Can't open a query if text to search is Greek for example: SELECT * FROM mytable WHERE mycolumn LIKE '%some greek text%' I get wrong results, using the 3.4.2 version. No problem instead using other earlier version. I tested only in Windows.
#e8e8bd 2567 build active 2007 Aug anonymous 2007 Aug 3 2 Build fails to install I compile under MinGW with Msys. A build error occurs during 'make install'. After checking the makefile. The 'install' target depends on 'sqlite3', when it should be 'sqlite3$(TEXE)'. The workaround is, after configure, edit makefile for target install, and replace 'sqlite3' with 'sqlite3${TEXE}' where needed. I did not have this problem with 3.3.17. I assume this could be fixed just by fixing the configure to produce correct makefile. _2007-Aug-12 04:41:12 by anonymous:_ {linebreak} Do you have a patch?
#e8e8bd 2566 build active 2007 Aug anonymous 2007 Aug 2 1 fts2 broken after vacuum Hi there, I'm testing your database and I'm having problems with fts2: --------- sqlite> select * from distB where distB match "MARIANO"; Assertion failed: *pData!='\0', file fts2amal.c, line 16790 This application has requested the Runtime to terminate it in an unusual way. Please contact the application's support team for more information. --------- Steps: 1) Create a new .db 2) Import data in new distA table 3) Import data in new distB table 4) Create a new distC virtual table (dts2) 5) insert into distC (rowid, f1, f2, f3) select rowid, f1, f2, f3 from DistB Everything working like a charm until here!!! The fts2 works very well, but after 6) vacuum; the fts seems broken... doing a select throws the error I paste at the post of the topic If you want the .db file I can send it to you (607MB) Thanks.-
#f2dcdc 2558 code active 2007 Aug anonymous 2007 Aug 2 3 Multiple JOIN USING() gives incorrect results I'm having a problem joining multiple tables with USING. It appears to work, but the results are incorrect. Here is an example to illustrate the problem. I believe the three SELECT statements should be equivalent, but they produce three different results. .header on .mode column CREATE TABLE Main (pk INTEGER PRIMARY KEY, name VARCHAR); CREATE TABLE OptA (pk INTEGER PRIMARY KEY, alpha VARCHAR); CREATE TABLE OptB (pk INTEGER PRIMARY KEY, beta VARCHAR); INSERT INTO Main VALUES (1, 'One'); INSERT INTO Main VALUES (2, 'Two'); INSERT INTO Main VALUES (3, 'Three'); INSERT INTO Main VALUES (4, 'Four'); INSERT INTO OptA VALUES (1, 'Alpha1'); INSERT INTO OptA VALUES (4, 'Alpha4'); INSERT INTO OptB VALUES (2, 'Beta2'); INSERT INTO OptB VALUES (4, 'Beta4'); SELECT * FROM Main LEFT JOIN OptA USING (pk) LEFT JOIN OptB USING (pk); SELECT * FROM Main LEFT JOIN OptB USING (pk) LEFT JOIN OptA USING (pk); SELECT Main.pk, name, alpha, beta FROM Main LEFT JOIN OptA ON Main.pk = OptA.pk LEFT JOIN OptB ON Main.pk = OptB.pk; Joining Main, OptA, and OptB omits Beta2: pk name alpha beta ---------- ---------- ---------- ---------- 1 One Alpha1 2 Two 3 Three 4 Four Alpha4 Beta4 Joining Main, OptB, and OptA omits Alpha1: pk name beta alpha ---------- ---------- ---------- ---------- 1 One 2 Two Beta2 3 Three 4 Four Beta4 Alpha4 Only by using ON instead of USING do we get the correct results: pk name alpha beta ---------- ---------- ---------- ---------- 1 One Alpha1 2 Two Beta2 3 Three 4 Four Alpha4 Beta4 I think this is basically the same issue as ticket #1637, but it's a more serious example. In that one, the query simply failed to compile. In this case, it seems to work, but gives you the wrong results. I've also tried this script in PostgreSQL 8.0.13. All three queries give (the same) correct results. 
_2007-Aug-08 17:34:27 by anonymous:_ {linebreak} The problem is that SQLite is transforming SELECT * FROM Main LEFT JOIN OptA USING (pk) LEFT JOIN OptB USING (pk); into SELECT Main.pk, name, alpha, beta FROM Main LEFT JOIN OptA ON Main.pk = OptA.pk LEFT JOIN OptB ON OptA.pk = OptB.pk; Here is a workaround to this bug that makes use of a subquery: select * from (SELECT * FROM Main LEFT JOIN OptA USING (pk)) LEFT JOIN OptB USING (pk); Conceivably all LEFT JOIN chains could be transformed into the above form, but that would decrease performance due to the intermediate result set of the subquery. Having it work without the subquery is tricky since sqlite must deduce that the last USING (pk) is equivalent to the first pk in the chain of joined tables, namely Main.pk, and not OptA.pk. Joe Wilson
#e8e8bd 2555 new active 2007 Aug anonymous 2007 Aug 1 1 FTS index without original text Is it possible to build FTS index without storing original text? I want to use fts index without features of snippets etc. I just want to find ID of the record not the content of indexed phrase. I suppose that the table myname_content stores this content. I have tried to update all columns of myname_content and set its values to “xyz” (without one column in which I store ID of the record). After this operation FTS search works well, but unfortunately the table isn’t smaller (I can’t use vacuum on FTS tables). Is there any other way to have pure text indexes without source level changes?
#f2dcdc 2547 code active 2007 Aug danielk1977 2007 Aug 5 3 Changing db encoding of an attached db can confuse shared cache mode. This is quite obscure, but in shared-cache mode: 1) Open db A, attach empty db B. 2) Using another connection from the same thread, set the encoding of B to be different from that of A. Add some data to B. 3) Using the original connection, access database B. It assumes the encoding of A (and therefore mangling any text data). The correct response is to return an error - "attached databases must use the same text encoding as main database".
#e8e8bd 2329 new active 2007 Apr anonymous 2007 Apr 5 4 add a feature to .dump : partial dumps Hi, I would like to request a feature related to the .dump command introduction: ;-) I have a large database (few GB) and I only remove rows from it. since I forgot to use the pragma auto_vacuum, I am creating using .dump another database that has the triggers and pragmas I needed. okay. so, this is slow. probably mainly because of the millions of inserts it needs to perform. the feature request: partial .dumps - that is, you specify how many rows or how many megabytes to dump. this should add a begin transaction and a commit at the end of each dumpfile, and enumerate them as well. for example: sqlite3 mylarge.db .partialdump --rows=40000 > dumpfile.sql dumping.. please wait 1:: dumping first 40000 rows to file dumpfile.001.sql ....done 2:: dumping second 40000 rows to file dumpfile.002.sql ....done etc.. hope this is obvious enough. if you need more info contact me. I know sqlite tries to be minimal and to the point, but this is a good feature and very handy. (dumping to text and then splitting can take too much space and then impractical) Thanks, Kobi
#f2dcdc 2328 code active 2007 Apr anonymous 2007 Apr 1 1 Makefile sqlite3.c target breakage for C++ This is generated by "make sqlite3.c": #if 0 extern "C" { #endif _2007-Apr-29 06:02:20 by anonymous:_ {linebreak} If the entire sqlite3.c almalgomation is wrapped with: #ifdef __cplusplus extern "C" { #endif ... #ifdef __cplusplus } #endif then it could be compiled with a C++ compiler. Please close this ticket if you did not intend to have this capability.
#e8e8bd 2327 new active 2007 Apr anonymous 2007 Apr anonymous 2 1 "DELETE" operation makes memory rise First declare a standard SQL script: delete from TableName where ....; Then call repeatedly the sqlite3_exec() to process this "DELETE" operation. Surprisingly the memory was rising fast, and couldn't be freed even after the program exited.
#f2dcdc 2322 code active 2007 Apr anonymous 2007 Apr 1 1 Windows error: datetime('2000-10-29 06:00:00','localtime') NY time zone. Windows (from http://sqlite.org/sqlite-3_3_17.zip) SELECT coalesce(datetime('2000-10-29 06:00:00','localtime'),'NULL'); 2000-10-29 02:00:00 Linux (from latest CVS, same TZ) SELECT coalesce(datetime('2000-10-29 06:00:00','localtime'),'NULL'); 2000-10-29 01:00:00 make test errors on Windows only: date-6.2... Expected: [{2000-10-29 01:00:00}] Got: [{2000-10-29 02:00:00}] date-6.3... Expected: [{2000-04-02 01:59:00}] Got: [{2000-04-02 02:59:00}] date-6.6... Expected: [{2000-10-29 07:00:00}] Got: [{2000-10-29 06:00:00}] date-6.7... Expected: [{2000-04-02 06:59:00}] Got: [{2000-04-02 05:59:00}] _2007-Apr-26 23:09:12 by anonymous:_ {linebreak} Confirmed Windows bug on Windows 2000 in NY time zone with Y2K7DST OS patch. ---- _2007-Apr-27 22:03:25 by drh:_ {linebreak} Do I correctly understand the previous remark to say that this is confirmed to be a bug in Windows, not a bug in SQLite? It is identical code in SQLite for both operating systems, so I would certainly suspect that the problem is in windows and not in SQLite. But it would be nice to have confirmation of this before closing the ticket. ---- _2007-Apr-28 03:19:06 by anonymous:_ {linebreak} I meant to write "This erroneous SQLite datetime() output can also be seen on my Windows 2000 machine." Does it work correctly for you under Windows XP or 2000 with the DST patch? OS bug or not, it would be strange to not have the datetime() function correctly on a primary platform. If that were the case, it would be better to #ifdef it out of the Windows compile altogether. ---- _2007-Apr-28 03:59:27 by anonymous:_ {linebreak} This is a bigger mess than I thought. http://groups.google.com/group/perl.perl5.porters/msg/e632557614474014?hl=en& Key phrase: "This API only provides the transition times according to the *current* DST rules. There is no database of historical transition times. 
That means that localtime() applied to previous years will use the new transition times even for old timestamps." Please don't close this bug. Perhaps some industrious Windows programmer will have a correct solution for it one day. But in the meantime, Windows SQLite users should be aware of it.
#e8e8bd 2326 doc active 2007 Apr anonymous 2007 Apr a.rottmann 5 2 miss one word 'list' in documentation sqlite3: A command-line access program for SQLite The sqlite3 program is able to show the results of a query in eight different formats: "csv", "column", "html", "insert", "line", "tabs", and "tcl". missed one format: "list" it should be: The sqlite3 program is able to show the results of a query in eight different formats: "csv", "column", "html", "insert", "line", "list", "tabs", and "tcl".
#f2dcdc 2320 code active 2007 Apr anonymous 2007 Apr drh 1 1 sqlite3_open(sFN_with_umlaut) Do it in a standard MS Visual Studio Project:
0. CString sFnWithUmlaut = "c:\\long path\\path with umlauts äÄ\\db";
1. call sqlite3_open(sFnWithUmlaut);
2. db cannot be opened, because the transformation functions utf8ToUnicode/unicodeToUtf8 work incorrect
Is there a way to correct this error on win32?
Is there a workaround?
For a solution thanks in advance... _2007-Apr-25 20:21:24 by anonymous:_ {linebreak} The functions work correctly, but you are not using them in the correct way. The parameter to sqlite3_open function should be UTF8 string, but you are passing one that is specific to your code page.
#f2dcdc 2282 code active 2007 Apr anonymous 2007 Apr 3 4 Update on view with no matching trigger does not raise error Attempting to update a view with no triggers properly fails with the error sqlite> update foo set key=key+1; SQL error: cannot modify foo because it is a view However, if a single trigger is added that contains a WHEN clause, then UPDATE statements that do not satisfy that WHEN clause silently succeed without raising any error. sqlite> select 'Before:'; select * from foo; update foo set key=key+1; select 'After:'; select * from foo; Before: 1|42|forty-two|42.0 2|69|sixty-nine|69.0 After: 1|42|forty-two|42.0 2|69|sixty-nine|69.0 _2007-Apr-18 21:50:00 by anonymous:_ {linebreak} Your desired behavior can be accomplished by changing your trigger to: create trigger foo_update instead of update on foo begin select raise(ABORT, 'invalid key') where old.key <> new.key; update foo_backing set num=new.num, str=new.str, float=new.float, dirty=1 where key=new.key; end; ---- _2007-Apr-24 20:26:46 by anonymous:_ {linebreak} I have come up with a sample patch that *partially* fixes this problem -- specifically, it raises an error if any rows affected by an update/delete against a view are not caught by any triggers. It does not handle uncaught inserts, however, because I couldn't quite figure out how to make that work (much of the logic for updates and deletes is almost identical, whereas the code for inserts is quite different.) This patch adds/updates 76 lines across 4 files. I disclaim all copyright to these 76 lines.
#e8e8bd 2313 build active 2007 Apr anonymous 2007 Apr 3 3 readline.h is not properly detected This is actually an old issue i also had with 2.8.15. configure says "checking for readline.h... no", but it really needs to look for readline/readline.h (or both?) this is easily fixed with "--with-readline-inc=-I/path/to/include" although the actual syntax for this is a bit unusual/unintuitive) but such "fix" should not be needed as i had these in my environment: CPPFLAGS="-I/path/to/include" CFLAGS="-I/path/to/include" (The library was found by configure, thanks to my LDFLAGS environment setting which is similar to the above.)
#f2dcdc 2303 code active 2007 Apr anonymous 2007 Apr 1 1 Encrypted databases: No page cache due to problem in pagerSharedLock With codec encryption enabled, =pagerSharedLock= always invalidates the page cache, even if no changes have occured since the cache was last valid and it would be safe to retain the cached pages. This in fact disables the newly improved page cache for encrypted databases and slows down performance. The problem occurs because =pagerSharedLock= reads the change counter directly from the database file without codec decryption. Since the codec always encrypts full pages, the 4 bytes at offset 24 are read as encrypted data and do not match =Pager->iChangeCount=. To solve, codecs would be required to store the 4 bytes at offset 24 of page 1 unencrypted. This would, however, render those 4 bytes vulnerable to attacks. It would therefore be more secure if =pagerSharedLock= could decrypt page one prior to extracting the change counter. Check-in [3844] does not fix the problem to reset the cache if the codec is changed but the database file is not. The following procedure for opening an encrypted database no longer works with the improved page cache: *: Open an encrypted database. Do not set a key yet as we (pretend to) believe that the database is not encrypted. *: Access the DB for reading. This returns =SQLITE_NOTADB=, so we conclude that the DB is encrypted. *: Attach the proper codec using =sqlite3CodecAttach=. *: Access the DB again. *Problem:* This still returns =SQLITE_NOTADB= because the old page cache is still in use and is not reloaded. The codec change is not detected because the pager checks the unencrypted DB file instead of the decrypted page. The file of course did not change, but the decrypted page did because of the new codec. The cache should therefore be cleared. A workaround would be possible if =sqlite3CodecAttach= could reset the page cache. Unfortunately, the method to do so (=pager_reset=) is static to pager.c. 
It seems that there once was an external function =sqlite3PagerReset= (it is still defined in pager.h), but its implementation is unfortunately no longer available. Could this be fixed in a way that =pagerSharedLock= checks the decrypted page 1 to see if the database has been modified or, alternatively, by reverting the static =pager_reset= back to the external =sqlite3PagerReset=?
#e8e8bd 2308 build active 2007 Apr anonymous 2007 Apr 4 3 make sqlite3.c recreates sqlite3.c even though nothing changed When building the amalgamized sqlite3.c source file, make will recreate the sqlite3.c source file each time it's run. When using this as part of a larger build process, this is annoying, since it will result in unnecessary compilations. The fix is to rename the makefile target target_source to tsrc to make sure make will be able to properly detect the dependencies. Below is a patch for Makefile.in that fixes this: --- Makefile.in 19 Apr 2007 10:20:59 -0000 1.167 +++ Makefile.in 19 Apr 2007 11:08:50 -0000 @@ -296,14 +296,14 @@ # files are automatically generated. This target takes care of # all that automatic generation. # -target_source: $(SRC) parse.c opcodes.c keywordhash.h $(VDBEHDR) +tsrc: $(SRC) parse.c opcodes.c keywordhash.h $(VDBEHDR) rm -rf tsrc mkdir -p tsrc cp $(SRC) $(VDBEHDR) tsrc rm tsrc/sqlite.h.in tsrc/parse.y cp parse.c opcodes.c keywordhash.h tsrc -sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl +sqlite3.c: tsrc $(TOP)/tool/mksqlite3c.tcl tclsh $(TOP)/tool/mksqlite3c.tcl # Rules to build the LEMON compiler generator _2007-Apr-20 06:01:23 by anonymous:_ {linebreak} Make does not deal well with directories as dependencies (because their last modification time doesn't mean what Make thinks it means). It would be much better to use a stamp file.
#f2dcdc 2310 code active 2007 Apr anonymous 2007 Apr anonymous 4 4 Problem installing on AIX 5.3, ML5 after successful compile with xlc After performing the suggested edits to the Makefile (from Tom Poindexter 2003-12-17): edit Makefile, change the TCC macro: TCC = xlc -q32 -qlonglong -D_LARGE_FILE=1 -D_LARGE_FILES=1 -DUSE_TCL_STUBS=1 -O2 -DOS_UNIX=1 -DOS_WIN=0 -DHAVE_USLEEP=1 -I. -I${TOP}/src Version 3.3.15 compiled perfectly. However, a make install gave me this: : /data/bld --> make install tclsh ../sqlite-3.3.15/tclinstaller.tcl 3.3 couldn't open ".libs/libtclsqlite3.so": no such file or directory while executing "open $LIBFILE" invoked from within "set in [open $LIBFILE]" (file "../sqlite-3.3.15/tclinstaller.tcl" line 23) make: 1254-004 The error code from the last command is 1. I had to edit line 8 of ../sqlite-3-3-15/tclinstaller.tcl by adding a ".0" to the end. Then it installed perfectly. _2007-Apr-19 20:47:24 by anonymous:_ {linebreak} Haven't used AIX or xLC for 15 years - does IBM still make UNIX machines?
#e8e8bd 2304 new active 2007 Apr anonymous 2007 Apr 1 1 resolve "databas is locked" problem under DEFERRED transaction under DEFERRED transaction, if there are multiple thread immediate execute writing operation after BEGIN statement,{linebreak} sqlite will direct kick in "database is locked" exception, but if you execute some reading operation before writing operation,{linebreak} it works well, could BEGIN statement acquire a shared lock and solve this problem? {quote: relative mail archive http://www.mail-archive.com/sqlite-users@sqlite.org/msg21768.html }
#e8e8bd 2302 build active 2007 Apr anonymous 2007 Apr anonymous 4 4 sqlite3 does not honor configure --disable-threads anymore In a non-threaded TCL build, the TEA configuration option --disable-threads is no longer honored. In version 3.3.12 this used to work: (test) 49 % packa req sqlite3 couldn't load file "/usr/local/tcl/8.5a5-1/lib/sqlite3.3.15/libsqlite3.3.15.so": /usr/local/tcl/8.5a5-1/lib/sqlite3.3.15/libsqlite3.3.15.so: undefined symbol: pthread_create (test) 50 % packa req -exact sqlite3 3.3.12 3.3.12 In new file tclsqlite3.c, line 11734, threading is hard-coded with #define THREADSAFE 1 A workaround for non-threaded builds is to set this manually to #define THREADSAFE 0 _2007-Apr-15 17:03:07 by anonymous:_ {linebreak} I encountered the same problem and I agree that this change is problematic and should be reverted. ---- _2007-Apr-15 18:03:05 by drh:_ {linebreak} Why is it such a problem that the library is threadsafe? Just because it is threadsafe does not mean you are required to use threads, or anything like that. Everything continues to work normally in a single threaded application. There is no measurable performance impact. Why is it so important to you that the threading mutexes not be enabled? ---- _2007-Apr-15 19:34:11 by anonymous:_ {linebreak} In a shared library setting it's not such a big deal, but in a purely static binary it can pull in a fair bit of unwanted thread library code. Also, some embedded UNIX-like targets lack a pthreads implementation. The autoconf default can be threadsafe instead of non-threadsafe. It would be nice if it respected the autoconf flag as it did before.
#e8e8bd 2299 build active 2007 Apr anonymous 2007 Apr 1 1 Cannot compile sqlite-3.3.15 on linux rhel My platform: Linux 2.6.9-42.0.3.ELsmp #1 SMP Mon Sep 25 17:28:02 EDT 2006 i686 i686 i386 GNU/Linux My gcc version: gcc (GCC) 3.4.6 20060404 (Red Hat 3.4.6-3) My problem: I'm (ultimately) trying to get svntrac built and installed on this machine, but cannot compile the sqlite dependency. If I follow the documented build procedure, namely: 1) Create sibling directory to source directory 2) Run ../sqlite-3.3.15/configure from build directory 3) Run make from build directory I get build errors, mostly: undefined reference to `__getreent' _2007-Apr-13 17:20:23 by anonymous:_ {linebreak} gcc -g -O2 -o lemon ../sqlite-3.3.15/tool/lemon.c /tmp/ccOClNK1.o(.text+0x7c): In function `Action_new': ../sqlite-3.3.15/tool/lemon.c:344: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x153): In function `acttab_alloc': ../sqlite-3.3.15/tool/lemon.c:440: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x1ed): In function `acttab_action': ../sqlite-3.3.15/tool/lemon.c:455: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x223): In function `myassert': ../sqlite-3.3.15/tool/lemon.c:567: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x4e4): In function `acttab_insert': ../sqlite-3.3.15/tool/lemon.c:497: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x68f):../sqlite-3.3.15/tool/lemon.c:1362: more undefined references to `__getreent' follow /tmp/ccOClNK1.o(.text+0x280f): In function `tplt_xfer': ../sqlite-3.3.15/tool/lemon.c:2980: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x299a): In function `tplt_open': ../sqlite-3.3.15/tool/lemon.c:3026: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x2a54): In function `tplt_linedir': ../sqlite-3.3.15/tool/lemon.c:3042: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x2a5d):../sqlite-3.3.15/tool/lemon.c:3042: undefined reference to `__swbuf_r' 
/tmp/ccOClNK1.o(.text+0x2a8b):../sqlite-3.3.15/tool/lemon.c:3041: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x2a93):../sqlite-3.3.15/tool/lemon.c:3041: undefined reference to `__swbuf_r' /tmp/ccOClNK1.o(.text+0x2b68): In function `tplt_print': ../sqlite-3.3.15/tool/lemon.c:3061: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x2b71):../sqlite-3.3.15/tool/lemon.c:3061: undefined reference to `__swbuf_r' /tmp/ccOClNK1.o(.text+0x2ba6):../sqlite-3.3.15/tool/lemon.c:3065: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x2bae):../sqlite-3.3.15/tool/lemon.c:3065: undefined reference to `__swbuf_r' /tmp/ccOClNK1.o(.text+0x3196): In function `print_stack_union': ../sqlite-3.3.15/tool/lemon.c:3366: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x3319):../sqlite-3.3.15/tool/lemon.c:3387: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x3ae0): In function `translate_code': ../sqlite-3.3.15/tool/lemon.c:3207: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x447c): In function `ReportTable': ../sqlite-3.3.15/tool/lemon.c:3534: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x44a3):../sqlite-3.3.15/tool/lemon.c:3535: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x55d4):../sqlite-3.3.15/tool/lemon.c:3575: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x5800): In function `Symbol_new': ../sqlite-3.3.15/tool/lemon.c:4259: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x5ba3): In function `Parse': ../sqlite-3.3.15/tool/lemon.c:2500: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x5c2c):../sqlite-3.3.15/tool/lemon.c:2407: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x619b):../sqlite-3.3.15/tool/lemon.c:1997: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x61eb):../sqlite-3.3.15/tool/lemon.c:2201: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x628e):../sqlite-3.3.15/tool/lemon.c:2027: more undefined 
references to `__ctype_ptr' follow /tmp/ccOClNK1.o(.text+0x65b8): In function `Parse': ../sqlite-3.3.15/tool/lemon.c:2439: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x6603):../sqlite-3.3.15/tool/lemon.c:2415: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x6628):../sqlite-3.3.15/tool/lemon.c:2415: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x6baa):../sqlite-3.3.15/tool/lemon.c:2164: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x7bad): In function `main': ../sqlite-3.3.15/tool/lemon.c:1419: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x7c65):../sqlite-3.3.15/tool/lemon.c:1445: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x7ca0):../sqlite-3.3.15/tool/lemon.c:1425: undefined reference to `__getreent' /tmp/ccOClNK1.o(.text+0x7d4c):../sqlite-3.3.15/tool/lemon.c:1457: undefined reference to `__ctype_ptr' /tmp/ccOClNK1.o(.text+0x7e4b):../sqlite-3.3.15/tool/lemon.c:1514: undefined reference to `__getreent' collect2: ld returned 1 exit status make: *** [lemon] Error 1 ---- _2007-Apr-15 13:51:08 by anonymous:_ {linebreak} I am not real familiar with Red Hat, but that looks to me like a case of your C library headers being out of sync with the library proper. There are several different ways that could happen; I would guess that the most probable is that you installed a version of GCC by hand, it copied some of the C library headers to a private directory (GCC tends to do this when you build it from source, unfortunately) and then you installed a new version of the C library from packages. ---- _2007-Apr-15 13:54:33 by anonymous:_ {linebreak} To be clearer, I think this is a local installation problem, not a bug in SQLite.
#e8e8bd 2301 build active 2007 Apr anonymous 2007 Apr 1 1 Latest cvs 3.3.15 fails lock4-1.3 test export CFLAGS=-O3 ./configure --prefix=/usr/local make make test produces a single failure... lock4-1.2... Ok lock4-1.3... Error: database is locked lock4-999.1... Ok _2007-Apr-15 02:47:15 by anonymous:_ {linebreak} which OS? ---- _2007-Apr-15 11:31:46 by drh:_ {linebreak} To amplify the previous comment, I observe that the test works fine for me on both Linux (SuSE 10.1) and Mac OS-X x86.
#f2dcdc 2297 code active 2007 Apr anonymous 2007 Apr drh 3 3 uninitialized var (with patch) Warnings with amalgamation and NDEBUG. _2007-Apr-12 21:21:29 by drh:_ {linebreak} I looked at the suggested changes and I didn't find any cases where it really was possible to use an uninitialized variable, at least not in a harmful way. Did I overlook something, or is this ticket just a request to silence compiler warnings? ---- _2007-Apr-13 00:08:36 by anonymous:_ {linebreak} vdbe.c with n, n64, payloadSize and payloadSize64{linebreak} The return values of sqlite3BtreeKeySize and sqlite3BtreeLast are not checked. You cannot be sure the pointer passed as the second argument will be initialized, depending on the return of restoreOrClearCursorPosition (btree.c).{linebreak} page.c with ro{linebreak} Compiled with -DNDEBUG, the return of sqlite3OsOpenReadWrite is not checked before making a move with 'ro'. For sContext.zAuthContext in delete.c/update.c, you're the one. gcc (compiler) warnings in general are quite useful; I don't think it's a good idea to ignore them and accumulate danger. Perhaps one day, one line in a subroutine will modify some tricky behavior and (re)raise a previously checked warning, making it look completely normal and 'under control'.
#e8e8bd 2256 new active 2007 Feb anonymous 2007 Apr drh 5 1 Add POSITION() function (SQL-92 standard) Hi! Just voting for POSITION() function. It's mighty useful when you need to modify a field in real time with SUBSTR() function (for instance, when 'start' parameter of SUBSTR() function needs to be variable according to POSITION() function). Many thanks! Regards. _2007-Mar-23 14:10:14 by anonymous:_ {linebreak} Yes, this is a function which I often suffered from not being available.
#f2dcdc 2288 code active 2007 Apr anonymous 2007 Apr 4 2 FTS does not support REPLACE Simple to replicate: CREATE VIRTUAL TABLE fts_table USING fts2(text); INSERT OR REPLACE INTO fts_table (rowid, text) VALUES (1, 'text1'); INSERT OR REPLACE INTO fts_table (rowid, text) VALUES (1, 'text2'); The first insert succeeds, the second fails. Also occurs with fts1. _2007-Apr-10 15:27:10 by anonymous:_ {linebreak} http://www.mail-archive.com/sqlite-users%40sqlite.org/msg23865.html
#e8e8bd 2283 warn active 2007 Apr anonymous 2007 Apr 1 1 Compile warning by VCToolkit2003 sqlite3.c D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5494) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5495) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5600) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5601) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5604) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5605) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5606) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5607) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5622) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5623) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5625) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5668) : warning C4244: 'initializing' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5674) : warning C4244: '=' : conversion from 'double' to 'time_t', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5785) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5791) : warning C4244: '=' : conversion from 'double' to 'int', 
possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5883) : warning C4244: '+=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5889) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(5895) : warning C4244: '+=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(6104) : warning C4244: '=' : conversion from 'double' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(9622) : warning C4244: '=' : conversion from 'u64' to 'unsigned char', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(9625) : warning C4244: '=' : conversion from 'u64' to 'unsigned char', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(9632) : warning C4244: '=' : conversion from 'u64' to 'u8', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(16030) : warning C4244: 'initializing' : conversion from 'i64' to 'LONG', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(16031) : warning C4244: 'initializing' : conversion from 'i64' to 'LONG', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(16071) : warning C4244: 'initializing' : conversion from 'i64' to 'LONG', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(16075) : warning C4244: 'function' : conversion from 'i64' to 'LONG', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(17312) : warning C4018: '<' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(17903) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(17908) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18120) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18128) 
: warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18146) : warning C4018: '<' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18264) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18280) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18668) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18674) : warning C4244: 'return' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(18771) : warning C4018: '<=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(19202) : warning C4018: '<=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(20253) : warning C4018: '>' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(20470) : warning C4018: '<=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(21671) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(21673) : warning C4244: 'function' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(23335) : warning C4018: '>' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(23335) : warning C4018: '<=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(23977) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(23983) : warning C4018: '>' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24117) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24124) : warning C4018: '>' : signed/unsigned mismatch 
D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24437) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24437) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24439) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24441) : warning C4244: 'function' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24442) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24442) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24815) : warning C4018: '>' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(24976) : warning C4018: '>' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(25046) : warning C4244: '+=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(25048) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(26605) : warning C4018: '>' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(27294) : warning C4244: '+=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(27821) : warning C4101: 'rc' : unreferenced local variable D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(28087) : warning C4244: '=' : conversion from 'double' to 'i64', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(28325) : warning C4244: '=' : conversion from 'const i64' to 'double', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(28330) : warning C4244: '=' : conversion from 'const i64' to 'double', possible loss of data 
D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30422) : warning C4244: 'return' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30479) : warning C4244: '=' : conversion from 'u64' to 'unsigned char', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30644) : warning C4018: '>=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30646) : warning C4018: '>=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30671) : warning C4018: '<' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30673) : warning C4018: '<' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30717) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30756) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(30860) : warning C4244: 'return' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(31019) : warning C4244: '=' : conversion from 'double' to 'i64', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(31056) : warning C4244: '=' : conversion from 'double' to 'u64', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(33652) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(33718) : warning C4018: '>=' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(33736) : warning C4018: '<' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(33751) : warning C4018: '<' : signed/unsigned mismatch D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(34173) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(34177) : warning C4244: '=' : conversion from 'i64' to 'u8', possible loss of data 
D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(34279) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(34297) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(35189) : warning C4244: '=' : conversion from 'i64' to 'u32', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(35869) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(35875) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(36389) : warning C4244: '=' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(36403) : warning C4244: 'function' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(46396) : warning C4244: 'initializing' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(46397) : warning C4244: 'initializing' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(46398) : warning C4244: 'initializing' : conversion from 'i64' to 'int', possible loss of data D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(56943) : warning C4005: 'ARRAYSIZE' : macro redefinition D:\Ulti\SDK\Include\WinNT.h(950) : see previous definition of 'ARRAYSIZE' D:\Ulti\MyApps\USQLite3\SQLite\sqlite3.c(58192) : warning C4244: '=' : conversion from 'u16' to 'unsigned char', possible loss of data USQLite3: 2 file(s) built in (0:04.04), 2022 msecs / file, duration = 4586 msecs
#e8e8bd 2006 event active 2006 Sep anonymous 2006 Sep 1 1 Strange data in a table. When dumping a database file, this is what I found: CREATE TABLE TopSites ( XID INTEGER REFERENCES X(ID), YID INTEGER REFERENCES Y(ID), URLID INTEGER REFERENCES TopSitesURLs(ID)); INSERT INTO "TopSites" VALUES(-761955577, 5, 1322);{linebreak} INSERT INTO "TopSites" VALUES(-761955577, 5, 1120);{linebreak} INSERT INTO "TopSites" VALUES(-761955577, 5, 1323);{linebreak} INSERT INTO "TopSites" VALUES(-761955577, 5, 1324);{linebreak} ....................................................... INSERT INTO "TopSites" VALUES(-761955577, 5, 1323);{linebreak} INSERT INTO "TopSites" VALUES(-761955577, 5, 1324);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.bnimanningham.com', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.wellnesscareoncollins.com.au/Chiropractic-Articles.html', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.healthyrisepharmacy.com', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.alextechmelb.com/testimonials.html', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.ecca.com.au/melbourne-contactus.html', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.naturopathicwellness.com.au/additionaltherapies.htm', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.rrr.org.au/sponsors.php', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.caavic.asn.au/html/s02_article/show_article.asp?id=507&topic_id=-1&category_id=-1', NULL); INSERT INTO "TopSites" VALUES(NULL, 'http://www.cosmeticchoice.com.au/healing_nutrition.php?PHPSESSID=&PHPSESSID=d85928253b38f1bf88200022e7a93218', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.coca.com.au/vic.htm', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.embracechiropractic.com.au', NULL);{linebreak} INSERT INTO "TopSites" VALUES(NULL, 'http://www.cooperchiro.com', NULL);{linebreak} The 
database was created on: os: Mac OS X 10.4.6 jre: 1.5.0_06-64 sqlite: 3.3.4 The code for inserting into the database is: public static String GetTopSitesInsert(int aX, int aY, int aURLID){linebreak} {{linebreak} return "INSERT OR ROLLBACK INTO TopSites (XID, YID, URLID) VALUES {linebreak}(" + aX + ", " + aY + ", " + aURLID + ");";{linebreak} }{linebreak} I think that the last lines are from another table, or from another insert, as the java int could have never been a value like: http://www.bnimanningham.com When trying to delete some rows from this table, sqlite threw "malformed database" exception and the java virtual machine crashed. _2006-Sep-29 12:23:31 by anonymous:_ {linebreak} This is duplicate of #2005 ---- _2006-Sep-29 14:03:50 by drh:_ {linebreak} I'm thinking this and #2005 represent a bug in whatever Java bindings the reporter is using.
#e8e8bd 2005 event active 2006 Sep anonymous 2006 Sep 1 1 Multiple rows with the same primary key, and null values in "not null" This is what I have found when dumping a database file: CREATE TABLE TopSitesURLs ( ID INTEGER PRIMARY KEY, URLText TEXT NOT NULL ); INSERT INTO "TopSitesURLs" VALUES(1, 'http://www.backinline.com.au');{linebreak} INSERT INTO "TopSitesURLs" VALUES(2, 'http://www.wellnesscareoncollins.com.au');{linebreak} INSERT INTO "TopSitesURLs" VALUES(3, 'http://www.oakleighdental.com.au/chirodontics.php');{linebreak} INSERT INTO "TopSitesURLs" VALUES(4, 'http://bacinactionchiropractic.com');{linebreak} INSERT INTO "TopSitesURLs" VALUES(5, 'http://melbourne.zpages.com.au/chiropractors');{linebreak} INSERT INTO "TopSitesURLs" VALUES(6, 'http://myname.chiropractic.com.au');{linebreak} INSERT INTO "TopSitesURLs" VALUES(7, 'http://www.melbournechiropractor.com');{linebreak} INSERT INTO "TopSitesURLs" VALUES(8, 'http://www.chiroweb.net/us/fl_melbourne.html');{linebreak} INSERT INTO "TopSitesURLs" VALUES(9, 'http://www.chiropractor.net.au/aridiskin.htm');{linebreak} INSERT INTO "TopSitesURLs" VALUES(10, 'http://www.melbournechiropractic.com.au');{linebreak} INSERT INTO "TopSitesURLs" VALUES(11, 'http://www.melbournechiropractor.com/index.php?page=privacy.php&pageID=-1');{linebreak} INSERT INTO "TopSitesURLs" VALUES(12, 'http://www.vitaminstoday.com.au/chiropractor/index.php?page=grid');{linebreak} INSERT INTO "TopSitesURLs" VALUES(13, 'http://www.goodechiro.com/index.asp');{linebreak} INSERT INTO "TopSitesURLs" VALUES(14, 'http://www.goodechiro.com/FirstVisit.asp');{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, 'http://www.usenature.com/chirodirectory.htm');{linebreak} INSERT INTO "TopSitesURLs" VALUES(16, 'http://www.melbournemeditationcentre.com.au/courses/teacher.htm');{linebreak} INSERT INTO "TopSitesURLs" VALUES(17, 'http://www.yogatree.com.au/Therapies.htm');{linebreak} 
................................................................................... INSERT INTO "TopSitesURLs" VALUES(6259, 'http://www.yarravillehealth.com.au/osteopath-melbourne.html');{linebreak} INSERT INTO "TopSitesURLs" VALUES(6260, 'http://www.worldveganday.org.au/forum/viewtopic.php?p=4543&sid=465ea5c2e7452f6fd23470488e277781');{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(14, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} INSERT INTO "TopSitesURLs" VALUES(15, NULL);{linebreak} The database was created on: os: Mac OS X 10.4.6 jre: 1.5.0_06-64 sqlite: 3.3.4 The primary key 15 is duplicated, and the "not null" 
field is null. _2006-Sep-29 12:03:17 by anonymous:_ {linebreak} What tool (and parameters) did you use to dump the database? ---- _2006-Sep-29 13:13:28 by drh:_ {linebreak} When I run the SQL, I get lots of errors. And the resulting database does not contain any duplicate primary keys or NULLs in NOT NULL columns. Can you attach the database that contains duplicate primary keys and NULLs in NOT NULL columns to this ticket so that I can see it? jre==Java Runtime Engine? Are you using some kind of java binding to SQLite? If so, which one? Is SQLite in a separate DLL, or is your Java binding using a statically linked (and possibly modified and broken) version of SQLite? ---- _2006-Oct-03 15:15:54 by anonymous:_ {linebreak} I am using a java wrapper for sqlite: http://www.ch-werner.de/javasqlite/overview-summary.html I got the same problem again: INSERT INTO "TopSitesURLs" VALUES(13023, 'http://costaricaretirementvacationproperties.com/index.php?op=show_listing&ShowOption=Condo Resales&option=cat'); INSERT INTO "TopSitesURLs" VALUES(13024, 'http://www.hot-tropics.com/costa-rica-links.html'); INSERT INTO "TopSitesURLs" VALUES(13025, 'http://southpacificrealestateservices.com/index.php?PHPSESSID=6b7a257fad5cbd886f09526a2cd59ed8'); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT 
INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" 
VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(9, NULL); INSERT INTO "TopSitesURLs" VALUES(13041, 'http://www.livingabroadincostarica.com'); INSERT INTO "TopSitesURLs" VALUES(13042, 'http://limitededitionsre.com/blog.html'); INSERT INTO "TopSitesURLs" VALUES(13043, 'http://www.officecenter.nosaranet.com/property.html'); You can see the index 13025, then up to 13041 you can see only 9. How do I upload a database? ---- _2006-Oct-03 15:17:33 by anonymous:_ {linebreak} I have dumped the database with sqlite3.exe in command line: sqlite3.exe file.db .dump > file.sql ---- _2006-Oct-03 17:44:00 by anonymous:_ How do I upload a database? Use the _Attach_ link near the top right. Note that attachment size is currently limited to 100KB. ---- _2006-Oct-04 06:38:23 by anonymous:_ {linebreak} An integrity check on this database looks like this:{linebreak} C:\Documents and Settings\stefan matei\Desktop>sqlite3 project.db{linebreak} SQLite version 3.3.4{linebreak} Enter ".help" for instructions{linebreak} sqlite> PRAGMA integrity_check;{linebreak} *** in database main ***{linebreak} On tree page 59 cell 10: 2nd reference to page 1077{linebreak} On tree page 59 cell 10: Child page depth differs{linebreak} On tree page 59 cell 11: Child page depth differs{linebreak} On page 871 at right child: 2nd reference to page 1078{linebreak} sqlite> .quit{linebreak} When can this happen? Is there a fix for this (integrity fix or something)? I ask this because the database is perfectly readable. 
I assume that a tool can be done to check the tables and remove all the data that are not complying to the table definition.
#e8e8bd 2004 warn active 2006 Sep anonymous 2006 Sep 1 4 vtab.c:142: warning: pointer targets in initialization differ in signedness const unsigned char *z = pParse->sArg.z; fixes this warning, but then this warning appears: vtab.c:145: warning: pointer targets in passing argument 1 of 'sqlite3StrNDup' differ in signedness which can be fixed with: addModuleArgument(pParse->pNewTable, sqliteStrNDup((char *)z, n));
#f2dcdc 1822 code active 2006 May anonymous 2006 Sep 3 3 Table Alias together with Subquery seems not to work properly SELECT * FROM auth AS a LEFT JOIN (SELECT tm.team FROM teammbs AS tm) AS tr ON a.ateam=tr.team; Error message: No such column tr.team But if I run the sub-query by itself, it works fine. Of course, this example can be expressed differently, so no subquery is required. But the complete expression looks like this: SELECT a.auth, a.avalue FROM auth a LEFT JOIN (SELECT tm.member, tm.team FROM teammbs tm, team t WHERE tm.team=t.teamid AND (t._state<64 or (t._state>120 AND t._state<192)) AND (tm._state<64 or (tm._state>120 AND tm._state<192))) AS tr ON a.ateam=tr.team WHERE (a._state<64 or (a._state>120 AND a._state<192)) AND (a.auser='test' OR tr.member='test') ORDER BY a.auth; It works fine with MySQL 5, but raises the same error on SQLite 3: No such column tr.team. Any idea?
#e8e8bd 1998 build active 2006 Sep anonymous 2006 Sep 2 3 prefix option to configure ignored in tclinstaller.tcl schliep@karlin:~/tmp/sqlite-3.3.7> configure --prefix=/some/dir ... schliep@karlin:~/tmp/sqlite-3.3.7> make install tclsh ./tclinstaller.tcl 3.3 can't create directory "/usr/lib/tcl8.4/sqlite3": permission denied After commenting out all the stuff in ./tclinstaller.tcl things work
#e8e8bd 1996 new active 2006 Sep anonymous Unknown 2006 Sep drh 2 3 Data type CHAR An interface API for CHAR datatypes would really be helpful. For example, SQL tables often contain CHAR(1) or CHAR(10) datatypes. There should be some mechanism for handling these types natively, i.e.: sqlite3_bind_char sqlite3_column_char sqlite3_result_char sqlite3_value_char This would allow a more native implementation for CHAR datatypes. As it is, a single CHAR(1) must first be converted into a string (char[2]) and copied with a terminator. For CHAR types, no \000 termination is required; it is implied by the length. Thanks...
#f2dcdc 1994 code active 2006 Sep anonymous Parser 2006 Sep 1 3 Columns from nested joins aren't properly propagated When using this query: _:SELECT * FROM ROLE_ATTRIBUTE INNER JOIN (ROLE INNER JOIN PERSON ON ROLE.PERSON_ID=PERSON.ID) ON ROLE_ATTRIBUTE.PERSON_ID=ROLE.PERSON_ID AND ROLE_ATTRIBUTE.PROJECT_ID=ROLE.PROJECT_ID WHERE ((PERSON.FIRSTNAME = "bob")); the parser fails with an error "no such column: ROLE.PROJECT_ID". It seems that doing an inner join with more than one subexpression doesn't work. _2006-Sep-25 22:41:52 by anonymous:_ {linebreak} Your query will run without the brackets. SELECT * FROM PERSON P INNER JOIN ROLE_ATTRIBUTE RA ON P.ID = RA.PERSON_ID INNER JOIN ROLE R ON RA.PROJECT_ID = R.PROJECT_ID AND P.ID = R.PERSON_ID WHERE P.FIRSTNAME = 'bob'; ---- _2006-Sep-25 23:03:28 by navaraf:_ {linebreak} Hm, you're right. So actually the thing SQLite chokes on is the parenthesis syntax as JOIN parameter. I can try to modify the generator to produce the expanded form, but since the same code is used for MSSQL, MySQL and Oracle I still think it would be handy to allow it in SQLite too. Also it's not my code that generates these horrible expressions and I'd rather try to avoid modifying it. ---- _2006-Sep-26 09:59:13 by anonymous:_ {linebreak} I changed the title to correctly describe the problem. Also I found another thread on the mailing list that describes exactly the same problem: http://marc.10east.com/?t=115378699000001 ---- _2006-Sep-26 11:42:38 by navaraf:_ {linebreak} I believe the "lookupName" function in src/expr.c should do recursion for ephemeral tables found in the pSrcList (at least those that were created as subqueries in the FROM clause of the SELECT statement).
#f2dcdc 1990 code active 2006 Sep anonymous 2006 Sep 1 1 sqlite3_close doesn't release always the file handle I *think* that sqlite3_close behave strangly. I use version 3.3.7 on Linux (Fedora Core 5). What I do is to open a database, and start a transaction in it. Then, without ending the transaction, open again the database and simply close it. I found out, that the inner sqlite3_close return 0 (SQLITE_OK), but the file handle is not released. So if I do it too many times, I run out of file handles. You are free to ask why I open and close that many times the same database while it is already in transaction. This is my mistake. Actually, it is already fixed. But I still wonder - shouldn't the sqlite3_close return other thing then just SQLITE_OK? Especially if the file handle is not released? If it did, I would find my mistake much earlier. Here is my script that demonstrate it (you can use /usr/sbin/lsof in linux to see how many times the file is opened): #include int main(int argc, char **argv) { sqlite3* db; sqlite3* db_inner; int rc; int i; system("rm -f open_many_test.db"); rc = sqlite3_open("open_many_test.db", &db); sqlite3_exec(db, "begin", 0, 0, 0); sqlite3_stmt *pStmt; rc = sqlite3_prepare(db, "create table a (id varchar)", -1, &pStmt, 0); rc = sqlite3_step(pStmt); sqlite3_finalize(pStmt); rc = sqlite3_prepare(db, "insert into a values('bla')", -1, &pStmt, 0); rc = sqlite3_step(pStmt); sqlite3_finalize(pStmt); for (i = 0; i < 10000; i++) { rc = sqlite3_open("open_many_test.db", &db_inner); printf("sqlite3_open gives %d\n", rc); rc = sqlite3_close(db_inner); printf("sqlite3_close gives %d\n", rc); } sqlite3_exec(db, "commit", 0, 0, 0); rc = sqlite3_close(db); } _2006-Sep-23 15:29:46 by drh:_ {linebreak} This behavior is intentional. It is there to work around bugs in the design of posix advistory locks. See ticket #561 and check-in [1171]. 
Under posix, if you have the same file open multiple times and you close one of the file descriptors, all locks on that file for all file descriptors are cleared. To prevent this from occurring, SQLite defers closing file descriptors until all locks on the file have been released. One possible work-around would be to reuse file descriptors that are waiting to be closed for the next open, rather than creating a new file descriptor. ---- _2006-Sep-23 15:35:21 by anonymous:_ {linebreak} The inner call to sqlite3_open() should simply fail in that case, rather than set up a condition whereby a file descriptor is leaked (which no one wants). This is unfortunate because sqlite3_open()'s behavior would not be uniform across platforms. ---- _2006-Sep-23 16:43:32 by anonymous:_ {linebreak} SQLite should do a lookup via stat()'s st_dev/st_ino fields prior to open() and if found to be the same as an already opened database file, it should use the same (refcounted) file descriptor, eliminating the need for open() in this case. ...upon reflection, having two sqlite connections using the same file descriptor would be a bad thing. stat() could be used to decide if a fd pending close() is recyclable, though. ---- _2006-Sep-23 18:17:34 by drh:_ {linebreak} Two points: 1: SQLite does not and has never leaked file descriptors. All file descriptors are eventually closed. The close is merely deferred until the pending transaction COMMITs. 2: I will be taking a very cautious and careful approach toward resolving this issue. The issue itself is minor (it has only just now been reported but the behavior has been there for 3 years) but the consequences of getting the fix wrong are severe (database corruption.) And there are abundant opportunities for getting the fix wrong.
#f2dcdc 1983 code active 2006 Sep anonymous 2006 Sep 2 2 I/O Error at a size of 4GB and auto_vacuum=1 When I'm building a database with auto_vacuum=1 and page_size=8192, I get an I/O error at a size of about 4GB. All tables are still readable but then it isn't possible to insert any more data. The table is filled with a column of BLOBs and some columns with numbers. I use the 3.3.7 binary with Windows 2000 Server.
#f2dcdc 1980 code active 2006 Sep drh 2006 Sep 1 1 Initializing FTS1 twice causes it to fail. If you try to load the shared module twice, it causes the module to no longer work.
#e8e8bd 1975 new active 2006 Sep anonymous 2006 Sep 5 4 Request for sqlite3_table_column_metadata16 It would be nice to have a sqlite3_table_column_metadata16() function as an UTF-16 version of the existing sqlite3_table_column_metadata().
#f2dcdc 1972 code active 2006 Sep anonymous 2006 Sep 2 4 segfault on empty query SQLite 2.8.17 used in latest versions of PHP segfaults with empty query (i.e. " ", 1 whitespace). PHP reproduce code: query(" ")); ?> GDB backrace: Program received signal SIGSEGV, Segmentation fault. [Switching to Thread 1077220512 (LWP 3909)] 0x0814d227 in sqlite_step (pVm=0x0, pN=0x40364218, pazValue=0x40364210, pazColName=0x40364214) at /local/dev/php-src_5_2/ext/sqlite/libsqlite/src/vdbe.c:117 117 if( p->magic!=VDBE_MAGIC_RUN ){ (gdb) bt #0 0x0814d227 in sqlite_step (pVm=0x0, pN=0x40364218, pazValue=0x40364210, pazColName=0x40364214) at /local/dev/php-src_5_2/ext/sqlite/libsqlite/src/vdbe.c:117 #1 0x0812556a in pdo_sqlite2_stmt_execute (stmt=0x40364094) at /local/dev/php-src_5_2/ext/sqlite/pdo_sqlite2.c:102 #2 0x080bf4d5 in zim_PDO_query (ht=1, return_value=0x40363110, return_value_ptr=0x0, this_ptr=0x40363178, return_value_used=1) at /local/dev/php-src_5_2/ext/pdo/pdo_dbh.c:1033 #3 0x0824c1d6 in zend_do_fcall_common_helper_SPEC (execute_data=0xbfffca90) at /local/dev/php-src_5_2/Zend/zend_vm_execute.h:200 #4 0x0824c722 in ZEND_DO_FCALL_BY_NAME_SPEC_HANDLER (execute_data=0xbfffca90) at /local/dev/php-src_5_2/Zend/zend_vm_execute.h:322 #5 0x0824bde9 in execute (op_array=0x403637e4) at /local/dev/php-src_5_2/Zend/zend_vm_execute.h:92 #6 0x0822e66a in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /local/dev/php-src_5_2/Zend/zend.c:1095 #7 0x081e7993 in php_execute_script (primary_file=0xbfffef00) at /local/dev/php-src_5_2/main/main.c:1759 #8 0x082933de in main (argc=2, argv=0xbfffefe4) at /local/dev/php-src_5_2/sapi/cli/php_cli.c:1102 (gdb) f 0 #0 0x0814d227 in sqlite_step (pVm=0x0, pN=0x40364218, pazValue=0x40364210, pazColName=0x40364214) at /local/dev/php-src_5_2/ext/sqlite/libsqlite/src/vdbe.c:117 117 if( p->magic!=VDBE_MAGIC_RUN ){ (gdb) p p $1 = (Vdbe *) 0x0 Proposed patch: 
http://cvs.php.net/viewvc.cgi/php-src/ext/sqlite/libsqlite/src/vdbe.c?r1=1.7.4.1&r2=1.7.4.1.2.1 _2006-Sep-10 11:10:26 by drh:_ {linebreak} PHP is calling sqlite_step() with a NULL sqlite_stmt pointer. This seems more like a bug in PHP than SQLite. I suggest patching PHP, somewhere in pdo_sqlite2.c I'm guessing, so that it checks to see if the statement pointer returned by sqlite_prepare() is NULL and skips the sqlite_step() and sqlite_finalize() calls if it is. The proposed patch above seems to confirm this strategy: The proposed patch would cause PHP to receive an SQLITE_MISUSE error. An SQLITE_MISUSE error indicates that the API is begin used incorrectly. The right fix, it seems to me, would be to use the API correctly. ---- _2006-Sep-10 15:40:46 by anonymous:_ {linebreak} "An SQLITE_MISUSE error indicates that the API is begin used incorrectly". I agree - and this simple NULL check would be the perfect place for SQLite to issue such a MISUSE error. Having SQLite recover from such a relatively common type of NULL input error would be beneficial to its users. SQLite already goes to great lengths to recover from out of memory situations. I don't see any reason not to add a simple "if NULL" check here to avoid crashing the the user's application. ---- _2006-Sep-10 16:56:12 by drh:_ {linebreak} I would quickly add such a change to SQLite version 3. And in fact I have already done so. See Ticket #870 and check-in [1906]. But we are talking about SQLite version 2, here, which is in maintenance mode and should not be used for new development. ---- _2006-Sep-10 20:05:41 by anonymous:_ {linebreak} >I suggest patching PHP, somewhere in pdo_sqlite2.c That's possible too, but currently I see no reasons to do it. >I'm guessing, so that it checks to see if the statement pointer returned by > sqlite_prepare() is NULL and skips the sqlite_step() and sqlite_finalize() calls if it is. The statement pointer in this case is (not) returned by sqlite_compile() call. 
Here is the piece of code: ... S->einfo.errcode = sqlite_compile(S->H->db, stmt->active_query_string, &tail, &S->vm, &errmsg); if (S->einfo.errcode != SQLITE_OK) { pdo_sqlite2_error_stmt(errmsg, stmt); return 0; } S->done = 0; S->einfo.errcode = sqlite_step(S->vm, &S->ncols, &S->rowdata, &S->colnames); ... From what I see, sqlite_compile() considers " " query as valid and doesn't return error, but in the same time the statement pointer remains NULL, which is clearly wrong. As I've said, it's easy to check if it's NULL or not in PHPs code, but I really think that the problem is in sqlite_compile().
#f2dcdc 1974 code active 2006 Sep anonymous Unknown 2006 Sep 1 1 column type not consistent in views package require sqlite3 sqlite3 db test.db db eval { create table one ( size FLOAT ); create view two as select size from one; } db eval {insert into one values(50.0)} puts [db eval {select size from one}] puts [db eval {select size from two}] outputs: 50.0 50
#f2dcdc 1445 code active 2005 Sep anonymous 2006 Sep 3 3 Errors testing sqlite 3.2.6 (& v3.3.7) $ make test [...] conflict-6.0... Ok conflict-6.1... Ok conflict-6.2... Expected: [0 {7 6 9} 1 1] Got: [0 {7 6 9} 1 0] conflict-6.3... Expected: [0 {6 7 3 9} 1 1] Got: [0 {6 7 3 9} 1 0] conflict-6.4... Ok conflict-6.5... Ok conflict-6.6... Ok conflict-6.7... Expected: [0 {6 7 3 9} 1 1] Got: [0 {6 7 3 9} 1 0] conflict-6.8... Expected: [0 {7 6 9} 1 1] Got: [0 {7 6 9} 1 0] conflict-6.9... Expected: [0 {6 7 3 9} 1 1] Got: [0 {6 7 3 9} 1 0] conflict-6.10... Expected: [0 {7 6 9} 1 1] Got: [0 {7 6 9} 1 0] conflict-6.11... Expected: [0 {6 7 3 9} 1 1] Got: [0 {6 7 3 9} 1 0] conflict-6.12... Expected: [0 {6 7 3 9} 1 1] Got: [0 {6 7 3 9} 1 0] conflict-6.13... Expected: [0 {7 6 9} 1 1] Got: [0 {7 6 9} 1 0] conflict-6.14... Ok conflict-6.15... Ok conflict-6.16... Ok [...] date-3.12... Ok date-3.13... Ok date-3.14... Ok date-3.15... Ok date-3.16... Ok date-3.17... Ok /tmp/sqlite-3.2.6/.libs/lt-testfixture: invalid command name "clock" while executing "clock seconds" invoked from within "clock format [clock seconds] -format "%Y-%m-%d" -gmt 1" invoked from within "set now [clock format [clock seconds] -format "%Y-%m-%d" -gmt 1]" (file "./test/date.test" line 142) invoked from within "source $testfile" ("foreach" body line 4) invoked from within "foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] if {[lsearch -exact $EXCLUDE $tail]>=0} continue so..." (file "./test/quick.test" line 45) make: *** [test] Error 1 _2005-Sep-19 23:03:56 by drh:_ {linebreak} The test scripts do not (yet) work with Tcl 8.5. Use Tcl 8.4. ---- _2005-Sep-20 01:59:42 by anonymous:_ {linebreak} FYI, The conflict failures occur even when using tcl-8.4. 
The problem was reported on the mailing list: http://www.mail-archive.com/sqlite-users%40sqlite.org/msg10203.html Curiously, the failures correspond exactly to the test cases that were changed by the following patch: http://www.sqlite.org/cvstrac/filediff?f=sqlite/test/conflict.test&v1=1.24&v2=1.25 ---- _2006-Aug-31 23:49:40 by anonymous:_ {linebreak} building v337 on OSX 10.4.7 w/ TCL8.5 installed as Framework, 'make test' still fails w/: date-3.16... Ok date-3.17... Ok /usr/ports/sqlite-3.3.7/build/.libs/testfixture: invalid command name "clock" while executing "clock seconds" invoked from within "clock format [clock seconds] -format "%Y-%m-%d" -gmt 1" invoked from within "set now [clock format [clock seconds] -format "%Y-%m-%d" -gmt 1]" (file "../test/date.test" line 142) invoked from within "source $testfile" ("foreach" body line 4) invoked from within "foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] if {[lsearch -exact $EXCLUDE $tail]>=0} continue so..." (file "../test/quick.test" line 66) make: *** [test] Error 1 any resolution for this, other than revert to TCL 8.4? ---- _2006-Sep-01 01:26:37 by anonymous:_ {linebreak} SQLite under Cygwin fails all tests that involve integers larger than 32 bits. Sqlite produces the correct 64 bit values, but Tcl as distributed with Cygwin cannot grok 64 bit ints, so the comparisons fail. Would it be possible to change Sqlite's test harness to compare SQL results as strings rather than as integers? Then it would not matter if Tcl worked in 64 bit or not. ---- _2006-Sep-01 15:50:48 by drh:_ {linebreak} The test suite has been revised so that it now works with Tcl8.5. But, no, it is not practical to rewrite the tests to compare the results using strings instead of integers in order to work with the (broken) tcl implementation that comes with cygwin. ---- _2006-Sep-06 02:39:24 by anonymous:_ updating to latest cvs-checkout to get the aforementioned fix for: date-3.17... 
Ok /usr/ports/sqlite-3.3.7/build/.libs/testfixture: invalid command name "clock" while executing i can verify that _that_ is now ok: ... date-3.14... Ok date-3.15... Ok date-3.16... Ok date-3.17... Ok date-4.1... Expected: [2006-09-01] Got: [2006-09-06] date-5.1... Ok date-5.2... Ok date-5.3... Ok ... but now, 'make test' fails next @: delete-8.4... Ok delete-8.5... Ok delete-8.6... Ok delete-8.7... Ok /usr/ports/sqlite-cvs/build/.libs/testfixture: error deleting "test.db": not owner while executing "file delete -force test.db" (file "../test/tester.tcl" line 62) invoked from within "source $testdir/tester.tcl" (file "../test/delete2.test" line 36) invoked from within "source $testfile" ("foreach" body line 4) invoked from within "foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] if {[lsearch -exact $EXCLUDE $tail]>=0} continue so..." (file "../test/quick.test" line 66) make: *** [test] Error 1 ---- _2006-Sep-06 11:11:19 by drh:_ {linebreak} Run the build starting from an empty directory as a non-root user. ---- _2006-Sep-06 13:27:18 by anonymous:_ {linebreak} per INSTALL instructions, i did: cvs -d :pserver:anonymous@www.sqlite.org:/sqlite checkout -d sqlite-cvs sqlite cd /usr/ports/sqlite-cvs mkdir build cd build ../configure \ ... make chown -R myuser:wheel /usr/ports/sqlite-cvs sudo -u myuser make test and, as reported, the error was the result. ---- _2006-Sep-30 21:43:45 by anonymous:_ {linebreak} bump. anyone? ---- _2006-Sep-30 22:19:24 by anonymous:_ {linebreak} If you don't happen to be testing on Linux/gcc or Windows/VC++ I find that the Tcl test results have more than a few failures. It is not always easy to discern which failures are due to some odd quirk of Tcl or whether it is a legitimate SQLite issue on a given platform. Be prepared to change test scripts and tinker with the code.
#e8e8bd 1961 build active 2006 Sep anonymous 2006 Sep 3 3 3.3.7 : wrong readline.h path in Makefile We have readline.h installed in /usr/local/include/readline. In SQLite it is accessed with : #include But unfortunately in the Makefile, READLINE flags contains : -I /usr/local/include/readline instead of -I /usr/local/include
#f2dcdc 1960 code active 2006 Sep anonymous 2006 Sep 4 2 Issues with .import in sqlite.exe I ran into two possible problems when using the .import operation in sqlite3: - .import seems to be confused by NULLs; in the file NullTest.dat the null is at the end of the line - .import chokes on empty field when importing to field of type: integer PRIMARY KEY AUTOINCREMENT For example line like: ~2~3~4~5~6 Example: Schema: --Table with autoincrement CREATE TABLE test1( id integer PRIMARY KEY AUTOINCREMENT, c1 integer NULL , c2 integer NULL , c3 text NULL, c4 text NULL, c5 text NULL ); -- Table with no autoincrement field CREATE TABLE test2( id integer NULL, c1 integer NULL , c2 integer NULL , c3 text NULL, c4 text NULL, c5 text NULL ); .separator ~ .import NullTest.dat test1 .import NullTest.dat test2 .import NoNullTest.dat test2 I have short test files that I can email to the person who is looking at this.
#e8e8bd 1959 new active 2006 Sep anonymous 2006 Sep 4 3 Unblockable TEMP TABLES TEMP TABLES lock the complete database as long as a prepared stmt is running at the main database. Temp Tables are in separate files... so I hope it can be changed without big problems. The new driver for OpenOffice.org needs temp tables that won't lock the complete database because of cached resultsets. It can only be emulated with "attach a database, copy data, detach". But the problem is that the API of OOo needs to change the cached resultset. It isn't possible to add this without temporary tables. So the driver could use sqlite3_update_hook() to know when it needs to reload the resultset. Thanks
#f2dcdc 1958 code active 2006 Sep anonymous 2006 Sep 4 4 some printf tests fail with Tcl 8.5a5, ok with Tcl 8.4 Tcl 8.5a5: printf-1.7.6... Expected: [Three integers: (1000000) ( f4240) (3641100)] Got: [Three integers: ( 1000000) ( f4240) (3641100)] printf-1.8.6... Expected: [Three integers: (999999999) (3b9ac9ff) (7346544777)] Got: [Three integers: ( 999999999) (3b9ac9ff) (7346544777)] printf-1.9.7... Expected: [Three integers: ( 0) ( 0x0) ( 0)] Got: [Three integers: ( 0) ( 0) ( 0)] Tcl 8.4: printf-1.7.6... Ok printf-1.8.6... Ok printf-1.9.7... Ok _2006-Sep-05 02:27:00 by anonymous:_ {linebreak} This is not directly related to the ticket, but concerns the same test file... Why are these tests not run on windows? I thought sqlite3_mprintf() is platform independent. if {$::tcl_platform(platform)!="windows"} { set m 1 foreach {a b} {1 1 5 5 10 10 10 5} { set n 1 foreach x {0.001 1.0e-20 1.0 0.0 100.0 9.99999 -0.00543 -1.0 -99.99999} { do_test printf-2.$m.$n.1 [subst { sqlite3_mprintf_double {A double: %*.*f} $a $b $x }] [format {A double: %*.*f} $a $b $x] do_test printf-2.$m.$n.2 [subst { sqlite3_mprintf_double {A double: %*.*e} $a $b $x }] [format {A double: %*.*e} $a $b $x] do_test printf-2.$m.$n.3 [subst { sqlite3_mprintf_double {A double: %*.*g} $a $b $x }] [format {A double: %*.*g} $a $b $x] do_test printf-2.$m.$n.4 [subst { sqlite3_mprintf_double {A double: %d %d %g} $a $b $x }] [format {A double: %d %d %g} $a $b $x] do_test printf-2.$m.$n.5 [subst { sqlite3_mprintf_double {A double: %d %d %#g} $a $b $x }] [format {A double: %d %d %#g} $a $b $x] do_test printf-2.$m.$n.6 [subst { sqlite3_mprintf_double {A double: %d %d %010g} $a $b $x }] [format {A double: %d %d %010g} $a $b $x] incr n } incr m } } ;# endif not windows
#f2dcdc 1953 code active 2006 Sep anonymous TclLib 2006 Sep 4 3 Fix for false 64-bit comparisons "make test" failures on Cygwin The trivial patch below allows Cygwin to correctly pass all (two dozen or so) 64-bit integer-related tests in "make test". It does so by treating all 64-bit integer SQL results as strings. (Note: SQLite has always produced correct 64-bit integer results, it's just that the test harness on Cygwin produces false failures without this patch.) There is no impact to other platforms, and allows us unfortunate Windows users to be useful members of society. RCS file: /sqlite/sqlite/src/tclsqlite.c,v retrieving revision 1.172 diff -u -r1.172 tclsqlite.c --- src/tclsqlite.c 31 Aug 2006 15:07:15 -0000 1.172 +++ src/tclsqlite.c 1 Sep 2006 17:27:44 -0000 @@ -432,7 +432,12 @@ if( v>=-2147483647 && v<=2147483647 ){ pVal = Tcl_NewIntObj(v); }else{ +#ifndef __CYGWIN__ pVal = Tcl_NewWideIntObj(v); +#else + int bytes = sqlite3_value_bytes(pIn); + pVal = Tcl_NewStringObj((char *)sqlite3_value_text(pIn), bytes); +#endif } break; } @@ -1420,7 +1425,11 @@ if( v>=-2147483647 && v<=2147483647 ){ pVal = Tcl_NewIntObj(v); }else{ +#ifndef __CYGWIN__ pVal = Tcl_NewWideIntObj(v); +#else + pVal = dbTextToObj((char *)sqlite3_column_text(pStmt, i)); +#endif } break; } Example test failures before patch: $ ./testfixture.exe test/misc2.testmisc2-1.1... Ok misc2-1.2... Ok misc2-2.1... Ok misc2-2.2... Ok misc2-2.3... Ok misc2-3.1... Ok misc2-4.1... Expected: [4000000000] Got: [-294967296] misc2-4.2... Expected: [4000000000 2147483648] Got: [-294967296 -2147483648] misc2-4.3... Ok misc2-4.4... Expected: [1 2147483648 2147483647] Got: [1 -2147483648 2147483647] misc2-4.5... Expected: [1 4000000000 2147483648 2147483647] Got: [1 -294967296 -2147483648 2147483647] misc2-4.6... Expected: [1 2147483647 2147483648 4000000000] Got: [1 2147483647 -2147483648 -294967296] misc2-5.1... Ok misc2-6.1... Ok misc2-7.1... Ok misc2-7.2... Ok misc2-7.3... Ok misc2-7.4... Ok misc2-7.5... 
Ok misc2-7.6... Ok misc2-7.7... Ok misc2-7.8... Ok misc2-8.1... Ok misc2-9.1... Ok misc2-9.2... Ok misc2-9.3... Ok misc2-10.1... Ok Thread-specific data deallocated properly 5 errors out of 28 tests Failures on these tests: misc2-4.1 misc2-4.2 misc2-4.4 misc2-4.5 misc2-4.6 After patch applied: $ ./testfixture.exe test/misc2.testmisc2-1.1... Ok misc2-1.2... Ok misc2-2.1... Ok misc2-2.2... Ok misc2-2.3... Ok misc2-3.1... Ok misc2-4.1... Ok misc2-4.2... Ok misc2-4.3... Ok misc2-4.4... Ok misc2-4.5... Ok misc2-4.6... Ok misc2-5.1... Ok misc2-6.1... Ok misc2-7.1... Ok misc2-7.2... Ok misc2-7.3... Ok misc2-7.4... Ok misc2-7.5... Ok misc2-7.6... Ok misc2-7.7... Ok misc2-7.8... Ok misc2-8.1... Ok misc2-9.1... Ok misc2-9.2... Ok misc2-9.3... Ok misc2-10.1... Ok Thread-specific data deallocated properly 0 errors out of 28 tests Failures on these tests: The only new regression on Cygwin is this test, which is expected: types3-2.3... Expected: [wideInt] Got: [] _2006-Sep-01 18:55:25 by drh:_ {linebreak} The TCL interface is more than just part of the test harness. A lot of people use the TCL interface as part of their applications. I believe what this patch does is mask a real problem. I would prefer to fix the underlying problem, not just treat the symptom. ---- _2006-Sep-02 02:48:57 by anonymous:_ {linebreak} I have no interest in fixing bugs in Tcl itself on Cygwin. I just want to reliably build and test SQLite. The proposed fix is purely pragmatic and is intended only for the test harness. Indeed, when dealing with testing only, the fix is not Cygwin-specific and would work on any platform. The test harness under stock Cygwin as it stands simply does not work for 64 bit values. When you see such a failure you assume that SQLite is in error. Perhaps a compromise can be made and the code fix in question can be wrapped in #ifdef SQLITE_TESTFIXTURE or equivalent instead of #ifdef __CYGWIN__. I would hate to see someone else waste any time on this trivially fixable issue. 
---- _2006-Sep-02 13:27:08 by drh:_ {linebreak} Perhaps you could put an "if" statement in the test scripts that skipped over the tests that do not work if running under cygwin. You can probably figure out if you are running under cygwin by looking at elements of the tcl_platform array. ---- _2006-Sep-02 13:48:56 by drh:_ {linebreak} I retract my previous suggestion. I do not want such patches in the SQLite source tree. I will resist any patches such as shown here because they are really hacks to work around a faulty Tcl build on Cygwin. The correct way to fix this is to fix the Tcl build for Cygwin. This is probably as simple as download a copy of Tcl and recompiling. I'm curious to know why the default Tcl build for Cygwin only supports 32-bit integers. Is there some problem with 64-bit integer support on Cygwin? The patch shown in the Description section above is not good because it presumes that Cygwin will always be broken. I think a better assumption is that Cygwin will get fixed. And I do not want to cripple the TCL interface to work around a bug that is not related to SQLite and which might not exist on every system. That is *so* wrong. I will be willing to put in a test that checks for the cygwin brokenness and prints a warning to the user. Perhaps something like this: if {"*[db eval {SELECT 10000000000}]*"!="*10000000000*"} { puts "*********** WARNING *************" puts "Your build of TCL only supports 32-bit integers." puts "This will cause many test failures. To run these" puts "tests you must install a version of TCL that supports" puts "64-bit integers." exit } The question is, does that test correctly detect the broken Cygwin? Since I have no ready access to a windows machine, I have no way of testing it. ---- _2006-Sep-02 14:06:28 by anonymous:_ {linebreak} Then you would have an on-going maintenance issue with future tests. If'ing out valid tests just masks the problem and defeats the purpose of having a test regression suite. 
If a test fails legitimately, it should be reported as such. But these particular 64-bit tests work correctly if the simple proposed patch to the test harness is checked in. There is nothing wrong with the tests themselves - just the test harness on certain platforms for which Tcl does support 64-bit integers for whatever reason. Is the purpose of the test suite to test SQLite or Tcl implementations? I know that Cygwin is a considered a tier "C" platform for SQLite, but appreciate that from a Cygwin environment me and many others have reported at least couple of dozen non-platform-specific SQLite bugs over the past year. You probably have as many or more Cygwin users on the mailing list than Mac OSX users. Why put up artificial ideological roadblocks? ---- _2006-Sep-02 14:10:35 by anonymous:_ {linebreak} Please do not check the "Your build of TCL only supports 32-bit integers". It is couter-productive to exit when the great majority of tests will pass. Such a check will basically exclude stock Cygwin installs from testing SQLite. Given the choice between having a broken test harness and this TCL 32-bit check, it is more useful to have a broken test harness. ---- _2006-Sep-04 01:25:00 by anonymous:_ {linebreak} {link: http://sourceforge.net/tracker/index.php?func=detail&aid=1551762&group_id=10894&atid=110894 Cygwin Tcl 8.5 64-bit integer math bug report} {link: http://sourceforge.net/tracker/download.php?group_id=10894&atid=110894&file_id=191898&aid=1551762 Cygwin Tcl 8.5a5 64-bit integer math fix}
#f2dcdc 1954 code active 2006 Sep anonymous Unknown 2006 Sep 1 1 Dual Core Processor Lockup I seem to be seeing a problem with dual core processors in that the Open call is locking and does not release or throw an exception. It does not occur every time, but occurs around 50% of the time. I have not seen the problem on non dual core processors. _2006-Sep-02 21:06:38 by anonymous:_ {linebreak} This ticket is way too vague to be actionable. What operating system? AMD or Intel? What specific version of SQLite? Was the library precompiled or did you compile it yourself? Personally, I can report no errors or problems with dual-core CPU's on Windows XP using an AMD X2 4400+ dual-core CPU. Tested with both a 32-bit build and a 64-bit build of SQLite on x64 Windows.
#f2dcdc 2048 code active 2006 Oct anonymous 2006 Oct drh 1 1 table_info on columns with no default value are returned as string On line 486, noDflt is declared as{linebreak} static const Token noDflt = { (unsigned char*)"", 0, 0 };{linebreak} {linebreak} And on line 493:{linebreak} if( pDflt->z ){{linebreak} sqlite3VdbeOp3(v, OP_String8, 0, 0, (char*)pDflt->z, pDflt->n);{linebreak} }else{{linebreak} sqlite3VdbeAddOp(v, OP_Null, 0, 0);{linebreak} {linebreak} So columns with no default value aren't being set to null because the (pDflt->z) condition is non-null.
#f2dcdc 2043 code active 2006 Oct anonymous 2006 Oct 1 1 Spaces in view statement If you have a table defined with fields that contain spaces. create table table1 ("field one", "field two", "field three"); Then you do a select select "field one" from table1; That works fine. However if you save it as a view create view view_one as select "field one" from table1; Then if you run a select on the view it fails. select * from view_one;
#e8e8bd 1820 warn active 2006 May anonymous TclLib 2006 Oct anonymous 5 1 make: *** [tclsqlite.lo] Error 1 unding ReHatlinux9.0 $ make .............. .............. ./libtool --mode=compile gcc -g -O2 -DOS_UNIX=1 -DHAVE_USLEEP=1 -DHAVE_FDATASYNC=1 -I. -I../sqlite-3.3.5/src -DNDEBUG -DTHREADSAFE=0 -DSQLITE_THREAD_OVERRIDE_LOCK=-1 -DSQLITE_OMIT_CURSOR -c ../sqlite-3.3.5/src/tclsqlite.c gcc -g -O2 -DOS_UNIX=1 -DHAVE_USLEEP=1 -DHAVE_FDATASYNC=1 -I. -I../sqlite-3.3.5/src -DNDEBUG -DTHREADSAFE=0 -DSQLITE_THREAD_OVERRIDE_LOCK=-1 -DSQLITE_OMIT_CURSOR -c ../sqlite-3.3.5/src/tclsqlite.c -fPIC -DPIC -o .libs/tclsqlite.o ../sqlite-3.3.5/src/tclsqlite.c: In function `DbUpdateHandler': ../sqlite-3.3.5/src/tclsqlite.c:333: warning: passing arg 3 of `Tcl_ListObjAppendElement' makes pointer from integer without a cast ../sqlite-3.3.5/src/tclsqlite.c: In function `tclSqlFunc': ../sqlite-3.3.5/src/tclsqlite.c:419: warning: passing arg 1 of `Tcl_NewByteArrayObj' discards qualifiers from pointer target type ../sqlite-3.3.5/src/tclsqlite.c:427: warning: assignment makes pointer from integer without a cast ../sqlite-3.3.5/src/tclsqlite.c:485: `Tcl_WideInt' undeclared (first use in this function) ../sqlite-3.3.5/src/tclsqlite.c:485: (Each undeclared identifier is reported only once ../sqlite-3.3.5/src/tclsqlite.c:485: for each function it appears in.) 
../sqlite-3.3.5/src/tclsqlite.c:485: parse error before "v" ../sqlite-3.3.5/src/tclsqlite.c:486: `v' undeclared (first use in this function) ../sqlite-3.3.5/src/tclsqlite.c: In function `DbObjCmd': ../sqlite-3.3.5/src/tclsqlite.c:685: warning: passing arg 3 of `Tcl_GetIndexFromObj' from incompatible pointer type ../sqlite-3.3.5/src/tclsqlite.c:1309: warning: passing arg 2 of `Tcl_GetVar2Ex' discards qualifiers from pointer target type ../sqlite-3.3.5/src/tclsqlite.c:1331: `Tcl_WideInt' undeclared (first use in this function) ../sqlite-3.3.5/src/tclsqlite.c:1331: parse error before "v" ../sqlite-3.3.5/src/tclsqlite.c:1332: `v' undeclared (first use in this function) ../sqlite-3.3.5/src/tclsqlite.c:1382: warning: passing arg 1 of `Tcl_NewByteArrayObj' discards qualifiers from pointer target type ../sqlite-3.3.5/src/tclsqlite.c:1390: warning: assignment makes pointer from integer without a cast ../sqlite-3.3.5/src/tclsqlite.c:1838: warning: passing arg 3 of `Tcl_GetIndexFromObj' from incompatible pointer type ../sqlite-3.3.5/src/tclsqlite.c: In function `DbMain': ../sqlite-3.3.5/src/tclsqlite.c:2024: warning: passing arg 2 of `Tcl_CreateObjCommand' discards qualifiers from pointer target type make: *** [tclsqlite.lo] Error 1 To solve this error, install ActiveTCL and then: ../sqlite-3.3.8/configure --with-tcl=/usr/local/ActiveTcl/lib make
#f2dcdc 2037 code active 2006 Oct anonymous 2006 Oct 1 1 Sqlite3 can't use datafile in Chinese path with Win2000 and WindowsXP. Sqlite3 can't use datafile in Chinese path with Win2000 and WindowsXP. This is a bug in os_win.c . My firend modify code to so , it work right. /* ** Convert a UTF-8 string to UTF-32. Space to hold the returned string ** is obtained from sqliteMalloc. */ static WCHAR *utf8ToUnicode(const char *zFilename){ int nChar; WCHAR *zWideFilename; if( !isNT() ){ return 0; } nChar = MultiByteToWideChar(CP_THREAD_ACP, MB_COMPOSITE, zFilename, -1, NULL, 0); zWideFilename = sqliteMalloc( nChar*sizeof(zWideFilename[0]) ); if( zWideFilename==0 ){ return 0; } nChar = MultiByteToWideChar(CP_THREAD_ACP, MB_COMPOSITE, zFilename, -1, zWideFilename, nChar); if( nChar==0 ){ sqliteFree(zWideFilename); zWideFilename = 0; } return zWideFilename; } /* ** Convert UTF-32 to UTF-8. Space to hold the returned string is ** obtained from sqliteMalloc(). */ static char *unicodeToUtf8(const WCHAR *zWideFilename){ int nByte; char *zFilename; nByte = WideCharToMultiByte(CP_THREAD_ACP, WC_COMPOSITECHECK, zWideFilename, -1, 0, 0, 0, 0); zFilename = sqliteMalloc( nByte ); if( zFilename==0 ){ return 0; } nByte = WideCharToMultiByte(CP_THREAD_ACP, WC_COMPOSITECHECK, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ sqliteFree(zFilename); zFilename = 0; } return zFilename; } _2006-Oct-20 10:26:46 by anonymous:_ {linebreak} The proposed fix is completely wrong, but the bug exists nonetheless. The problem is that SQLite expects file names in UTF-8 encoding (and there is probably bug in your application too guessing from the proposed fix). While this works fine on NT systems where the UTF-8 encoding is converted to UTF-16 and passed to system wide-character APIs, the code path for non-NT systems (Win 9x) with ANSI-only APIs doesn't convert the UTF-8 file names into the ANSI code page which is expected by the system APIs.
#f2dcdc 2032 code active 2006 Oct anonymous 2006 Oct 1 1 AV in btree.c running FTS2 compiled with SQLITE_OMIT_SHARED_CACHE If compiled with FTS2 support as well as SQLITE_OMIT_SHARED_CACHE=1, the sqlite console application causes an Access Violation: btree.c, line 3538: Read of address x00000014 if( pCur->idx>=pPage->nCell ){ if the SQL (attached) is executed. I believe that this is a bug in btree.c, for the following reasons: *: The AV does not show if the #ifndef SQLITE_OMIT_SHARED_CACHE (lines 3514 and 3525) are commented out. *: From my reading, all virtual tables use the extension API only and do not access the btree directly. _2006-Oct-25 06:30:43 by shess:_ {linebreak} Note that the attached SQL has exactly 273 INSERT statements. 273==256+16+1, so this is kicking in at a merge point. Don't know how that's relevant, but it seems suspicious. ---- _2006-Oct-25 16:31:34 by anonymous:_ {linebreak} Many thanks for looking into this - it was driving me mad until I came up with the rather simple SQL to reproduce it. I am not sure if the number of INSERTS is 100% the number needed to cause the problem, but the crash always happens after the exact same number of inserts. I did not count them but added roughly enough of them to cause the error. Sidenote: I can also make FTS2 crash at another point, which I thought was related to the sizeof() bug I also reported. But apparently it is not. Unfortunately I can not provide a test case for this since I can reproduce it only after adding some 3000 or so copyrighted documents to an empty database. At the time of the crash the DB is about 250 MB in size. However, I will run a test after the next commits to FTS2. ---- _2006-Oct-26 08:57:41 by anonymous:_ {linebreak} My previous comments from yesterday seem to be invalidated by the latest checkins [3486], [3488] and [3489]. Many thanks for those! However, the problem with =SQLITE_OMIT_SHARED_CACHE= still persists.
#f2dcdc 2028 code active 2006 Oct anonymous 2006 Oct 4 2 FTS1: UNIQUE() expression and UPDATE command not working I'm working with tables, containing around 1,4 million entries (1GB file size). To allow faster fulltext search I tried FTS1 now. What I saw is: creating the virtual FTS1 table with one keyword "UNIQUE(code), reference, text, ..." I had the idea to have faster access to "code", because this entry is only one time existing in table. In my actual SQLITE table "UNIQUE" was good idea, because "UPDATE"ing of entries was much faster as without "UNIQUE" expression. Unfortunately, in that moemnt I use "UNIQUE" expression in fulltext table, the FTS1 table doesn't accept insertion of entries like "INSERT into receipe (code, reference, text) values ('4711', 'RefnotAvailable', 'Test');" So I removed the "UNIQUE" keyword, knowing that later "UPDATE" command to modify entries will be slower. So I built new table with additional FTS1 fulltext table. Then I tried to "UPDATE" one entry. In that moment the program stopped immediately working (WIN XP system), what means that the application stopped without comment and returned to desktop. I tried the same in SQLITE3.exe (command line program) but also that program suspended immediately after the UPDATE command (like "UPDATE Volltext SET code = '4710', reference = 'RefChanged', text = 'notext';" That seems to me to be a bug. By the way, creating fulltext table to search inside my whole database increased the filesize a lot (4 times). May be that is solved in FTS2? Last wish: Fulltext search like "foo*" to find "fool" and "foot" would be a really great improvement. Best regards Ingo _2006-Oct-23 13:56:59 by anonymous:_ {linebreak} Ooops, as I saw today, also "DELETE" statements are causing SQLITE to stop working (crash). Program returns to Desktop on WIN XP after DELETE command.
#f2dcdc 2027 code active 2006 Oct anonymous 2006 Oct 1 1 FTS: Phrase searches return Offsets for individual phrase words With FTS (one as well as two), phrase searches return offsets for all individual words instead of the phrase as a whole, like in select name, ingredients from recipe where ingredients match '"broccoli cheese"'; Offsets() returns at least two matches for both individual words: *: broccoli *: cheese
#f2dcdc 2026 code active 2006 Oct anonymous 2006 Oct 4 5 \n in .output is not allowed even with quote *.output drive:\nabc.txt* {linebreak} *.output e:\new0.txt* {linebreak} *.output z:\new1.txt* {linebreak} *.output "c:\new2.txt"* will result in an error omitting the *\* will just put the file in the same folder as the sqlite3.exe (doesn't help) solve it by replacing *\* with */* _2006-Oct-15 21:28:55 by anonymous:_ {linebreak} How about c:\\new.txt? ---- _2006-Oct-16 11:49:58 by anonymous:_ {linebreak} How about "c:/new.txt"?
#f2dcdc 2025 code active 2006 Oct anonymous 2006 Oct drh 5 5 Add pragma command to return loaded extension list How about adding a new pragma command to return the loaded extension list?
#f2dcdc 2024 code active 2006 Oct anonymous 2006 Oct drh 5 5 Add If not exist syntax to Create Virtual Table Parser Enhancement request: is it possible to add If not exist syntax to Create Virtual Table? all other create schema support such syntax.
#f2dcdc 2022 code active 2006 Oct anonymous 2006 Oct 1 1 .import command is not working I have a windows system running version 3.3.6 and a linux system running 3.3.3 when I run .import catalog.csv TEMPDATA on the windows system, it works fine. On the linux system, no data gets imported. There are no error messages. Is this a known issue in 3.3.3? _2006-Oct-14 01:15:07 by anonymous:_ {linebreak} A sample SQL schema and a 3 line import file demonstrating the problem would be helpful. ---- _2006-Nov-08 15:48:28 by anonymous:_ {linebreak} Schema: CREATE TABLE Catalog ( UPC text , SKU text primary key , DESC text , PACK text , PRICE text , SIZE text ); test.csv contents 00000000103,103,EFFEM CHOCOLATE FUNSIZE 75PPK 1 X1EA,1,$155.94,1 EA 00000000152,414317,CLEARLIGHT SLUSH CUP 16OZ CDL16 1X50EA,1,$5.04,50 EA 00000000152,56880,CLEARLIGHT SLUSH CUP 16OZ CDL16 20X50EA,20,$96.31,50 EA Command that does nothing: .import test.csv Catalog ---- _2006-Nov-08 15:52:40 by anonymous:_ {linebreak} Sorry, I'll try this again: Schema: CREATE TABLE Catalog ( UPC text , SKU text primary key , DESC text , PACK text , PRICE text , SIZE text ); test.csv contents 00000000103,103,EFFEM CHOCOLATE FUNSIZE 75PPK 1 X1EA,1,$155.94,1 EA 00000000152,414317,CLEARLIGHT SLUSH CUP 16OZ CDL16 1X50EA,1,$5.04,50 EA 00000000152,56880,CLEARLIGHT SLUSH CUP 16OZ CDL16 20X50EA,20,$96.31,50 EA Command that does nothing: .import test.csv Catalog
#f2dcdc 2019 code active 2006 Oct anonymous 2006 Oct 1 1 FTS1: Create table in transaction raises Out of Sequence error (21) This error: SQL error: library routine called out of sequence is caused if the following script is executed by the Windows version of the SQLite3 console application with .load fts1.dll extension. If it does not show immediately, it will eventually surface if the script is run multiple times. The cause of the problem seems to be related to the transaction, the create virtual table as well as the amount of data inserted. Finally, the script is attached.
#f2dcdc 2017 code active 2006 Oct anonymous 2006 Oct 1 1 DROP TABLE fails on FTS1 utility tables with certain OMIT_s defined The following SQL fails when SQLite is compiled with the SQLITE_OMIT_ defines stated below: create virtual table foo using fts1 (content); drop table foo; create virtual table foo using fts1 (content); Cause: The foo_content and foo_term tables are not deleted. To verify, please define these SQLITE_OMIT_s: OPTS += -DSQLITE_OMIT_ALTERTABLE OPTS += -DSQLITE_OMIT_ANALYZE OPTS += -DSQLITE_OMIT_AUTHORIZATION OPTS += -DSQLITE_OMIT_AUTOINCREMENT OPTS += -DSQLITE_OMIT_AUTOVACUUM OPTS += -DSQLITE_OMIT_BETWEEN_OPTIMIZATION OPTS += -DSQLITE_OMIT_BLOB_LITERAL OPTS += -DSQLITE_OMIT_CAST OPTS += -DSQLITE_OMIT_CHECK OPTS += -DSQLITE_OMIT_COMPLETE OPTS += -DSQLITE_OMIT_COMPOUND_SELECT OPTS += -DSQLITE_OMIT_EXPLAIN OPTS += -DSQLITE_OMIT_FLAG_PRAGMAS OPTS += -DSQLITE_OMIT_FOREIGN_KEY OPTS += -DSQLITE_OMIT_GET_TABLE OPTS += -DSQLITE_OMIT_GLOBALRECOVER OPTS += -DSQLITE_OMIT_INTEGRITY_CHECK OPTS += -DSQLITE_OMIT_LIKE_OPTIMIZATION OPTS += -DSQLITE_OMIT_MEMORYDB OPTS += -DSQLITE_OMIT_OR_OPTIMIZATION OPTS += -DSQLITE_OMIT_ORIGIN_NAMES OPTS += -DSQLITE_OMIT_PAGER_PRAGMAS OPTS += -DSQLITE_OMIT_PROGRESS_CALLBACK OPTS += -DSQLITE_OMIT_QUICKBALANCE OPTS += -DSQLITE_OMIT_REINDEX OPTS += -DSQLITE_OMIT_SCHEMA_VERSION_PRAGMAS OPTS += -DSQLITE_OMIT_SHARED_CACHE OPTS += -DSQLITE_OMIT_SUBQUERY OPTS += -DSQLITE_OMIT_TCL_VARIABLE OPTS += -DSQLITE_OMIT_TEMPDB OPTS += -DSQLITE_OMIT_TRACE OPTS += -DSQLITE_OMIT_TRIGGER OPTS += -DSQLITE_OMIT_UTF16 OPTS += -DSQLITE_OMIT_VACUUM OPTS += -DSQLITE_OMIT_VIEW Without the SQLITE_OMIT_s, everything works just fine.
#f2dcdc 2015 code active 2006 Oct anonymous 2006 Oct anonymous 5 4 Enhancement Req: "EXPRn" PRAGMA for result set expression column names I would like to propose a new PRAGMA command that could be set to control how expression column names are represented in result sets. The current behavior appears to be that the expression that generated the column becomes the column name. For example, "SELECT COLUMN1, COLUMN 2, COLUMN1 + COLUMN2 FROM MYTABLE" yields: [COLUMN1] | [COLUMN2] | [COLUMN1 + COLUMN2] I propose a PRAGMA to remove the expression itself and replace it with 'EXPRn', where n is an ordinal based on the number of expression columns in the result set. (First expression is 0, second is 1, and so on): [COLUMN1] | [COLUMN2] | [EXPR0] _2006-Oct-12 09:17:25 by anonymous:_ {linebreak} Maybe you can just use SELECT COLUMN1, COLUMN 2, COLUMN1 + COLUMN2 AS EXPR0 FROM MYTABLE instead.
#f2dcdc 2014 code active 2006 Oct anonymous 2006 Oct anonymous 4 3 Enhancement Req: CREATE [TEMP | TEMPORARY] VIRTUAL TABLE Regarding the experimental VIRTUAL TABLE implementation, I believe it would be of benefit to provide a "temp", or volatile construct when working with them. -- From a SQL syntax perspective, adding an optional keyword "TEMP" to the declaration: CREATE [TEMP | TEMPORARY] VIRTUAL TABLE. -- From a code perspective, I would envision this to invoke xCreate as it does now, but when the database is closed, the table is automatically dropped like any temp table, and xDestroy invoked rather than xDisconnect. One sticky point I can picture is behavior when multiple opens exist to a single database from the same process space. Since virtual tables are already reference counted (in SQLite 3.3.8), perhaps the reference count could be made to span database handles and be bubbled up to the process level instead. That would allow the table to be CREATEd on one handle, CONNECTed on a second handle, then DISCONNECTed/DESTROYed based on the process-wide reference count. I feel that there are numerous implementation possibilities for this. Having no option to auto-drop a virtual table can lead to stray module references, creating SQLite database files that cannot be properly utilized if the vtable module is not available. Of course this can be implemented by the application calling DROP TABLE on its own, but an embedded solution that takes care of it seems more 'proper' given the thought that goes into SQLite as a whole.
#f2dcdc 2013 code active 2006 Oct anonymous 2006 Oct drh 4 3 Autoincrement increments on failing INSERT OR IGNORE % package require sqlite3 3.3.8 % sqlite3 db "" % db eval "CREATE TABLE test (counter INTEGER PRIMARY KEY AUTOINCREMENT, value text NOT NULL UNIQUE)" % db eval "INSERT INTO test VALUES(4, 'hallo')" % db eval "SELECT * FROM sqlite_sequence" test 4 % db eval "INSERT OR IGNORE INTO test(value) VALUES('hallo')" % db eval "SELECT * FROM sqlite_sequence" test 5 ---> there has no dataset been inserted but the AUTOINCREMENT-counter is incremented % db eval "INSERT OR IGNORE INTO test VALUES(4, 'hallo')" % db eval "SELECT * FROM sqlite_sequence" test 5 ---> right behavior: no inserted dataset and no incrementation This maybe could be a problem if the "INSERT OR IGNORE" happens very often.
#f2dcdc 2012 code active 2006 Oct anonymous 2006 Oct 4 3 trigger4.test aborts "make test" on Windows The failure to remove these files causes "make test" to abort without completing remaining tests: trigger4-99.9... Ok ./testfixture: error deleting "trigtest.db": permission denied while executing "file delete -force trigtest.db trigtest.db-journal" (file "test/trigger4.test" line 199) fix: Index: test/trigger4.test =================================================================== RCS file: /sqlite/sqlite/test/trigger4.test,v retrieving revision 1.9 diff -u -3 -p -r1.9 trigger4.test --- test/trigger4.test 4 Oct 2006 11:55:50 -0000 1.9 +++ test/trigger4.test 9 Oct 2006 14:09:07 -0000 @@ -195,6 +195,6 @@ do_test trigger4-7.2 { integrity_check trigger4-99.9 -file delete -force trigtest.db trigtest.db-journal +catch {file delete -force trigtest.db trigtest.db-journal} finish_test Not sure why this ticket was set to Fixed_in_3.0, but I can reproduce the "make test" abort on Windows. ---- _2006-Oct-11 00:27:16 by drh:_ {linebreak} I do not know why the resolution was set to "Fixed_In_3.0" either. It seems to have been set that why by the original submitter. I will fix this eventually, but since it does not represent a real malfunction, it has a lower priority.
#f2dcdc 2011 code active 2006 Oct anonymous 2006 Oct 3 2 Escaping Problem with .mode insert (double apostrophe) select * from messages where message_id="74B23AAF-5FFD6BF2"; 74B23AAF-5FFD6BF2|75|0|0|0|0|Europe talks, acts tough on Iran||http://www.ncr-iran.org/index.php?option=com_content&task=view&id=1052&Itemid=71|1140529235.0|By Gareth HardingThe United Press International, BRUSSELS -- Europeans are supposed to prefer soft to hard power, jaw-jaw to war-war and appeasement to confrontation. In short, in the words of neo-conservative scholar Robert Kagan: \'Americans are from Mars; Europeans are from Venus.\' The ".mode insert / .output" file looks like this. INSERT INTO messages VALUES('74B23AAF-5FFD6BF2',75,0,0,0,0,'Europe talks, acts tough on Iran','','http://www.ncr-iran.org/index.php?option=com_content&task=view&id=1052&Itemid=71',1140529235.0,'By Gareth HardingThe United Press International, BRUSSELS -- Europeans are supposed to prefer soft to hard power, jaw-jaw to war-war and appeasement to confrontation. In short, in the words of neo-conservative scholar Robert Kagan: \''Americans are from Mars; Europeans are from Venus.\'''); Now there are two apostrophes and the escaping is broken.
#f2dcdc 2010 code active 2006 Oct anonymous 2006 Oct 3 3 Timeout ignored in Shared-Cache locking model With shared cache enabled, the busy timeout seems to be ignored. SQLITE_BUSY comes immediately. This occurs at least for locking situations within one shared cache. My server (if i may call the cache sharing thread that way) has its own timeout handling. But I thought that a small timeout in sqlite3 might help to distinguish locks from deadlocks. This was reproduced with both Python wrappers. These just call sqlite3_enable_shared_cache and sqlite3_busy_timeout and then execute BEGIN IMMEDIATE from two connections. _2006-Oct-06 13:56:21 by anonymous:_ {linebreak} Weird, I thought it's my fault, but I see exactly the same behaviour with the C# ADO.NET 2.0 wrapper w/ the shared cache patch.
#e8e8bd 2090 build active 2006 Nov anonymous 2006 Nov 4 3 Test corrupt2.test fails: Solaris While running 'make test', I had come across the following errors: ... corrupt2-1.1... Ok corrupt2-1.2... Expected: [1 {file is encrypted or is not a database}] Got: [0 {table abc abc 2 {CREATE TABLE abc(a, b, c)}}] corrupt2-1.3... Expected: [1 {file is encrypted or is not a database}] Got: [0 {table abc abc 2 {CREATE TABLE abc(a, b, c)}}] corrupt2-1.4... Expected: [1 {database disk image is malformed}] Got: [0 {table abc abc 2 {CREATE TABLE abc(a, b, c)}}] corrupt2-1.5... Expected: [1 {database disk image is malformed}] Got: [0 {table abc abc 2 {CREATE TABLE abc(a, b, c)}}] ... Turns out that SQLite was working fine, but TCL was not corrupting the database correctly (who would ever have thought I would want to?). Apparently the 'a' mode for opening a file in Solaris was resetting the position of a write to the end of a file before actually writing (this appears to be a point of contention on the TCL bug tracker). From the way the test is written, it appears that portions of the file were to be overwritten, instead of appending to the end of the file. I will attach a patch to corrupt2.test after posting this message which, instead of attempting an overwrite, writes individual portions of the database file at a time, with requested strings inserted (technically, replacing) into the file at the requested offsets. _2006-Nov-29 23:37:05 by anonymous:_ {linebreak} I should mention that this is SunOS 5.8, and TCL version 8.4.14. ---- _2006-Dec-05 10:22:08 by anonymous:_ {linebreak} I also get these errors (same tcl version but not SunOS). Have you tried a simpler patch by replacing the 'a' in the open calls by a 'r+'? This solved the problem for me. ---- _2007-Jan-23 19:55:08 by anonymous:_ {linebreak} That worked! Thank you so much!
#e8e8bd 2087 new active 2006 Nov anonymous 2006 Nov 3 3 Ability to add a check constraint via the alter table command *This is an improvement request*. Add the possibility to add a check constraint to an existing table via the alter table statement. Example : CREATE TABLE a(x INTEGER, y INTEGER); INSERT INTO a VALUES (1,2); ALTER TABLE a ADD CHECK (y>0); Actual result : SQL error: near "CHECK": syntax error
#f2dcdc 2046 code active 2006 Oct anonymous 2006 Nov shess 1 1 FTS1 - Error closing database due to unfinished statements The following script causes an error in SQLite3.exe with FTS1. The error will surface only AFTER the script has finished AND you have typed .exit at the sqlite> prompt to quit SQLite3. The problem seems that the SELECT statement is not properly finalized due to an internal error. -- The next line is for Windows only, please adopt it -- if running Linux or use a FTS1-enabled SQLite3 binary. select load_extension ('fts1.dll'); CREATE TABLE Snippets( SnippetID INTEGER PRIMARY KEY, SnippetTitle TEXT, FtsID INTEGER); CREATE VIRTUAL TABLE SnippetsFts USING FTS1 (SnippetTitle, SnippetText); INSERT INTO Snippets (SnippetTitle) VALUES ('one'); INSERT INTO Snippets (SnippetTitle) VALUES ('two'); SELECT SnippetID FROM Snippets JOIN SnippetsFts ON FtsID = +SnippetsFts.RowID WHERE SnippetsFts MATCH 'one'; -- After the script is done, type .exit at the prompt to close the database. -- -- SQLite3 will close, but report the following error before doing so: -- -- "error closing database: Unable to close due to unfinalised statements" -- -- Does this qualify for a bug? The script is also attached to this ticket. _2006-Nov-27 22:58:49 by shess:_ {linebreak} Attached tighter version of the replication script, generated in isolating what mattered to the bug.
#f2dcdc 2089 code active 2006 Nov anonymous 2006 Nov 3 3 Decouple sqlite_int64 from other 64bit datatypes Currently sqlite3 makes the (valid) assumption that sqlite_int64 (or i64, u64) is 64 bit wide, matches with Tcl_WideInt and has the same datasize (and byte order) than double. The following patch fixes this and allows sqlite_int64 to be any integral type, e.g. a 32bit int (with the limitations of the reduced datatype size). The use case for this is for systems that do not support 64bit integers (e.g. lack of compiler feature, embedded system), db's of small data size, and systems without large file support. The patch allows compiling with -DSQLITE_INT64_TYPE=int -DSQLITE_32BIT_ROWID for such a system. _2006-Nov-29 01:13:07 by anonymous:_ {linebreak} Hm, now I wanted to add the patch file but I don't get the formatting right without editing the file and removing empty lines. How am I supposed to add a patch file (created with diff -ru)?
#f2dcdc 2086 code active 2006 Nov anonymous 2006 Nov 5 4 Alias in update Aliases in the UPDATE clause don't work, i.e.: UPDATE table1 t1 SET uid=(SELECT rowid FROM table2 WHERE uid=t1.uid AND data=t1.data); Code without aliases works fine.
#f2dcdc 2084 code active 2006 Nov anonymous 2006 Nov 4 3 Add API function mapping column decl string to SQLite type This is an API feature request. It would be nice to be able to obtain the SQLite type (e.g. SQLITE_INTEGER) from the declared column type string as returned by sqlite3_column_decltype. This was discussed briefly on the mailing list here: http://marc.10east.com/?l=sqlite-users&m=116422872301957&w=2 The function I have in mind is: int sqlite3_decltype_to_type(const char *decl) { Token decl_token; char aff_type; int col_type; decl_token.z = decl; if( decl_token.z ){ decl_token.n = strlen(decl_token.z); aff_type = sqlite3AffinityType(&decl_token); switch( aff_type ){ case SQLITE_AFF_INTEGER: col_type = SQLITE_INTEGER; break; case SQLITE_AFF_NUMERIC: /* falls through */ case SQLITE_AFF_REAL: col_type = SQLITE_FLOAT; break; case SQLITE_AFF_TEXT: col_type = SQLITE_TEXT; break; case SQLITE_AFF_NONE: col_type = SQLITE_BLOB; break; default: col_type = 0; /* unknown */ break; } } return col_type; } If this seems agreeable, I would be willing to put together a real patch. However, I would need some guidance on where it should go. I'm not sure what should happen when no type can be determined. _2006-Nov-26 22:32:45 by anonymous:_ {linebreak} According to the comment above the function sqlite3AffinityType: "If none of the substrings in the above table are found, SQLITE_AFF_NUMERIC is returned". The default condition in sqlite3_decltype_to_type will not be reached. ---- _2006-Nov-26 23:04:23 by anonymous:_ {linebreak} Thanks for pointing to that comment. Looks like SQLITE_AFF_NUMERIC is, for these purposes, unknown. 
So the case statement could be: switch( aff_type ){ case SQLITE_AFF_INTEGER: col_type = SQLITE_INTEGER; break; case SQLITE_AFF_REAL: col_type = SQLITE_FLOAT; break; case SQLITE_AFF_TEXT: col_type = SQLITE_TEXT; break; case SQLITE_AFF_NONE: col_type = SQLITE_BLOB; break; case SQLITE_AFF_NUMERIC: /* falls through */ default: col_type = 0; /* unknown */ break; } ---- _2006-Nov-27 02:43:06 by anonymous:_ {linebreak} Your first function was correct, it just had some unreachable code. There's no unknown affinity, in the absence of a match the affinity is assumed to be numeric: int sqlite3_decltype_to_type(const char *decl) { int type = SQLITE_FLOAT; if( decl ){ Token token; token.z = decl; token.n = strlen(token.z); switch( sqlite3AffinityType(&token) ){ case SQLITE_AFF_INTEGER: type = SQLITE_INTEGER; break; case SQLITE_AFF_TEXT: type = SQLITE_TEXT; break; case SQLITE_AFF_NONE: type = SQLITE_BLOB; break; default: break; } } return type; }
#f2dcdc 2083 code active 2006 Nov anonymous 2006 Nov 4 4 Give more detailed extension loading error information with dlerror When using loadable extensions. if dlopen returns an error then SQLite just gives a generic "unable to open shared library" message back. This makes it quite hard to diagnose problems. I suggest that on Unix platforms you append %s/dlerror() to the message and on Windows append %d/GetLastError()
#f2dcdc 2082 code active 2006 Nov anonymous 2006 Nov 3 4 UNIX: configure script doesn't enable loading of extensions The code in loadext.c:234 looks for HAVE_DLOPEN being #defined in order to enable library loading on Linux. However as best I can tell, the configure script never looks for dlopen. It does look for dlfcn.h. (Based on examining the output of configure and config.log) Consequently extension loading isn't available on Unixen that do support it if you build using ./configure Work around is to use this commmand: env CFLAGS="-DHAVE_DLOPEN" ./configure _2006-Nov-26 20:53:59 by drh:_ {linebreak} The "autoconf" command is busted in SuSE 10.2, which is the OS I am currently running. So I am unable to rebuild configure after editing configure.ac. Until the autoconf problem is resolved, I am unable to address the request in this ticket. Sorry. ---- _2006-Nov-26 22:36:48 by anonymous:_ {linebreak} What happens when you upgrade to the latest version of autoconf for SuSE? I'm sure someone on the list could help you resolve this issue. ---- _2006-Nov-27 07:05:27 by anonymous:_ {linebreak} I am actually using Gentoo. There is a trivial workaround as I noted so there is no need for a solution for 3.3.8. It would be nice to have it fixed for whatever version comes next so that I don't need to document the workaround. ---- _2006-Nov-27 18:32:11 by anonymous:_ {linebreak} Another open autoconf ticket: Check-in [3397] : Add HAVE_GMTIME_R and HAVE_LOCALTIME_R flags and use them if defined. Unable to modify the configure script to test for gmtime_r and localtime_r, however, because on my SuSE 10.2 system, autoconf generates a configure script that does not work. Bummer. Ticket #1906
#f2dcdc 2081 code active 2006 Nov anonymous 2006 Nov doughenry 1 1 sqlite3_column_decltype throws exception, if selection is grouped If I "group by" a selection over several columns I can't find out the orgin type of these columns using sqlite3_column_decltype(..). An exception is thrown. _2006-Nov-23 18:37:47 by anonymous:_ {linebreak} You also get no decl type from a subselect. This goes to the typeless nature of SQLite - I don't think a type can even be derived in this case.
#f2dcdc 2080 code active 2006 Nov anonymous 2006 Nov 4 4 Transferring large BLOB data not efficient The current approach for transferring BLOB data (sqlite3_bind_blob, sqlite3_column_blob) is not efficient for large BLOBs, since the whole BLOB data needs to be kept (multiple times?) in memory. It would be nice to have (additional) methods for streaming the (large) BLOB data to/from the database. Alternatively we could have methods for transferring the BLOB data in chunks. Same holds to some extent for large text fields. _2006-Dec-03 09:53:02 by anonymous:_ {linebreak} What is your definition of large? (1MB, 100MB, 1GB?) Note also that SQLite has an upper limit of 2GB on a field due to the use of signed int in the apis which is 32 bit even on 64 bit platforms. That will limit you to 2GB for blobs, 1GB for UTF-16 strings and somewhere in between for UTF-8 strings
#f2dcdc 2077 code active 2006 Nov anonymous 2006 Nov 2 1 Problems with using ASCII symbols 0x80 - 0xFF in database path Platform: Windows.
The SQLite library and executable doesn't see database files that are placed into folders named using ASCII symbols with codes 0x80-0xFF. That symbols are used to represent language-specific symbols (for example, Russian). In result, database cannot be placed into folder with name in Russian language. This bug is "unstable": it doesn't appear in all cases. Below are logs from my experiments with this problem. In all cases the path I requested exists, and database file is placed there. I have noticed that problem depends on filename path and name lengths. =========================================================
// creating test database
E:\!DISTRIB\sqlite-3_3_7>sqlite3.exe test.sqb
SQLite version 3.3.7
Enter ".help" for instructions
sqlite> create table a(id int);
sqlite> insert into a values (1);
sqlite> ^C
E:\!DISTRIB\sqlite-3_3_7>copy test.sqb e:\test.sqb
'3'\'`'a'Z'b'`'S'Q'_'` 'f'Q'[']'`'S: 1. //This means that 1 file was copied
E:\!DISTRIB\sqlite-3_3_7>sqlite3 e:\test.sqb
SQLite version 3.3.7
Enter ".help" for instructions
sqlite> select * from a;
1
sqlite> ^C
// Works!
E:\!DISTRIB\sqlite-3_3_7>mkdir e:\'/
//Using ASCII symbol "'/" (0x8D) to represent cyrillic letter which can be entered in the command line by using Alt+(141) combination
E:\!DISTRIB\sqlite-3_3_7>copy test.sqb E:\'/\test.sqb
'3'\'`'a'Z'b'`'S'Q'_'` 'f'Q'[']'`'S: 1.
E:\!DISTRIB\sqlite-3_3_7>sqlite3 e:\'/\test.sqb
SQLite version 3.3.7
Enter ".help" for instructions
sqlite> select * from a;
1
sqlite> ^C
// That is works too!
E:\!DISTRIB\sqlite-3_3_7>mkdir E:\'/\1
E:\!DISTRIB\sqlite-3_3_7>copy test.sqb E:\'/\1\test.sqb
'3'\'`'a'Z'b'`'S'Q'_'` 'f'Q'[']'`'S: 1.
E:\!DISTRIB\sqlite-3_3_7>sqlite3 E:\'/\1\test.sqb
Unable to open database "E:\(T\1\test.sqb": unable to open database file
// Doesn't work, and writes the wrong symbol "(T" in place of "'/"! I've noticed that if we convert symbol "'/" from DOS encoding to Windows encoding and then write it in DOS encoding, then we'll get "(T".
E:\!DISTRIB\sqlite-3_3_7>copy test.sqb E:\'/\tst.sqb
'3'\'`'a'Z'b'`'S'Q'_'` 'f'Q'[']'`'S: 1.
E:\!DISTRIB\sqlite-3_3_7>sqlite3 E:\'/\tst.sqb
SQLite version 3.3.7
Enter ".help" for instructions
sqlite> select * from a;
SQL error: no such table: a
sqlite> ^C
// It seems to work, i don't get an error, but doesn't see the tables! =(
=================================
#f2dcdc 2076 code active 2006 Nov anonymous 2006 Nov a.rottmann 1 1 % exists as value in varchar abnormal abend of client application (C++) when sqlite returns stream of data containing "%" value. Is % a special character? _2006-Nov-21 14:14:25 by anonymous:_ {linebreak} % is not a special character. Can you post a small C program demonstrating the problem?
#f2dcdc 2074 code active 2006 Nov anonymous 2006 Nov 4 4 feature request: .dump with page_size It would be useful for sqlite shell users to have a .dump command variant that would cause .dump to output the current database setting of "PRAGMA page_size;". Something similar to: sqlite> .dump2 PRAGMA page_size=16364; ...rest of dump... This way they can trivially preserve the page size when exporting/importing data from/to SQLite: sqlite3 old.db .dump2 | sqlite3 new.db without resorting to non-portable shell gymnastics: (echo -n "PRAGMA page_size=" ; sqlite3 old.db "PRAGMA page_size;" ; echo ";" ; ./sqlite3.exe old.db .dump) | sqlite3 new.db Perhaps other PRAGMA settings could also optionally be exported (legacy_file_format, cache size, etc).
#f2dcdc 2070 code active 2006 Nov anonymous 2006 Nov 4 4 No error for ambiguous result alias in WHERE clause This SELECT should result in an error since 'x' is ambiguous: select a x, 2*a x from (select 3 a union select 4 a) where x>3; 4|8 The current heuristic seems to be the first matching result set expression alias from left-to-right "wins". _2006-Nov-17 19:38:15 by anonymous:_ {linebreak} In this test case, the right-most ambiguous expression wins: CREATE TABLE t1(a); INSERT INTO t1 VALUES(3); INSERT INTO t1 VALUES(4); INSERT INTO t1 VALUES(5); select a*2 a, a from t1 where a>4; 10|5 It appears that table values take precedence over result set aliases in where clauses.
#f2dcdc 2068 code active 2006 Nov anonymous 2006 Nov 4 4 pkgIndex.tcl contains incomplete version number The pkgIndex.tcl file generated by sqlite 3.3.8 contains the line: package ifneeded sqlite3 3.3 ... Whereas the library actually provides version 3.3.8. In a 8.5 Tcl interpreter, this results in an error message when package require'd: attempt to provide package sqlite3 3.3 failed: package sqlite3 3.3.8 provided instead The solution seems to be to adjust the Makefile.in tcl_install target to pass $(RELEASE) rather than $(VERSION) to the tclinstaller.tcl script.
#f2dcdc 2066 code active 2006 Nov anonymous 2006 Nov 2 2 Incorrect error message in the case of ENOLCK If you're trying to open a sqlite database that is stored on a filesystem that doesn't support locking, then you'll get the error when you try to execute any commands on it: Error: file is encrypted or is not a database If you run sqlite under strace, you see: read(0, ".schema\n.quit\n", 4096) = 14 fcntl64(3, F_SETLK64, {type=F_RDLCK, whence=SEEK_SET, start=1073741824, len=1}, 0xafa5cd70) = 0 fcntl64(3, F_SETLK64, {type=F_RDLCK, whence=SEEK_SET, start=1073741826, len=510}, 0xafa5cd70) = 0 fcntl64(3, F_SETLK64, {type=F_UNLCK, whence=SEEK_SET, start=1073741824, len=1}, 0xafa5cd70) = 0 access("/mnt/www/zzz_old_sites/trac.db-journal", F_OK) = -1 ENOENT (No such file or directory) fstat64(3, {st_mode=S_IFREG|0644, st_size=584704, ...}) = 0 _llseek(3, 0, [0], SEEK_SET) = 0 read(3, "** This file contains an SQLite "..., 1024) = 1024 fcntl64(3, F_SETLK64, {type=F_UNLCK, whence=SEEK_SET, start=0, len=0}, 0xafa5cdd0) = -1 ENOLCK (No locks available) write(2, "Error: file is encrypted or is n"..., 46Error: file is encrypted or is not a database Sqlite should really check the exact error code, and give a more helpful error (eg "Locking not available on this filesystem. Databases may only be stored on filesystems that support locking")
#f2dcdc 1992 code active 2006 Sep anonymous 2006 Nov shess 1 1 FTS1: Problems after dropping utility tables There are problems if FTS1 utilities tables are dropped from a database. See following SQL for details. drop table if exists x; -- Create a FTS1 table. create virtual table x using fts1 ('content'); -- Drop table x_content: Works fine, but should this be allowed? -- The same errors below also show if table x_term is dropped. drop table x_content; -- All attempts to access table x now result in errors, -- including dropping table x. There seems to be no way out -- except of recreating the database. All three commands below -- cause the same error, regardless if executed in sequence -- or individually: insert into x (content) values ('one two three'); -- Error! delete from x; -- Error! drop table x; -- Error! Added "not exists" to allow dropping an fts table with corrupted backing. Allowing updates to such tables is unlikely to happen (not even clear what it would mean, in most cases!).
#f2dcdc 2063 code active 2006 Nov anonymous 2006 Nov 4 4 vtab_err.test fails if sqlite is compiled without -DSQLITE_MEMDEBUG I noticed that when running `make fulltest', vtab_err.test fails with an error message like this one (repeated over and over) if sqlite has been compiled without the option -DSQLITE_MEMDEBUG vtab_err-2.1... Error: invalid command name "sqlite_malloc_fail" altermalloc.test also has the same "sqlite_malloc_fail" command in it, but it doesn't cause an error because it skips the test if it detects that -DSQLITE_MEMDEBUG isn't available. I'll attach a patch that should fix it. The code is pretty much copied directly from altermalloc.test.
#f2dcdc 2062 code active 2006 Nov anonymous 2006 Nov 4 4 document 'pk' column of PRAGMA table_info() Comment in pragma.c and sqlite.org/pragma.html does not mention the sixth column of PRAGMA table_info().
#f2dcdc 2061 code active 2006 Nov anonymous 2006 Nov anonymous 5 5 cleanup for quickstart.html just compiled the C example from quickstart.html (gcc/glibc Debian SID) a small hint (a bit pea-counting): to avoid warnings either #include <stdlib.h> for the exit() function or (maybe better) use return instead of calling exit().
#f2dcdc 2059 code active 2006 Nov anonymous 2006 Nov 1 1 Still missing .DEF file from Windows 3.3.8 source code distribution The file sqlite3.def is missing from the zip archive of sources used to build sqlite3 on Windows. Ticket number 2031 was closed with a remark that this file is generated during the build process. That is true if one is building on Linux with MinGW32 configured as a cross-compiler. If one were building using that method then I assume one would not be downloading the src.zip archive anyway. My impression is that the src.zip archive is prepared once the build has been performed on Linux so Windows developers can directly build sqlite (and the generated files) without need of the other tools that the build process depends on. If this is accurate, then it would be very helpful if the src.zip archive could also include the sqlite3.def file. Without this file it is not possible for Windows developers to create a DLL from the src.zip archive. Thanks _2006-Nov-09 20:05:23 by anonymous:_ {linebreak} Works fine as is with MinGW ./configure && make sqlite3.exe
#f2dcdc 2057 code active 2006 Nov anonymous 2006 Nov 3 1 full_column_names when 2 or more tables are joined is not working Version 2.8 has the behavior described in the documentation in respect to full_column_names when 2 or more tables are present with (table/alias).*, but 3.3.8 doesn't, mixing the pragmas "full_column_names" and "short_column_names" can only force to have full_column_names always or never, some programs expect the behavior described in the documentation to remain working. _2006-Nov-08 20:10:13 by anonymous:_ {linebreak} Version 3.3.3 as well has the same problem. ---- _2006-Nov-09 09:34:52 by anonymous:_ {linebreak} Changing the line 977 of select.c (3.3.8) from: if( pEList->a[i].zName){ to: if( pEList->a[i].zName && pTabList->nSrc==1){ with pragma short_column_names = 0 behaves like 2.8 series.
#f2dcdc 2051 code active 2006 Nov anonymous 2006 Nov 5 5 minor documentation bug On the page http://www.sqlite.org/lang_attach.html you wrote: If an attached table doesn't have a duplicate table name in the main database, it doesn't require a database name prefix. When a database is attached, all of its tables which don't have duplicate names become the default table of that name. *Any tables of that name attached afterwards require the table prefix*. If the default table of a given name is detached, then the last table of that name attached becomes the new default. I think the right form should be: Any tables of that name attached afterwards require the *database* prefix. Am I right? Thank you, Dim Zegebart
#f2dcdc 1816 code active 2006 May anonymous VDBE 2006 May 1 2 Database corruption with pragma auto_vacuum We had a database created with PRAGMA auto_vacuum=1, that started returning the following message on a DELETE statement. SQL error: database disk image is malformed Running the VACUUM command and running the same DELETE statement succeeds. Running PRAGMA integrity_check on the database (before the VACUUM command is issued) results in the following output: sqlite> PRAGMA integrity_check; *** in database main *** Page 3393 is never used Page 3398 is never used Page 3400 is never used Page 3401 is never used Page 3402 is never used Page 3405 is never used Page 3406 is never used sqlite> VACUUM; sqlite> PRAGMA integrity_check; ok We tried as a temporary workaround, running PRAGMA integrity_check and, based on the result, deciding whether or not to run VACUUM, but this can consume too much time. If needed, I can send a small database that exhibits this problem. _2006-May-22 21:45:47 by drh:_ {linebreak} The database is probably not helpful. What I need to know is: *: What sequence of SQL statements do you issue to cause this to occur? *: What operating system you are using. *: Is the application multi-threaded? *: Is the problem reproducible? *: Are you using a precompiled binary or did you compile it yourself? *: Does the problem go away if you turn off autovacuum? ---- _2006-May-22 22:11:09 by anonymous:_ {linebreak} *: What sequence of SQL statements do you issue to cause this to occur? It is unknown exactly what all of the the statements are leading up to the corruption. I can send the possible statements via private e-mail. *: What operating system you are using. Windows XP Professional w/ Service Pack 2. *: Is the application multi-threaded? Yes. *: Is the problem reproducible? The corruption happens on occasion -- so far it is not known to be easily reproducable in a finite number of steps. *: Are you using a precompiled binary or did you compile it yourself? 
Self-compiled library. When we use the database in our application, it is contained in abstracted classes with concurrency control. *: Does the problem go away if you turn off autovacuum? We have not seen database corruption if auto_vacuum is off when the database is initially created. Is it possible to turn off auto vacuum after the database tables have been created (no when using pragma auto_vacuum, according to the docs)? ---- _2006-May-22 22:28:46 by anonymous:_ {linebreak} Rather than relying on trial and error to reproduce the bug, one technique the bug reporter might try to reproduce the problem is to take a snapshot of the database when it is in a known good state and save it somewhere and then have every process that comes into contact with the database file log every SQLite command (and pragma) complete with millisecond-resolution timestamp and process/thread ID as follows: SELECT * FROM WHATEVER; -- 2006-05-23 14:44:45.237 PID 345 Thread 0 insert into blah values(3,4,5); -- 2006-05-23 14:50:15.345 PID 345 Thread 0 update foo set v=5 where y>4; -- 2006-05-23 15:05:12.930 PID 239 Thread 0 Should the problem happen again, each command could easily be replayed in an appropriate thread in the same order from the last known "good" state, greatly increasing the chances of repeating the bug. If repeating these commands does not lead to database corruption, it is fairly likely that the bug is in your multithreaded code, and not in SQLite. Perhaps SQLite already has such a command tracing facility already. I don't know. ---- _2006-May-22 22:42:04 by anonymous:_ sqlite3_trace(); It passes all the caller-generated SQL statements to a callback (although it doesn't fill in bindings). It also outputs a lot of "internal" SQL statements (VACUUM, for example, is a collection of operations on a temp table), but you should be able to recognize that stuff as something your app would never generate.
#f2dcdc 1815 code active 2006 May anonymous Parser 2006 May 3 3 Support of W3C-DTF(ISO8601 subset) is incomplete "Z" of a time zone is ignored. Reference: http://www.w3.org/TR/NOTE-datetime CREATE table test(dt); INSERT INTO "test" VALUES('2006-05-20T01:10:20+00:00'); INSERT INTO "test" VALUES('2006-05-20T01:10:20Z'); INSERT INTO "test" VALUES('2006-05-20T10:10:20+09:00'); SELECT datetime(dt) from test; 2006-05-20 01:10:20 2006-05-20 01:10:20
#e8e8bd 1814 new active 2006 May anonymous 2006 May 3 4 Autoconf support for MacOSX universal binaries SQLite is used widely on OS X. One problem for many OS X developers is preparing SQLite for universal binaries. I think there are a couple ways to solve this. 1. Add a new configure option --enable-macosx-universal to put the right compiler switches 2. Rework the configure makefiles so that you can override with the typical CFLAGS overrides as suggested by Apple in this article: http://developer.apple.com/documentation/Porting/Conceptual/PortingUnix/compiling/chapter_4_section_3.html I've attached a patch that will enable universal binaries if you choose the --enable-macosx-universal option, are on *-*-darwin* os, and have disabled shared libraries. This patch may not be exactly how you would want to integrate but it should serve as a good starting point. I've tested in a PPC powermac, PPC powerbook, and Intel Mac mini. ---- --- sqlite-3.3.5/configure.ac 2006-04-03 13:16:01.000000000 -0700
+++ sqlite-3.3.x/configure.ac 2006-05-18 16:42:08.000000000 -0700
@@ -661,6 +661,32 @@
AC_CHECK_FUNC(fdatasync, [TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_FDATASYNC=1"])
+##########
+# Mac OS X Universal Binary support
+#
+AC_MSG_CHECKING([whether building for macosx])
+case "${build}" in
+ *-*-darwin* )
+ AC_ARG_ENABLE(macosx-universal,
+ AC_HELP_STRING([--enable-macosx-universal],[Enable macosx universal binaries]),,enable_macosxuniversal=no)
+ AC_MSG_CHECKING([whether building Universal Binaries])
+ if test "$enable_macosxuniversal" = "no"; then
+ AC_MSG_RESULT([no])
+ else
+ AC_MSG_CHECKING([if shared libraries are disabled])
+ if test "$enable_shared" = "no"; then
+ TARGET_CFLAGS="$TARGET_CFLAGS -arch ppc -arch i386 -isysroot /Developer/SDKs/MacOSX10.4u.sdk"
+ AC_MSG_RESULT([yes (Universal Binaries enabled)])
+ else
+ AC_MSG_RESULT([no (Universal Binaries disabled)])
+ fi
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT([no])
+esac
+
+
#########
# Put out accumulated miscellaneous LIBRARIES
#
----
#e8e8bd 1812 new active 2006 May anonymous 2006 May 4 1 alter table add column default current_timestamp I wish I can alter my schema and add column with non-constant default. How difficult is this compared with constant default?
#e8e8bd 1811 doc active 2006 May anonymous 2006 May 5 3 how many open cursors are allowed for one application? I would like to know, how many open cursors allowed at the same time.
#f2dcdc 1797 code active 2006 May anonymous TclLib 2006 May drh 1 1 COPY command doesn't work in tclsqlite 3.3.5 The COPY command doesn't seem to work in the tcl sqlite lib. This same script and datafile works in version 3.2.7. load ./lib/libtclsqlite[info sharedlibextension] sqlite MEMORY_DB :memory: MEMORY_DB onecolumn "PRAGMA empty_result_callbacks=1" puts [MEMORY_DB version] MEMORY_DB eval "create table xyz (col1,col2)" MEMORY_DB copy ignore win_pol /home/centadm/win_pol4.csv \t MEMORY_DB eval "select * from xyz" sqlite_array { puts "Here in the callback" foreach sqlite_value $sqlite_array(*) { puts "$sqlite_value $sqlite_array($sqlite_value)" } } The data file win_pol4.csv consists of two columns, tab seperated. DATA1 DATA2 And the output: -bash-3.00$ tclsh test_sqlite.tcl 3.3.5 while executing "MEMORY_DB copy ignore win_pol /home/centadm/win_pol4.csv \t" (file "test_sqlite.tcl" line 5) -bash-3.00$ pwd /home/centadm -bash-3.00$ ls -l /home/centadm/win_pol4.csv -rw-r--r-- 1 centadm centadm 12 May 5 14:21 /home/centadm/win_pol4.csv -bash-3.00$ more /home/centadm/win_pol4.csv DATA1 DATA2 A TCL Error is returned from the copy command, no message tho. I have used catch to capture the command and verified that there is no data going into the table. Also, PRAGMA empty_result_callbacks=1 still doesn't seem to work in the tcllib. If you catch the COPY command above, you still never see the "Here in the callback" message. 
_2006-May-05 17:57:42 by anonymous:_ {linebreak} Clarification: The line MEMORY_DB copy ignore win_pol /home/centadm/win_pol4.csv \t should read MEMORY_DB copy ignore xyz /home/centadm/win_pol4.csv \t However the result is the same: -bash-3.00$ tclsh test_sqlite.tcl 3.3.5 while executing "MEMORY_DB copy ignore xyz /home/centadm/win_pol4.csv \t" (file "test_sqlite.tcl" line 7) -bash-3.00$ ---- _2006-May-05 19:46:56 by anonymous:_ {linebreak} I have narrowed it down to the code here in tclsqlite.c: zSql = sqlite3_mprintf("SELECT * FROM '%q'", zTable); if( zSql==0 ){ Tcl_AppendResult(interp, "Error: no such table: ", zTable, 0); return TCL_ERROR; } nByte = strlen(zSql); rc = sqlite3_prepare(pDb->db, zSql, 0, &pStmt, 0); sqlite3_free(zSql); if( rc ){ Tcl_AppendResult(interp, "Error: ", sqlite3_errmsg(pDb->db), 0); nCol = 0; }else{ nCol = sqlite3_column_count(pStmt); <--- RETURNING 0 FOR COLUMN COUNT, HAVE VERIFIED TABLE HAS TWO COLUMNS } sqlite3_finalize(pStmt); if( nCol==0 ) { return TCL_ERROR; <--- NO ERROR MESSAGE RETURNED } ---- _2006-May-16 17:51:28 by anonymous:_ {linebreak} I found the problem. The first sqlite3_prepare under DB_COPY should have -1 as it's third argument. When this was change from a 0 to -1 the copy command works in tclsqlite. rc = sqlite3_prepare(pDb->db, zSql,0, &pStmt, 0); should be rc = sqlite3_prepare(pDb->db, zSql,-1, &pStmt, 0); ---- _2006-May-16 18:01:11 by anonymous:_ {linebreak} There is also another reference (the insert statement) to the prepare statement under DB_COPY that needs to change it's third argument from 0 to -1. ---- _2006-Sep-27 16:24:53 by anonymous:_ {linebreak} The same problem is present with version 3.3.7 over here. However, the indicated patch seem to work.
#e8e8bd 1810 new active 2006 May anonymous 2006 May 3 3 localtime() not threadsafe on UNIX The following SQLite code from src/date.c is only guaranteed to function correctly with multiple threads on UNIX if the SQLite library is the only caller of localtime(). If an application that uses the SQLite library's date functions happens to call localtime() either directly or indirectly via another third party library, then localtime() can return a pointer to inconsistant data. localtime_r() should be use instead. sqlite3OsEnterMutex(); pTm = localtime(&t); y.Y = pTm->tm_year + 1900; y.M = pTm->tm_mon + 1; y.D = pTm->tm_mday; y.h = pTm->tm_hour; y.m = pTm->tm_min; y.s = pTm->tm_sec; sqlite3OsLeaveMutex(); Windows and some versions of UNIX may use thread-local storage to make localtime() threadsafe. This is not the case with Linux or any other OS that uses GNU libc: /* Convert `time_t' to `struct tm' in local time zone. Copyright (C) 1991,92,93,95,96,97,98,2002 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include /* The C Standard says that localtime and gmtime return the same pointer. */ struct tm _tmbuf; /* Return the `struct tm' representation of *T in local time, using *TP to store the result. 
*/ struct tm * __localtime_r (t, tp) const time_t *t; struct tm *tp; { return __tz_convert (t, 1, tp); } weak_alias (__localtime_r, localtime_r) /* Return the `struct tm' representation of *T in local time. */ struct tm * localtime (t) const time_t *t; { return __tz_convert (t, 1, &_tmbuf); } libc_hidden_def (localtime) As an added benefit, you get better thread concurrency by getting rid of the sqlite3OsEnterMutex/sqlite3OsLeaveMutex when you use localtime_r. _2006-May-15 12:53:42 by drh:_ {linebreak} All of this is pointed out already in the documentation. I will therefore change this ticket from a bug to an enhancement request. Note that we have considered the use of localtime_r() in the past and rejected it since it will lead to a significant complication of the build process.
#f2dcdc 1809 code active 2006 May anonymous CodeGen 2006 May 1 3 Huge slowdown/increased memory use when using GROUP BY on big dataset This seemingly nonsensical query is a greatly reduced test case taken from several queries I use with SQLite 3.2.1. The real example joins various huge tables and much more complicated views. I'd like to upgrade beyond SQLite 3.2.1, but this is a showstopper. It takes 13 seconds to run on SQLite 3.2.1 and uses just 1.2M of memory. With 3.3.5+ from CVS it takes 185 seconds and uses 230M of memory. PRAGMA temp_store=MEMORY; CREATE TABLE n1(a integer primary key); INSERT INTO "n1" VALUES(1); INSERT INTO "n1" VALUES(2); INSERT INTO "n1" VALUES(3); INSERT INTO "n1" VALUES(4); INSERT INTO "n1" VALUES(5); INSERT INTO "n1" VALUES(6); INSERT INTO "n1" VALUES(7); INSERT INTO "n1" VALUES(8); INSERT INTO "n1" VALUES(9); INSERT INTO "n1" VALUES(10); INSERT INTO "n1" VALUES(11); INSERT INTO "n1" VALUES(12); INSERT INTO "n1" VALUES(13); INSERT INTO "n1" VALUES(14); INSERT INTO "n1" VALUES(15); CREATE VIEW vu as select v3.a a, v5.a-v2.a*v7.a b from n1 v1,n1 v2,n1 v3,n1 v4,n1 v5,n1 v6,n1 v7; select a a, sum(b) T from vu where a=7 group by a; It seems that SQLite 3.2.1 had a much more efficient GROUP BY algorithm that discarded unnecessary data as the view was traversed. _2006-May-13 03:01:28 by anonymous:_ {linebreak} Seeing as this ticket concerns the GROUP BY statement it would make more sense to have an example like this: select a a, sum(b) T from vu where a<4 group by a; But both queries exhibit the same slowdown and memory increase, in any event. ---- _2006-May-13 15:09:39 by anonymous:_ {linebreak} This GROUP BY slowdown/memory increase is not specific to VIEWs. I repeated the test against a comparably sized table with the same results. You'll see this effect for any SELECT operating on a large number of rows using GROUP BY. ---- _2006-May-13 16:44:04 by anonymous:_ {linebreak} The slowdown first appears in SQLite 3.2.6 in check-in [2662]. 
---- _2006-May-24 13:19:29 by anonymous:_ {linebreak} Here's an example to show an actual real-life use of GROUP BY in SQLite <= 3.2.5... Imagine performing mathematical operations on every combination of rows in several large tables for statistical analysis. The GROUP BY algorithm change in 3.2.6 now makes using GROUP BY on huge cross joins not usable for this purpose because it creates an intermediate result set of the product of all cross joins - several times larger than the size of the (already huge) database itself. Indexing is not useful in this case because there is nothing to index by design. All table rows must be traversed. Older versions of SQLite performed this operation extremely efficiently because grouping took place in the main traversal loop. I would think that the old algorithm could be used, but instead of keeping the intermediate results in memory, an index and a table in temp store could be used.
#f2dcdc 1804 code active 2006 May anonymous Unknown 2006 May 4 3 Inconsistent value type returned by SUM when using a GROUP BY Using a schema with test table: CREATE TABLE Tbl1 (Key1 INTEGER, Num1 REAL) And test data: INSERT INTO Tbl1 (Key1,Num1) VALUES (1,5.0) The query: SELECT SUM(Tbl1.Num1) AS Num1Sum FROM Tbl1 Returns a column with the value type correctly reported as FLOAT (2). However, the query: SELECT Tbl1.Key1, SUM(Tbl1.Num1) AS Num1Sum FROM Tbl1 GROUP BY Tbl1.Key1 Returns two columns with value types INT (1) and INT (1). The SUM function is returning a different value type for these two queries when both should return FLOAT (2). This problem does not occur when any SUMmed value is not a whole number in which case, both queries return a value type of FLOAT for the SUM column. I have applied the patch from Check In 3169 (relating to #1726 and #1755) to select.c but this does not resolve the problem. _2006-May-10 09:34:11 by anonymous:_ {linebreak} I should have added that this problem was seen in a Windows CE build running on Pocket PC 2003 and built using eMbedded Visual C++ 4.0. ---- _2006-May-10 11:24:47 by anonymous:_ {linebreak} I can confirm that exactly the same behaviour is exhibited when built under Windows XP (32-bit). ---- _2006-May-10 12:40:21 by drh:_ {linebreak} The answer you are getting back is exactly correct. Why do you care what its datatype is? If you don't like the datatype, cast it. ---- _2006-May-10 13:42:14 by anonymous:_ {linebreak} For maximum compatibility with other SQL databases, both SELECT SUM(field2) FROM table and SELECT field1, SUM(field2) FROM table GROUP BY field1 should return the same data type for the SUM column. All other databases I have worked with do this. I understand that SQLite uses manifest typing but believe that it should be consistent. 
The problem I have is that in my query function (which takes an SQL string and returns a page of results as a 2D array of objects), I don't know whether to use sqlite3_column_double or sqlite3_column_int because I don't know what the calling function requires this column to be returned as. I am currently using the sqlite3_column_decltype call to switch which sqlite3_column_* function I use (and falling back on sqlite3_column_type when the declared type is not known e.g. for aggregate functions like SUM). If the return type of SUM is unpredictable, my calling functions can't assume returned values will be of the same type as the field that is being SUMmed (as is the case with other SQL databases). If you don't consider this to be a problem with SQLite, then I think my only option will be for calling functions to pass in an array of return types so that I always return objects of the correct type.
#e8e8bd 1801 new active 2006 May anonymous Pager 2006 May owensmk 5 4 Include Support for User-defined Lock Synchronization SQLite uses a busy-wait model for locking. For high concurrency applications this can become inefficient. I have written a patch for =pager.c= which introduces two hooks - =lock()= and =unlock()= - whereby the application can participate in locking/synchronization of connections. This can in some cases increase overall concurrency by an order of magnitude. The callback implementation is very similar to the busy handler. All of the synchronization is implemented by the application. SQLite simply calls the hooks at the appropriate times, if they are registered. A full description of the patch is available at {link: http://www.gintana.com/sqlite/}.
#f2dcdc 1799 code active 2006 May anonymous Pager 2006 May 2 3 temp_store=MEMORY slower than FILE for large intermediate result sets (This ticket was split off from #1790 because that ticket was becoming too broad.) When temp_store=MEMORY it can negatively effect the performance of queries with large intermediate result sets generated from SELECTs of either file-based tables or memory-based tables. This is true when sufficient RAM is available to the SQLite process to completely hold the intermediate results in memory without swapping to disk. In the example below, "big" is a file-based table in foo.db with 10 million rows. It was created with "create table big(x,y)". # unmodified stock SQLite built from May 5 2006 CVS (after check-in [3178]) # compiled with default settings for SQLITE_DEFAULT_PAGE_SIZE and N_PG_HASH $ time ./may5-sqlite/sqlite3 foo.db "PRAGMA temp_store = MEMORY; select x, y from big order by y, x" >/dev/null real 13m23.828s user 13m18.452s sys 0m0.811s # SQLite built from May 5 2006 CVS, but compiled with proposed change of # SQLITE_DEFAULT_PAGE_SIZE set to 4096, and N_PG_HASH set to 16384 $ time ./may5-sqlite-hash-opt/sqlite3 foo.db "PRAGMA temp_store = MEMORY; select x, y from big order by y, x" >/dev/null real 6m16.031s user 6m13.108s sys 0m0.811s Compiling with SQLITE_DEFAULT_PAGE_SIZE = 1024, and N_PG_HASH = 32768 resulted in the same timing as the may5-sqlite-hash-opt test run above. If temp_store=FILE (with default SQLite values for SQLITE_DEFAULT_PAGE_SIZE and N_PG_HASH), the timings are comparable to temp_store=MEMORY with SQLITE_DEFAULT_PAGE_SIZE=4096, and N_PG_HASH=16384. Large intermediate results sets can cause SQLite to spend more than half of its CPU time in the function pager_lookup(). By increasing the value of N_PG_HASH and SQLITE_DEFAULT_PAGE_SIZE, the time spent in pager_lookup can be reduced to near zero, thus doubling performance in such cases. 
% cumulative self self total time seconds seconds calls ms/call ms/call name 51.97 118.31 118.31 119658386 0.00 0.00 pager_lookup 4.36 128.25 9.94 4000009 0.00 0.06 sqlite3VdbeExec 3.06 135.21 6.96 315629923 0.00 0.00 parseCellPtr 3.05 142.16 6.95 171797186 0.00 0.00 sqlite3VdbeRecordCompare 2.67 148.22 6.07 12000005 0.00 0.01 sqlite3BtreeMoveto 2.14 153.10 4.88 343594380 0.00 0.00 sqlite3VdbeSerialGet 1.68 156.93 3.83 171797188 0.00 0.00 sqlite3MemCompare 1.63 160.65 3.72 77995781 0.00 0.00 sqlite3pager_get 1.60 164.29 3.65 169734946 0.00 0.00 sqlite3pager_unref 1.58 167.88 3.59 654100795 0.00 0.00 get2byte _2006-May-07 18:37:50 by anonymous:_ {linebreak} Timings on same Windows machine with check-in [3180] applied: # FILE $ time ./may7-sqlite/sqlite3 foo.db "PRAGMA temp_store = FILE; select x, y from big order by y, x" >/dev/null real 5m7.157s user 4m19.905s sys 0m20.827s # MEMORY $ time ./may7-sqlite/sqlite3 foo.db "PRAGMA temp_store = MEMORY; select x, y from big order by y, x" >/dev/null real 5m12.328s user 5m9.781s sys 0m0.984s Much better. temp_store=MEMORY is now competitive with FILE, although temp_store=FILE (when the OS is able to cache the file entirely in memory) is marginally faster. I still think the MEMORY time can be reduced further by another 20 seconds judging by the sys time of 20.827s in the FILE test. The MEMORY subsystem of SQLite ought to have an advantage over the FILE subsystem because it does not incur any system call overhead. I'll see if a profile turns up anything obvious.
#f2dcdc 1790 code active 2006 May anonymous Pager 2006 May 3 3 :memory: performance difference between v2 and v3 Please see the following link for details: http://www.mail-archive.com/sqlite-users%40sqlite.org/msg14937.html Possible fix? RCS file: /sqlite/sqlite/src/pager.c,v retrieving revision 1.266 diff -u -r1.266 pager.c --- pager.c 7 Apr 2006 13:54:47 -0000 1.266 +++ pager.c 3 May 2006 19:02:17 -0000 @@ -1663,7 +1663,7 @@ pPager->memDb = memDb; pPager->readOnly = readOnly; /* pPager->needSync = 0; */ - pPager->noSync = pPager->tempFile || !useJournal; + pPager->noSync = pPager->tempFile || !pPager->useJournal; pPager->fullSync = (pPager->noSync?0:1); /* pPager->pFirst = 0; */ /* pPager->pFirstSynced = 0; */ _2006-May-03 19:32:12 by drh:_ {linebreak} The suggested change makes no difference in performance when I try it. ---- _2006-May-03 21:41:24 by anonymous:_ {linebreak} If transactions are not used, 85% of the time of this memory database benchmark is spent in pager_get_all_dirty_pages(). Each sample counts as 0.01 seconds. 
% cumulative self self total time seconds seconds calls ms/call ms/call name 85.25 31.20 31.20 100002 0.31 0.31 pager_get_all_dirty_pages 1.39 31.71 0.51 100011 0.01 0.20 sqlite3VdbeExec 1.17 32.14 0.43 10487713 0.00 0.00 parseCellPtr 0.63 32.37 0.23 12943618 0.00 0.00 sqlite3VdbeSerialGet 0.61 32.59 0.23 3432951 0.00 0.00 pager_lookup 0.52 32.78 0.19 4849544 0.00 0.00 sqlite3VdbeRecordCompare 0.44 32.95 0.16 400006 0.00 0.00 sqlite3BtreeMoveto 0.41 33.09 0.15 2064924 0.00 0.00 sqlite3pager_get 0.40 33.24 0.14 6471807 0.00 0.00 sqlite3MemCompare 0.06 31.25 100002/100002 sqlite3BtreeCommit [4] [5] 85.6 0.06 31.25 100002 sqlite3pager_commit [5] 31.20 0.00 100002/100002 pager_get_all_dirty_pages [6] 0.05 0.00 389365/389365 clearHistory [65] ----------------------------------------------- 31.20 0.00 100002/100002 sqlite3pager_commit [5] [6] 85.2 31.20 0.00 100002 pager_get_all_dirty_pages [6] ---- _2006-May-03 21:51:30 by anonymous:_ {linebreak} Stats with BEGIN/COMMIT enabled: Each sample counts as 0.01 seconds. 
% cumulative self self total time seconds seconds calls ms/call ms/call name 11.88 0.34 0.34 4849544 0.00 0.00 sqlite3VdbeRecordCompare 8.16 0.56 0.23 10487713 0.00 0.00 parseCellPtr 7.80 0.79 0.22 12943618 0.00 0.00 sqlite3VdbeSerialGet 6.38 0.96 0.18 100013 0.00 0.03 sqlite3VdbeExec 4.26 1.08 0.12 29816 0.00 0.02 balance_nonroot 3.90 1.20 0.11 6471807 0.00 0.00 sqlite3MemCompare 3.19 1.28 0.09 1964925 0.00 0.00 sqlite3pager_get 3.19 1.38 0.09 400006 0.00 0.00 sqlite3BtreeMoveto 2.84 1.46 0.08 19170231 0.00 0.00 get2byte 2.66 1.53 0.07 700015 0.00 0.00 sqlite3VdbeSerialPut 2.13 1.59 0.06 600993 0.00 0.00 sqlite3Malloc 1.77 1.64 0.05 4400155 0.00 0.00 sqlite3pager_unref 1.77 1.69 0.05 3332952 0.00 0.00 pager_lookup 1.77 1.74 0.05 1418379 0.00 0.00 decodeFlags 1.77 1.79 0.05 1332302 0.00 0.00 initPage 1.60 1.83 0.04 5270826 0.00 0.00 findOverflowCell 1.42 1.88 0.04 12181181 0.00 0.00 findCell 1.42 1.92 0.04 4849549 0.00 0.00 fetchPayload 1.42 1.96 0.04 359548 0.00 0.00 insertCell 1.24 1.99 0.04 4896877 0.00 0.00 parseCell 1.06 2.02 0.03 5284245 0.00 0.00 cellSizePtr 1.06 2.05 0.03 3227291 0.00 0.00 binCollFunc 1.06 2.08 0.03 2616113 0.00 0.00 _page_ref 1.06 2.11 0.03 1368027 0.00 0.00 reparentPage 1.06 2.14 0.03 934205 0.00 0.00 sqlite3GenericMalloc 1.06 2.17 0.03 300010 0.00 0.00 sqlite3BtreeCursor 0.89 2.19 0.03 2536689 0.00 0.00 get4byte 0.89 2.22 0.03 1864920 0.00 0.00 getPage ... 
0.00 2.82 0.00 3 0.00 0.00 pager_get_all_dirty_pages 0.00 0.00 3/3 sqlite3BtreeCommit [116] [119] 0.0 0.00 0.00 3 sqlite3pager_commit [119] 0.00 0.00 6551/6551 clearHistory [118] 0.00 0.00 3/3 pager_get_all_dirty_pages [370] ----------------------------------------------- 0.00 0.00 3/3 sqlite3pager_commit [119] [370] 0.0 0.00 0.00 3 pager_get_all_dirty_pages [370] ---- _2006-May-03 22:27:35 by anonymous:_ {linebreak} with the outer BEGIN/COMMIT disabled, the memory database benchmark stats: static PgHdr *pager_get_all_dirty_pages(Pager *pPager){ // this point is reached 100,002 times PgHdr *p, *pList; pList = 0; for(p=pPager->pAll; p; p=p->pNextAll){ // this point is reached 322,956,271 times if( p->dirty ){ // this point is reached 389,365 times p->pDirty = pList; pList = p; } } return pList; } ---- _2006-May-04 05:23:08 by anonymous:_ {linebreak} This patch makes the test (with transaction) run 7% faster for gcc 3.4.4 with -O2. At -O3, gcc performs the inlining of these functions even without the inline hint, so this patch has no effect. RCS file: /sqlite/sqlite/src/btree.c,v retrieving revision 1.324 diff -u -3 -p -r1.324 btree.c --- btree.c 4 Apr 2006 01:54:55 -0000 1.324 +++ btree.c 4 May 2006 05:12:35 -0000 @@ -439,17 +439,17 @@ static int checkReadLocks(BtShared*,Pgno /* ** Read or write a two- and four-byte big-endian integer values. 
*/ -static u32 get2byte(unsigned char *p){ +inline static u32 get2byte(unsigned char *p){ return (p[0]<<8) | p[1]; } -static u32 get4byte(unsigned char *p){ +inline static u32 get4byte(unsigned char *p){ return (p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]; } -static void put2byte(unsigned char *p, u32 v){ +inline static void put2byte(unsigned char *p, u32 v){ p[0] = v>>8; p[1] = v; } -static void put4byte(unsigned char *p, u32 v){ +inline static void put4byte(unsigned char *p, u32 v){ p[0] = v>>24; p[1] = v>>16; p[2] = v>>8; ---- _2006-May-04 19:44:57 by anonymous:_ {linebreak} I just want to confirm that a _file database is faster_ than a memory database for 3.3.5+. Are these numbers correct? 43,478 inserts/second best case for file for 3.3.5+ and 40,000 inserts/second best case for memory? Even with the OS caching the entire database file entirely in RAM, this finding is quite surprising. Test DB IDX TX RC 3.3.5+ 3.3.5 2.8.17 1 mem n y 1000000 40000 33333 76923 2 mem y y 1000000 27027 22727 58824 3 mem n n 1000000 35714 5263 83333 4 mem y n 1000000 24390 2778 62500 5 file n y 1000000 43478 35714 40000 6 file y y 1000000 28571 24390 23256 7 file n n 1000 11 11 13 8 file y n 1000 9 10 13 http://www.sqlite.org/cvstrac/attach_get/256/sqlite_speed.txt ---- _2006-May-04 20:19:18 by anonymous:_ {linebreak} I'm seeing slightly different results. The memory database using a transaction is (slightly) faster than the file-based database using a transaction. 
Timings on 3.3.5+ on Windows XP, gcc 3.4.4 -O3 -fomit-frame-pointer IDX TX # inserts wall time inserts/sec --- --- --------- ---------- ----------- mem no no 100,000 4.8s 20,833 mem no yes 100,000 4.3s 23,255 file no yes 100,000 4.7s 21,276 file no no 1,000 99.8s 10 ...things get worse for :memory: as you increase the number of inserts, while the file database numbers remain constant: IDX TX # inserts wall time inserts/sec --- --- --------- ---------- ----------- mem no yes 1,000,000 48.5s 20,638 mem no yes 2,000,000 118.6s 16,863 mem no yes 4,000,000 364.7s 10,967 file no yes 1,000,000 46.8s 21,354 file no yes 2,000,000 93.8s 21,321 file no yes 4,000,000 187.5s 21,333 Do Linux users get similar results? Considering I have 512K CPU L2 cache, I wonder if there's some CPU cache effect going on here with the way the :memory: db is allocated. ---- _2006-May-04 21:35:07 by anonymous:_ {linebreak} It seems there is some quadratic behavior in pager_lookup (latest CVS). 52% of the time is spent in that function. Profile data from :memory: db, TX on, no IDX, 4 million inserts: /* ** Find a page in the hash table given its page number. Return ** a pointer to the page or NULL if not found. 
*/ static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){ PgHdr *p = pPager->aHash[pager_hash(pgno)]; while( p && p->pgno!=pgno ){ p = p->pNextHash; } return p; } % cumulative self self total time seconds seconds calls ms/call ms/call name 51.97 118.31 118.31 119658386 0.00 0.00 pager_lookup 4.36 128.25 9.94 4000009 0.00 0.06 sqlite3VdbeExec 3.06 135.21 6.96 315629923 0.00 0.00 parseCellPtr 3.05 142.16 6.95 171797186 0.00 0.00 sqlite3VdbeRecordCompare 2.67 148.22 6.07 12000005 0.00 0.01 sqlite3BtreeMoveto 2.14 153.10 4.88 343594380 0.00 0.00 sqlite3VdbeSerialGet 1.68 156.93 3.83 171797188 0.00 0.00 sqlite3MemCompare 1.63 160.65 3.72 77995781 0.00 0.00 sqlite3pager_get 1.60 164.29 3.65 169734946 0.00 0.00 sqlite3pager_unref 1.58 167.88 3.59 654100795 0.00 0.00 get2byte 1.30 170.84 2.95 973877 0.00 0.07 balance_nonroot 1.27 173.74 2.90 56939555 0.00 0.00 initPage 1.24 176.56 2.83 171797188 0.00 0.00 binCollFunc 0.93 178.69 2.12 386371475 0.00 0.00 findCell 0.86 180.65 1.96 96207437 0.00 0.00 pageDestructor 0.83 182.53 1.89 95976540 0.00 0.00 _page_ref 0.80 184.36 1.83 2708031 0.00 0.00 assemblePage 0.80 186.19 1.82 41662605 0.00 0.00 reparentPage 0.74 187.88 1.70 171797188 0.00 0.00 fetchPayload 0.73 189.55 1.67 73995778 0.00 0.00 getPage 0.67 191.07 1.52 59647596 0.00 0.00 decodeFlags 0.63 192.51 1.44 132945443 0.00 0.00 findOverflowCell 0.62 193.93 1.41 40148167 0.00 0.00 sqlite3PutVarint 0.59 195.27 1.34 134687272 0.00 0.00 releasePage 0.59 196.62 1.34 73764879 0.00 0.00 getAndInitPage 0.54 197.84 1.22 8000003 0.00 0.02 sqlite3BtreeInsert 0.52 199.01 1.18 60000030 0.00 0.00 sqlite3VdbeSerialType 0.52 200.19 1.18 24000011 0.00 0.00 moveToRoot 0.49 201.30 1.10 179797130 0.00 0.00 getCellInfo 0.43 202.28 0.98 9882306 0.00 0.00 insertCell 0.42 203.22 0.94 47288132 0.00 0.00 moveToChild 0.40 204.15 0.92 173434760 0.00 0.00 parseCell 0.40 205.06 0.91 95806930 0.00 0.00 get4byte 0.34 205.83 0.78 41662605 0.00 0.00 sqlite3pager_lookup 0.33 206.57 0.74 165099370 0.00 0.00 
sqlite3MallocFailed 0.32 207.31 0.73 20000010 0.00 0.00 sqlite3VdbeSerialPut 0.31 208.02 0.71 8000015 0.00 0.00 sqlite3VdbeHalt 0.30 208.70 0.68 27052986 0.00 0.00 sqlite3GetVarint 0.28 209.33 0.63 174637767 0.00 0.00 put2byte 0.27 209.96 0.62 8000006 0.00 0.00 sqlite3BtreeCursor 0.26 210.54 0.59 8148152 0.00 0.00 fillInCell 0.25 211.12 0.57 3385610 0.00 0.01 reparentChildPages 0.25 211.69 0.57 16000006 0.00 0.00 checkReadLocks 0.22 212.19 0.51 48000093 0.00 0.00 sqlite3VdbeFreeCursor 0.22 212.69 0.50 133898861 0.00 0.00 cellSizePtr 0.22 213.19 0.50 24000010 0.00 0.00 popStack 0.20 213.65 0.46 50076560 0.00 0.00 sqlite3pager_ref 0.20 214.10 0.45 pager_reset 0.19 214.54 0.44 8000024 0.00 0.00 closeAllCursors 0.19 214.97 0.42 12000024 0.00 0.00 sqlite3VdbeMemMakeWriteable 0.18 215.38 0.41 32000052 0.00 0.00 sqlite3VdbeMemSetStr 0.18 215.78 0.40 11616158 0.00 0.00 allocateSpace 0.17 216.16 0.39 8000000 0.00 0.00 bindText 0.16 216.51 0.35 25098767 0.00 0.00 sqlite3MallocRaw 0.16 216.87 0.35 8000005 0.00 0.00 sqlite3BtreeCloseCursor 0.15 217.22 0.34 45560699 0.00 0.00 sqlite3FreeX 0.15 217.56 0.34 36000014 0.00 0.00 sqlite3VarintLen 0.15 217.90 0.34 36000009 0.00 0.00 sqlite3VdbeMemShallowCopy 0.14 218.22 0.33 47999969 0.00 0.00 sqlite3VdbeSerialTypeLen 0.14 218.54 0.33 4000008 0.00 0.00 sqlite3VdbeMakeReady ----------------------------------------------- 41.19 0.00 41662605/119658386 sqlite3pager_lookup [15] 77.12 0.00 77995781/119658386 sqlite3pager_get [8] [5] 52.0 118.31 0.00 119658386 pager_lookup [5] ----------------------------------------------- 0.19 4.02 4000003/77995781 sqlite3BtreeGetMeta [28] 3.53 74.30 73995778/77995781 getPage [9] [8] 36.0 3.72 78.31 77995781 sqlite3pager_get [8] 77.12 0.00 77995781/119658386 pager_lookup [5] 1.12 0.00 56939550/95976540 _page_ref [40] 0.03 0.00 230897/230897 page_remove_from_stmt_list [139] 0.03 0.00 230897/230897 makeClean [138] 0.01 0.00 230897/461804 sqlite3pager_pagecount [150] 0.00 0.00 230897/25098767 
sqlite3MallocRaw [58] ----------------------------------------------- 1.82 44.94 41662605/41662605 reparentChildPages [13] [14] 20.5 1.82 44.94 41662605 reparentPage [14] 0.78 41.96 41662605/41662605 sqlite3pager_lookup [15] 2.21 0.00 41672966/131189801 sqlite3pager_unref [31] 0.00 0.00 93099/50076560 sqlite3pager_ref [75] ----------------------------------------------- 0.78 41.96 41662605/41662605 reparentPage [14] [15] 18.8 0.78 41.96 41662605 sqlite3pager_lookup [15] 41.19 0.00 41662605/119658386 pager_lookup [5] 0.77 0.00 39036990/95976540 _page_ref [40] ----------------------------------------------- 0.77 0.00 39036990/95976540 sqlite3pager_lookup [15] 1.12 0.00 56939550/95976540 sqlite3pager_get [8] [40] 0.8 1.89 0.00 95976540 _page_ref [40] ---- _2006-May-04 21:41:37 by anonymous:_ {linebreak} I guess increasing this array size is in order: PgHdr *aHash[N_PG_HASH]; /* Hash table to map page number to PgHdr */ Too many hash collisions leading to growing linked lists in buckets. Or perhaps pager_hash has to be replaced with a better hash function. ---- _2006-May-04 22:04:47 by anonymous:_ {linebreak} Increasing the size of N_PG_HASH to 8192 seems to help the "4 million insert in a transaction into a memory database" benchmark. It now runs in 203.5 seconds (19656 inserts/sec), as opposed to 364.7 seconds (10967 inserts/sec) previously. This is closer to the 187.5 seconds for the file-based database timing. ---- _2006-May-04 22:13:16 by anonymous:_ {linebreak} Increasing N_PG_HASH to 16384 yields 21,052 inserts/second for a 4 million insert single-transaction :memory: database no-index run. This is very close to the file database figure of 21,333 inserts/second. ---- _2006-May-04 22:23:19 by anonymous:_ {linebreak} Setting N_PG_HASH to 32768 yields 21,621 inserts/second in the 4M insert s in a single-transaction in a memory db test. This is marginally faster than the file based database timing. Increasing N_PG_HASH has diminishing returns after 16384. 
---- _2006-May-05 15:33:31 by anonymous:_ {linebreak} You should get the same effect if you increase the page size instead of increasing the size of the hash table. With a larger page size there will be fewer pages to be managed by the hash table. This might be a better solution for many applications. A hash table with 32K entries occupies 128K of RAM, whether it is used or not. ---- _2006-May-05 19:37:51 by anonymous:_ {linebreak} 128K of RAM when dealing with a 230M :memory: database is not terribly significant. Here's the timings for various N_PG_HASH and SQLITE_DEFAULT_PAGE_SIZE values for 4 million inserts into a :memory: database in a single transaction: N_PG_HASH SQLITE_DEFAULT_PAGE_SIZE inserts/sec --------- ------------------------ ----------- 16384 4096 21,622 32768 1024 21,621 8192 8192 20,513 4096 4096 20,101 4096 8192 19,417 2048 4096 16,878 2048 8192 16,598 2048 16384 15,038 2048 32768 13,937 2048 1024 10,782 So it seems the default values of N_PG_HASH and SQLITE_DEFAULT_PAGE_SIZE should be raised. ---- _2006-May-05 21:34:01 by anonymous:_ {linebreak} My point was that most users do not have 230 MB memory databases, so having a large hash table which is fixed at that size may be a burden. 128K for the hash table is a lot if you only have 128K in your memmory database. I agree that increasing these values would seem to provide a substantial performance increase at little cost. I would suggest using the 4K hash table and the 4K page size. These values are close to the current values. Many users have reported a general speed improvement using a page size of 4K which matches the value used by WinXP (and think many other Os's as well) for disk I/O blocks. These values nearly double the insert rate over the current default values. The fixed size hash table only takes twice the space. ---- _2006-May-06 14:54:32 by anonymous:_ {linebreak} Memory page speed should be as fast as possible as it effects the general performance of SQLite. 
Perhaps a static hash table is not the best data structure here. Don't temp tables and intermediate select results on file-based tables use memory-based pages? Making memory page speed as fast as possible will improve overall SQLite performance whether you are using a file or memory based database. For example, when ordering result sets from a file-based database select this routine is used to generate the code: static void pushOntoSorter( Parse *pParse, /* Parser context */ ExprList *pOrderBy, /* The ORDER BY clause */ Select *pSelect /* The whole SELECT statement */ ){ Vdbe *v = pParse->pVdbe; sqlite3ExprCodeExprList(pParse, pOrderBy); sqlite3VdbeAddOp(v, OP_Sequence, pOrderBy->iECursor, 0); sqlite3VdbeAddOp(v, OP_Pull, pOrderBy->nExpr + 1, 0); sqlite3VdbeAddOp(v, OP_MakeRecord, pOrderBy->nExpr + 2, 0); sqlite3VdbeAddOp(v, OP_IdxInsert, pOrderBy->iECursor, 0); For those of us who have very complicated nested sub-selects of file-based tables in many queries or even ORDER BYs on huge result sets, speeding up the memory page performance should be a performance win for SQLite in general. ---- _2006-May-06 17:37:32 by anonymous:_ {linebreak} The following test demonstrates that this memory page issue can greatly effect the performance of queries against file-based tables if temp_store is set to MEMORY. "big" is a file-based table in foo.db with 10 million rows. It was created with "create table big(x,y)". 
# unmodified stock SQLite built from May 5 2006 CVS (after check-in [3178]) # compiled with default settings for SQLITE_DEFAULT_PAGE_SIZE and N_PG_HASH $ time ./may5-sqlite/sqlite3 foo.db "PRAGMA temp_store = MEMORY; select x, y from big order by y, x" >/dev/null real 13m23.828s user 13m18.452s sys 0m0.811s # SQLite built from May 5 2006 CVS, but compiled with proposed change of # SQLITE_DEFAULT_PAGE_SIZE set to 4096, and N_PG_HASH set to 16384 $ time ./may5-sqlite-hash-opt/sqlite3 foo.db "PRAGMA temp_store = MEMORY; select x, y from big order by y, x" >/dev/null real 6m16.031s user 6m13.108s sys 0m0.811s This is not even what I would consider to be a big table. I should mention that compiling with SQLITE_DEFAULT_PAGE_SIZE = 1024, and N_PG_HASH = 32768 resulted in same timing as the may5-sqlite-hash-opt test run above. A pretty good return for an extra 126K. ---- _2006-May-08 04:07:25 by anonymous:_ {linebreak} You now get 20,725 inserts/second as of the latest check-in [3180] for 4 million inserts into a :memory: database in a single transaction (using the default SQLITE_DEFAULT_PAGE_SIZE of 1K). This is nearly twice as fast as SQLite prior to the check-in [3180] (10,782 inserts/second). However, it is 4% slower than the best timing prior to [3180] when compiled with N_PG_HASH=32768 and SQLITE_DEFAULT_PAGE_SIZE=1024 which got 21,622 inserts/second (see table above). Increasing the size of SQLITE_DEFAULT_PAGE_SIZE with the latest CVS either has no effect or makes the memory insert benchmark timings slightly worse.
#e8e8bd 1798 doc active 2006 May anonymous 2006 May 5 4 preprocessed is misspelled on download page On the download page you'll find the text "proprocessed".
#f2dcdc 1791 code active 2006 May anonymous Unknown 2006 May 1 1 Native threads support for BeOS BeOS ports lacks native thread support. BeOS has very powerful but lightweight threading system, being throughout multithreaded, but it differs from posix-thread ideology, thus our pthreads implementation atm looks more like flacky workaround. Ideally will be to have separate implementation for thread-support, like for Win16/32 versions. At the moment this problem caused bustage of BeOS Mozilla port, https://bugzilla.mozilla.org/show_bug.cgi?id=330340 nearest workaround might be pthreads usage, inspite its flackyness, but it also causes mess for Mozilla build/configure system, because for other parts in Mozilla we use nspr-threads, which, for BeOS, use native version _2006-Oct-27 05:48:51 by anonymous:_ {linebreak} BeOS locking extensions (using native bthreads) have been written and are included in the SQLite3 built into Mozilla Firefox. Is there some process wherein these changes might be incorporated into the SQLite tree? ---- _2006-Oct-27 12:48:11 by anonymous:_ {linebreak} Follow the example of OS/2 and propose a patch against the latest SQLite CVS that has proper #ifdef's around BeOS code so it won't break other platforms. Since you're probably the only one interested in this patch, you'll have to do the diffing/merging/testing work yourself. ---- _2006-Nov-07 03:55:36 by anonymous:_ {linebreak} Thanks for the advice. We've completed updates to code so it works with the sqlite 3.3.8 patches proposed for Firefox. Current implementation has a parallel os-specific file (os_beos.c). However, with the latest round of locking enhancements to os_unix.c, we're now wondering if it makes more sense to simply enhance this file to support BeOS locking. (yes, we. surprisingly, there is more than one BeOS user left on the planet.) :)
#e8e8bd 1789 doc active 2006 May anonymous Unknown 2006 May drh 4 4 sqlite3_result_error() not adequately documented Documentation for =sqlite3_result_error()= and friends says "operation of these routines is very similar to the operation of sqlite3_bind_blob() and its cousins", but none of the bind routines are really that similar. So it's not obvious from existing documentation whether the =int= argument is a string length or, say, a =SQLITE_= error code, or a static/transient flag. A glance at /sqlite/src/func.c _suggests_ that it's a static/transient flag, but it's not entirely clear (and if it is, why isn't the signature similar to =sqlite3_result_text()=?) =sqlite3.h= and =capi3.html= should probably have a little more discussion about returning error situations from user-defined functions. c.
#f2dcdc 1743 code active 2006 Mar anonymous Parser 2006 Mar 3 3 A very very deep IN statement failure Ok the problem is simple. I need to create a VERY VERY large IN statement. The problem is SQLite seems to have a limit on either query length or depth of an IN statement. Here is my example See attached 1 That would be a 2 levels deep In statement. I can only get up to 9 with SQLite but I need to get to 20. Since it works for 9 I can only assume that my 10 is correct even though the error is a syntax error. Below is the code that creates the select statement. See attached 4 Attachment 2 and 3 show a 9 and 10 level respectively. Thanks for your help _2006-Mar-30 21:30:51 by anonymous:_ Select "Wow !!" from "Wow !!" :-) Maybe VIEWs could help ?? ---- _2006-Mar-30 21:37:50 by anonymous:_ {linebreak} This may be a work around for your problem. From looking at your sample SQL: SELECT * FROM xs where classname like '%Bonus_Pay_Weight_Entry%' or classname in ( select parentname from xs where classname in ( select parentname from xs where classname in ( select parentname from xs where classname like '%Bonus_Pay_Weight_Entry%' ) ) ) or classname in ( select parentname from xs where classname in ( select parentname from xs where classname like '%Bonus_Pay_Weight_Entry%' ) ) or classname in ( select parentname from xs where classname like '%Bonus_Pay_Weight_Entry%' ) ; It seems you are trying to find all the parent classes of all the classes with this magic string in their name. If so, I think there is another way to do this. Instead of using a C program to build a huge SQL statement and then collect the results, use a different C program to execute a series of small SQL commands that generate the same result set. The following series of SQL statements should generate the same set of results. 
create temp table xt as select classname from xs where classname like '%Bonus_Pay_Weight_Entry%'; insert into xt select parentname from xs where classname in xt and parentname not in xt; select changes(); insert into xt select parentname from xs where classname in xt and parentname not in xt; select changes(); insert into xt select parentname from xs where classname in xt and parentname not in xt; select changes(); ... repeat until changes returns zero select * from xs where classname in xt; drop table xt; This can be execute by code that looks something like the following pseudo-C code. string sql; sql = "create temp table xt as select classname from xs where classname like '%Bonus_Pay_Weight_Entry%'"; sqlite3_exec(db, sql); sql = "insert into xt select parentname from xs where classname in xt and parentname not in xt"; sqlite3_stmt* extend = sqlite3_prepare(db, sql); sql = "select changes()" sqlite3_stmt* check = sqlite3_prepare(db, sql); int changes = 0; do { sqlite3_step(extend); sqlite3_reset(extend); sqlite3_step(check); changes = sqlite3_column_int(check, 0); sqlite3_reset(check); } while (changes > 0); sqlite3_finalize(extend); sqlite3_finalize(check); sql = "select * from xs where classname in xt"; sqlite3_stmt* get = sqlite3_prepare(db, sql); int rc; do { rc = sqlite3_step(get); if (rc == SQLITE_DONE) break; // process a result row } while (1); sqlite3_finalize(get); sql = "drop table xt"; sqlite3_exec(db, sql); ---- _2006-Apr-05 17:25:30 by anonymous:_ {linebreak} Where did you find the select changes(); function? I would like to find all the functions that SQLite has and their uses. (and no I dont want the C API. I found that) ---- _2006-Apr-05 18:53:08 by anonymous:_ {linebreak} There is no complete listing of the functions in the documentation that I am aware of. Most are documented on this page http://www.sqlite.org/lang_expr.html but some are missing. 
The ultimate list of the predefined functions is the source file func.c which implements all the functions. You can view it here http://www.sqlite.org/cvstrac/rlog?f=sqlite/src/func.c
#f2dcdc 1742 code active 2006 Mar anonymous Unknown 2006 Mar drh 2 3 ORDER BY on more than one column causes a big slowdown Put simply, any query which contains an ORDER BY clause that sorts on more than one column incurs a strange slowdown. Running SQLite 3.3.4 on WindowsXPSP2 and on OS X 1.4.5, the behavior is similar; if the ORDER BY clause contains one column, the query is very fast; on two or more columns, it is terribly slow. _2006-Mar-28 23:58:15 by anonymous:_ {linebreak} Also worth noting that this behavior seems to start with SQLite 3.3.x; earlier versions of SQLite handle multiple ORDER BY columns much faster. ---- _2006-Mar-29 01:22:11 by anonymous:_ {linebreak} Note also that this behavior is being exhibited when sorting on *indexed* columns ---- _2006-Mar-29 01:50:38 by drh:_ {linebreak} Some examples would be helpful. ---- _2006-Mar-29 18:11:43 by anonymous:_ {linebreak} Most definitely! I will attach a sample 3.3.4 database dump, that displays this behavior.
#e8e8bd 1741 warn active 2006 Mar anonymous VDBE 2006 Mar 5 4 unused variable with SQLITE_OMIT_UTF16 defined vdbemem.c, function sqlite3VdbeChangeEncoding(): int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
int rc;
if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){
return SQLITE_OK;
}
#ifdef SQLITE_OMIT_UTF16
return SQLITE_ERROR;
#else
...
If SQLITE_OMIT_UTF16 is defined then the "rc" variable is unused and the compiler (Windows Intel 7.0) emits a useless warning.
#f2dcdc 1735 code active 2006 Mar anonymous Unknown 2006 Mar 1 3 Encoding problem I use latin2 (iso-8859-2) encoding in my system. When operating on sqlite 3 I can insert data that contains national characters into a database (for example using sqlite3 console) and then when I select them back, I am given the proper result. But when I use sqlite driver from Qt4, which uses sqlite3_column_text16() to fetch data from the database, I don't get the expected result (meaning the conversion to UTF-16 probably messed things up). Now the problem can be in one of two places -- either sqlite3 console application doesn't use a proper conversion to convert from my locale encoding into its internal encoding or the database internal mechanisms mess some things up. In short: sqlite3(somelatin2string) ==> SQLITE DMBS ==> sqlite3_column_text16() ==> garbage != somelatin2string At first I thought this was Qt problem as data stored through sqlite console and retrieved from it was correct and data stored by Qt and retrieved by Qt was also correct whereas data stored by Qt and retrieved by sqlite3 console or stored by the console and retrieved by Qt was not correct. I contacted Qt support guys @ trolltech and talked about it and it looks like Qt side if fine -- it expects a UTF-16 encoded data (because it uses the function mentioned earlier) and it converts from UTF-16 to whatever encoding it needs (and vice versa). So the error is probably somewhere in the line between the console and the database itself or in the database internally. It could be that sqlite3 expects UTF-8 (or UTF-16) encoded data on input but is given ISO-8859-2 data (entered manually by me at the console). _2006-Mar-27 16:36:26 by anonymous:_ {linebreak} The console app doesn't convert from your local code page to UTF-8 (or UTF-16). 
---- _2006-Mar-27 22:45:21 by anonymous:_ {linebreak} It probably should, in the documentation of sqlite a suggested method of converting databases between versions 2 and 3 is: sqlite OLD.DB .dump | sqlite3 NEW.DB Now =sqlite= outputs the data in "local" format and if =sqlite3= doesn't encode it properly, such a conversion will be invalid because the incoming data won't be utf encoded. A solution could be to do: sqlite OLD.DB .dump | iconv -f -t UTF-8 | sqlite3 NEW.DB But it is the console which should be responsible for the conversion. Also because otherwise using =sqlite3= console on a non-utf system with a perfectly well UTF-8 encoded database will result in improper output too.
#f2dcdc 1733 code active 2006 Mar anonymous VDBE 2006 Mar drh 4 3 Unaligned Access on ia64: aggregate_context ptr isn't 16-bytes aligned There is a problem on ia64 with pointer returned by sqlite3_aggregate_context function. If the size requested is less than NBFS bytes, then the pointer returned is 8 bytes aligned while every pointer returned by allocator function must be 16-bytes aligned (the specification requires that the pointer is aligned so that every basic type can be stored there and long double is 16 bytes on Itanium). So if a user allocates, say, 24 bytes for his context, and the first member in his context happens to be a long double, he will get unaligned access exception. This will lead to performance hit on Linux and to crash on HP-UX, since no default SIGBUS handler is present on HP-UX (IIRC). _2006-Mar-27 10:37:37 by anonymous:_ {linebreak} Additional details can be found in this mailing list thread: http://thread.gmane.org/gmane.comp.db.sqlite.general/18144
#e8e8bd 1732 doc active 2006 Mar anonymous 2006 Mar 5 5 Info on Home page According to "Check-in [3144] : Increase test coverage to above 98%. (By drh)" the info on home page should be changed from 95% to 98%
#e8e8bd 1731 doc active 2006 Mar anonymous 2006 Mar 5 5 Typo in select4.test # Make sure the names of columns are takenf rom the right-most subquery should be # Make sure the names of columns are taken from the right-most subquery
#f2dcdc 1700 code active 2006 Mar anonymous Parser 2006 Mar 2 2 Handling column names for aliased queries is broken The following query does not work, SELECT DISTINCT * FROM (SELECT t1.ID FROM GR_ADDRESS t1 WHERE t1.ID > 1 UNION ALL SELECT t1.ID FROM PERSON t1) t1 ORDER BY t1.ID DESC but this one does, SELECT DISTINCT * FROM (SELECT t1.ID FROM GR_ADDRESS t1 WHERE t1.ID > 1 UNION ALL SELECT t1.ID FROM PERSON t1 ORDER BY t1.ID DESC) Dennis Cote responded with: I think you have found another example of the problems SQLite has handling columns names. The following log first shows what SQLite thinks the column name is for the query without the order by clause (i.e. t1.ID). Then we try to order by that column name, with or without the table alias. Both cases result in an error. Finally there is a work around that you could use that applies an alias to the selected columns in the two tables that are combined by the union operation. SQLite version 3.3.2 Enter ".help" for instructions sqlite> create table GR_ADDRESS(id, data); sqlite> create table PERSON(id, data); sqlite> .mode column sqlite> .header on sqlite> insert into gr_address values(1, 10); sqlite> insert into person values(2, 20); sqlite> insert into gr_address values(3, 30); sqlite> SELECT DISTINCT * ...> FROM ...> (SELECT t1.ID ...> FROM GR_ADDRESS t1 ...> WHERE t1.ID > 1 ...> UNION ALL ...> SELECT t1.ID ...> FROM PERSON t1) ...> t1; t1.ID ---------- 3 2 sqlite> SELECT DISTINCT * ...> FROM ...> (SELECT t1.ID ...> FROM GR_ADDRESS t1 ...> WHERE t1.ID > 1 ...> UNION ALL ...> SELECT t1.ID ...> FROM PERSON t1) ...> t1 ORDER BY t1.ID DESC; SQL error: no such column: t1.ID sqlite> SELECT DISTINCT * ...> FROM ...> (SELECT t1.ID ...> FROM GR_ADDRESS t1 ...> WHERE t1.ID > 1 ...> UNION ALL ...> SELECT t1.ID ...> FROM PERSON t1) ...> t1 ORDER BY ID DESC; SQL error: no such column: ID sqlite> SELECT DISTINCT * ...> FROM ...> (SELECT t1.ID as ID ...> FROM GR_ADDRESS t1 ...> WHERE t1.ID > 1 ...> UNION ALL ...> SELECT t1.ID as 
ID ...> FROM PERSON t1) ...> t1 ORDER BY t1.ID DESC; ID ---------- 3 2 You may also be interested in the discussion of a similar problem under ticket #1688.
#e8e8bd 1720 new active 2006 Mar anonymous 2006 Mar 3 4 Actions made by a trigger are not triggered Hellow, CREATE TRIGGER trg_del_event BEFORE DELETE ON event FOR EACH ROW BEGIN SELECT RAISE(ROLLBACK, 'Cannot delete : event is referenced in table active_stuff') WHERE (SELECT id FROM active_stuff WHERE event_id = OLD.id) IS NOT NULL; DELETE from event WHERE parent_event_id = OLD.id; END; This trigger verify if the current event is referenced in active_stuff table and if not, delete the row and the childs of the event. The problem is that the delete made by this trigger is not triggered itself : There's no verification on event's child and no global rollback. I don't know exactly if it's a feature or an incident. If it's a feature, you can simply close this ticket. Thanks :) _2006-Mar-19 17:38:15 by anonymous:_ {linebreak} I believe it was mentioned on the mailing list that SQLite triggers are not recursive. However, I don't see any mention of this in the documentation: http://www.sqlite.org/lang_createtrigger.html ---- _2006-Mar-19 17:49:57 by anonymous:_ {linebreak} Yes, it's right, I've just found this in ML : Re: [sqlite] Are DELETE TRIGGERS recursive or not? drh Tue, 25 Oct 2005 06:38:42 -0700 Ralf Junker <[EMAIL PROTECTED]> wrote: > I wonder if a DELETE TRIGGER should trigger itself recursively Not at this time. Though work is underway to change this. We need recusive delete triggers in order to implement cascadinig deletes for referential integrity. -- D. Richard Hipp <[EMAIL PROTECTED]> So it's not a bug :) It's : - a documentation problem (missing "triggers are currently no recursive") - a feature request :) ---- _2006-Mar-19 19:42:58 by anonymous:_ {linebreak} Finally I've found the limitation here : http://www.sqlite.org/omitted.html
#f2dcdc 1719 code active 2006 Mar anonymous 2006 Mar 4 4 Solaris 8(SQL: bus error while creating temporary file The problem occours since we using gcc 4.0.2 (with gcc 3.4.3 there was no problem). This problem has the same root as Ticket #1584, with the same solution (but only needed for SQLite <= 2.8.17): --- src/os.c.orig 2006-03-17 14:02:53.759531000 +0100 +++ src/os.c 2006-03-17 14:03:18.529535000 +0100 @@ -1652,7 +1652,9 @@ #if OS_UNIX && !defined(SQLITE_TEST) { int pid; - time((time_t*)zBuf); + time_t t; + time(&t); + memcpy(zBuf, &t, sizeof(t)); pid = getpid(); memcpy(&zBuf[sizeof(time_t)], &pid, sizeof(pid)); }
#e8e8bd 1718 new active 2006 Mar anonymous 2006 Mar 1 1 allow *.sqlite as parameter to sqlite3_analyzer it would be nice if it was possible to do: sqlite3_analyzer.exe *.sqlite currently you can't and you have to do: sqlite3_analyzer.exe 1.sqlite sqlite3_analyzer.exe 2.sqlite etc..
#f2dcdc 1714 code active 2006 Mar anonymous CodeGen 2006 Mar 4 4 Slow query when tables in 2 different files This slow query seems to be sped up by using an explicit CROSS JOIN. ANALYZE apparently does not help. The tables span 2 different database files. Taken from the SQLite mailing list: -- table1.schema (file 1) ATTACH DATABASE './table1.db' AS t1 ; CREATE TABLE t1.table1 ( i_id INT4, b_id INT4, d_id INT4, c_id INT2, data_in REAL, data_out REAL ); CREATE INDEX t1.ix_table1_b_id ON table1( b_id ); DETACH DATABASE t1 ; -- table2.schema (file 2) ATTACH DATABASE './table2.db' AS t2 ; CREATE TABLE t2.table2 ( d_id INT4 PRIMARY KEY, r_id INT2, m_id INT2, i TEXT, ct TEXT, cc TEXT, type TEXT, notes TEXT ); DETACH DATABASE t2 ; -- the slow query (does not use indexes on both tables?) select t1.b_id, t1.c_id, t2.r_id, t2.m_id, sum( t1.data_in ) as data_in, sum( t1.data_out ) as data_out from table1 t1 join table2 t2 on t2.d_id = t1.d_id and t1.b_id >= 100 and t1.b_id < 200 group by t1.b_id, t1.c_id, t2.m_id, t2.r_id; -- the fast query (seems to use both tables' indices) select t1.b_id, t1.c_id, t2.r_id, t2.m_id, sum( t1.data_in ) as data_in, sum( t1.data_out ) as data_out from table1 t1 cross join table2 t2 where t2.d_id = t1.d_id and t1.b_id >= 100 and t1.b_id < 200 group by t1.b_id, t1.c_id, t2.m_id, t2.r_id; More information can be found here: http://www.mail-archive.com/sqlite-users%40sqlite.org/msg13648.html
#e8e8bd 1689 new active 2006 Feb anonymous 2006 Mar 2 4 triggers and temporary tables CREATE TRIGGER trg_upd_dict AFTER UPDATE ON dict BEGIN UPDATE dict SET code = (SELECT code from tmp_connected_user) WHERE old.dict_id = dict_id ; END ;
This trigger doesn't work if tmp_connected_user is a temporary table. The message is : SQL error: no such table: main.tmp_connected_user The goal is to have persistant triggers who works with temporary tables. Exemple of use : - Workarround who replace the non existing connection by user / password. When we insert/update, the database doesn't know who insert/update. If we have a table user, we can on each table fill by trigger fields like last_user_id, last_modif_d. The trigger cannot know who make the connection but we stock the user_id when he connects to the db in a temporary table, the trigger will work. - Security (no one can update / insert the database if a special temporary table is not created and filled). ---- _2006-Mar-03 20:25:41 by drh:_ {linebreak} You can create a TEMP trigger that will reference tables in the main database and/or attached databases. But SQLite currently does not allow triggers in the main or attached database to reference tables in other databases. I will enter this as an enhancement request. ---- _2006-Mar-06 16:17:11 by anonymous:_ {linebreak} Workarround for this ticket : if we only need 1 result, we can use user defined function instead temporary table in the trigger. Tested with php : it works :)
#e8e8bd 1655 new active 2006 Feb anonymous VDBE 2006 Mar 4 4 Every function can have their private data like agreagates Is it possible to modify the way functions are handled in sqlite ? My idea is to allow functions to have their own private data space to save data from row to row like the agregates have, with that we can have functions that remember last row values, create counters and totalizers that return their updated values for each row. Ex: select increment(1),* from my_table; 1|car|rose|3 2|sea|bike|7 3|flower|water|33 select sum_and_return_row_by_row(row_value_to_sum),* from my_table; 3|car|rose|3 10|sea|bike|7 43|flower|water|33 select current_row_value + last_row_value(current_row_value),* from my_table; 3|car|rose|3 10|sea|bike|7 40|flower|water|33
The structure for that is already there, in fact is the same used by agregates, I was scratching the code but I could not find easily where to introduce code to push the context and pop it for functions that aren't agregates, someone know how to do that ? _2006-Feb-04 20:45:32 by anonymous:_ {linebreak} /* ** Implementation of the increment() function */ static void incrementFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ assert( argc==1 ); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_INTEGER: { i64 iVal = sqlite3_value_int64(argv[0]); i64 *pi = (i64*)&context->s.zShort; *pi = *pi + iVal; sqlite3_result_int64(context, *pi); break; } } }
I've tried that but context are not saved row to row, of course here I was using a hidden member (s.zShort) of the context structure that seems not to be used or damaged, ideally should have a function like "void *sqlite3_set_presistent_data(sqlite3_context *context, void* value, int size_to_be_allocated)" or something like it that will allocate memory and return the actual value stored before if any.
#e8e8bd 1656 doc active 2006 Feb anonymous Parser 2006 Mar 5 4 lemon versions and changelog Lemon lacks whatsnew.txt files and versioning to be used in other products than SQLite. Seems like a useful utility. It would be nice to see some performance tests/comparison with other parsers.
#e8e8bd 1693 new active 2006 Feb anonymous Parser 2006 Mar 4 4 Parser sensitive to position of word AUTOINCREMENT Command: create table unique_ids ( 'id' INTEGER AUTOINCREMENT PRIMARY KEY) fails, while the create table unique_ids ( 'id' INTEGER PRIMARY KEY AUTOINCREMENT) succeeds. The only difference is the position of the word AUTOINCREMENT. I originally expected the AUTOINCREMENT to be a standalone feature, not tied with PRIMARY KEY. Perhaps the parser may be more forgiving here.
#e8e8bd 1704 new active 2006 Mar anonymous 2006 Mar 4 3 extern "C" block in sqliteInt.h can we put #ifdef __cplusplus extern "C" { #endif ... #ifdef __cplusplus } /* End of the 'extern "C"' block */ #endif around the declarations in sqliteInt.h. It would help as we need to access SQLite internals from our C++ code.
#e8e8bd 1703 new active 2006 Mar anonymous Pager 2006 Mar 2 3 Second parameter to gettimeofday() in os_unix.c should be NULL in os_unix.c, function sqlite3UnixCurrentTime(): the second argument to gettimeofday() should be NULL and the declaration of sTz should be removed. struct timezone seems to cause trouble on Linux systems.
#e8e8bd 1702 new active 2006 Mar anonymous 2006 Mar 5 4 feature request: API to write in-memory DB to file. Hello! A feature suggestion regarding in-mem databases: It would be interesting to be able to save this to files as normal sqlite3 dbs. i'm assuming that this is internally a rather simple operation, but i didn't find a function for doing it. Take care, ----- stephan _2006-Jun-28 05:04:05 by anonymous:_ {linebreak} There have been already good solutions. Watch this wiki page. http://www.sqlite.org/cvstrac/wiki?p=InMemoryDatabase
#e8e8bd 1701 doc active 2006 Mar anonymous 2006 Mar drh 3 1 3.3 build option not documented? 3.3 db can not be read in earlier 3.X versions. Ok, then it says there is a 'rare' compile option to force them to be. I can not use 3.3 yet from php and perl, so I have the rare condition that I prefer to have all my tools in sync more. I have searched the incompatibilities page, the ./configure -h, grepped the source and read the options page... so far none seem to have led me to this magic compile option. Maybe it should be documented someplace? _2006-Mar-03 15:49:35 by anonymous:_ {linebreak} The compilation option you are looking for is SQLITE_DEFAULT_FILE_FORMAT. I found it by looking back through the time line before version 3.3.0. You are correct it should be added to the options displayed on http://www.sqlite.org/compile.html which is reached from the Compilation Options link on the documentation page.
#e8e8bd 1698 build active 2006 Mar anonymous 2006 Mar 1 1 sqlite_4y6ngs9FlYvAMGO 0kb 3/1/2006 1:16 PM sqlite_4y6ngs9FlYvAMGO 0kb 3/1/2006 1:16 PM I like to stop this file,don't know where is coming from _2006-Mar-02 03:50:33 by anonymous:_ {linebreak} You might want to check the free tools on http://www.sysinternals.com to monitor file events and process state. ProcessExplorer will tell you what files (and much more) are open per process and you can even find the process that has some file open. FileMon will monitor all file accesses, so you can check which program is creating those files, if it doesn't let them open for enough time to use ProcessExplorer. Hope this helps.
#f2dcdc 1878 code active 2006 Jun anonymous CodeGen 2006 Jun 2 3 No index used when specifying alias name in ORDER BY clause Using an alias name in the ORDER BY clause prevents indices from being used in the query for sorting purposes: For this schema: CREATE TABLE t1 (c1, c2); CREATE TABLE t2 (c3, c4); CREATE INDEX t1_idx ON t1(c2); the following select query: EXPLAIN QUERY PLAN SELECT t1.c2 AS col2, t2.c4 AS col4 FROM t1 LEFT JOIN t2 ON t1.c1=t2.c3 ORDER BY t1.c2; will indeed use index t1_idx: sqlite> EXPLAIN QUERY PLAN SELECT t1.c2 AS col2, t2.c4 AS col4 FROM t1 LEFT JOIN t2 ON t1.c1=t2.c3 ORDER BY t1.c2; 0|0|TABLE t1 WITH INDEX t1_idx 1|1|TABLE t2 However, when using the alias name =col2= in the =ORDER BY= clause, the index won't be used: sqlite> EXPLAIN QUERY PLAN SELECT t1.c2 AS col2, t2.c4 AS col4 FROM t1 LEFT JOIN t2 ON t1.c1=t2.c3 ORDER BY col2; 0|0|TABLE t1 1|1|TABLE t2 IMHO, the same index should be used in both queries? _2006-Jun-30 13:54:10 by anonymous:_ {linebreak} Not sure whether it's a different issue, but when using a second column in the ORDER BY clause, also no index will be used: sqlite> EXPLAIN QUERY PLAN SELECT t1.c2 AS col2, t2.c4 AS col4 FROM t1 LEFT JOIN t2 ON t1.c1=t2.c3 ORDER BY t1.c2, t2.c4; 0|0|TABLE t1 1|1|TABLE t2 Personally, I'd expect sqlite to use the =t1_idx= index as well to fulfill the primary ordering? ---- _2006-Jun-30 16:04:31 by anonymous:_ {linebreak} As a workaround try "ORDER BY 1" ---- _2006-Jul-03 08:41:01 by anonymous:_ {linebreak} Sorry, I'm not sure how "ORDER BY 1" would be a workaround, when I really need the results to be sorted by table column data... (I don't want to start a discussion in the bug tracker, so you're welcome to take any suggestions/answers to the sqlite-user mailing list, which I also monitor.) ---- _2006-Jul-03 15:51:11 by anonymous:_ {linebreak} I'm not the poster of previous comment, but ORDER BY (n) order by result column index. 
In your case, using ORDER BY 1, it will be ordered by the first column. ---- _2006-Jul-04 07:33:30 by anonymous:_ {linebreak} Thanks for the clarification. This would be a workaround for the first problem mentioned, but when sorting by two columns, still no index will be used, even if using =ORDER BY 1,2= ---- _2006-Jul-04 21:34:24 by anonymous:_ {linebreak} SQLite really needs a way to explicitly state which index(es) to use. Perhaps something similar to Oracle's comment hints.
#e8e8bd 1817 new active 2006 May anonymous 2006 Jun 1 2 Patch to enable SQLite again on OS/2 As we urgently need OS/2 support to be able to build Mozilla applications (Firefox, SeaMonkey, Thunderbird) we have to activate it again in SQLite CVS. Daniel Lee Kruse ported the two C files with some input from Andy Willis and me (Peter Weilbacher). I hope this is the right way to go about this. The OS/2 changes from this and from follow-up ticket #1836 were checked in some time ago, so I am marking this fixed.
#e8e8bd 1874 new active 2006 Jun anonymous CodeGen 2006 Jun 2 3 IN is much slower than making separate queries I have a 500,000-row table with the following schema: CREATE TABLE foo(bar INTEGER NOT NULL, baz INTEGER NOT NULL, biz INTEGER NULL, buzz INTEGER NULL); CREATE INDEX biz ON foo (bar, baz, biz); CREATE INDEX buzz ON foo (bar, baz, buzz); I'm performing the query: SELECT * FROM foo WHERE bar IN (0,1) AND baz IN (0,1) AND (biz IN (0,1) OR buzz IN (0,1)); On both Apple's 3.1.3 and a stock 3.3.6 on Mac OS X 10.4.6 PowerPC, this query consistently takes 3 seconds to execute. However, if I unroll the query: SELECT * FROM foo WHERE bar=0 AND baz=0 AND biz=0; SELECT * FROM foo WHERE bar=0 AND baz=0 AND buzz=0; ...and so on for the other values of bar and baz... it takes 0.2 seconds. I was able to reproduce this with the attached scripts by doing: ./mkdb > mkdb.sql sqlite3 testdb < mkdb.sql time sqlite3 testdb < q1 > /dev/null time sqlite3 testdb < q2 > /dev/null The database was recreated between the 3.1.3 and the 3.3.6 testing. EXPLAIN on the IN query segfaults on both 3.1.3 and 3.3.6, otherwise I'd attach that output. I'll write up a separate bug for that :) _2006-Jun-27 20:52:35 by anonymous:_ {linebreak} [3315] fixed the EXPLAIN crash, so I've attached EXPLAIN output for the IN query. ---- _2006-Jun-27 22:01:31 by drh:_ {linebreak} On SuSE Linux 10.0 running on a Dell Latitude D600 laptop and using the latest code from CVS, I'm getting times of 1.2s and 0.45s. If I create an additional index: CREATE INDEX i2 ON foo(bar,baz,biz,buzz); then the time for the first query drops to 0.7s. Note, however, the q1 and q2 are very different queries. In particular q2 omits half the rows. The (rough) equivalent of q2 is this: SELECT * FROM foo WHERE bar IN (0,1) AND baz IN (0,1) AND (biz=0 or buzz=0); If I modify q2 so that it includes the biz=1 and buzz=1 cases, its query time increases to 0.7s, the same as q1 with the added index. 
Further note that q1 and q2 are still not exactly the same. Q2 includes multiple copies of rows where biz IN (0,1) AND buzz IN (0,1) where q1 only includes such lines once. There are only 120 such lines in the database, but it still a difference. I will recast this ticket as a request for performance enhancements on queries using the IN operator on a fixed list of values. ---- _2006-Jun-28 01:52:32 by drh:_ {linebreak} Note to self: The expression +x IN (1,2,3,...) appears to be faster than +x=1 OR +x=2 OR ... when there are 6 or more terms. With 5 terms or fewer, a string of ORs is faster.
#f2dcdc 1872 code active 2006 Jun anonymous 2006 Jun 4 3 sqlite3_open doesn't support RFC1738 format for filename sqlite3_open only supports UTF-8 encoding as a format for its filename argument (http://www.sqlite.org/capi3ref.html#sqlite3_open). If your application receives a RFC1738 encoded URL for filename, that has to be UTF-8-encoded for use in SQLite. It would be nice if that could be instead passed directly to sqlite3_open. Is RFC1738 URL decoding support planned for SQLite? (RFC1738 link: http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc1738.html)
#e8e8bd 1871 event active 2006 Jun anonymous 2006 Jun 2 3 VACUUM should not change 3.x file format VACUUM should not "upgrade" the file format as it violates the principle of least astonishment. VACUUM upgrading the file format prevents users working with older versions of SQLite 3.x from sharing a common database file with users of more recent versions of the library. At the very least, if a version of SQLite can not produce the same version of the database file after VACUUM, it should do nothing, or perhaps return a warning. _2006-Jun-27 13:25:30 by drh:_ {linebreak} What if a user wants to upgrade the file format so that they can take advantage of descending indices, for example? How should they accomplish that? Should they be forced to dump and restore the database? ---- _2006-Jun-27 14:10:16 by anonymous:_ {linebreak} Manually dumping the old database and restoring it with a more recent version of SQLite is reasonable given the incompatible nature of the change. ---- _2006-Jun-27 15:30:32 by anonymous:_ {linebreak} I agree. Can't SQLite do the equivalent of pragma legacy_file_format=1 on the new database created by the VACUUM command if the current file is in the old format? The dump and restore operation is usable for updating the file format, but does require two installed versions of SQLite, one old and one new. Couldn't you add a new command or pragma that would do the format upgrade. Perhaps an optional upgrade argument to the VACUUM command that defualts to off could be used. If it is off the format of the new database is unchanged, if it is on, the format is upgraded. The VACUUM command doesn't seem like the obvious place tto look for a format upgrade option though. A new UPGRADE command would be more obvious, even if it is actually implemented by the same routines that do the VACUUM. It may be better to use something a little lower profile than a new command, perhaps a "PRAGMA upgrade_file_format" would be better. 
It would also allow future extension to provide a format version number that the database is to be upgraded to (ie PRAGMA upgrade_file_format=4). ---- _2006-Jun-27 17:55:23 by anonymous:_ {linebreak} it's a simple issue. since SQLite know the file format of database that is opened, on VACUUM it should create a file with the same version (such like a internal _sqlite3_open_ex() call that receive the file format that should be created. since SQLite can read/write those formats, there's no reason for doing a 'file format' upgrade.
#e8e8bd 1869 doc active 2006 Jun anonymous Unknown 2006 Jun anonymous 4 4 Website typo http://www.sqlite.org/capi3ref.html#sqlite3_exec has this: "As an example, suppose the query result where this table:" Instead of "where," "were" should have been used.
#f2dcdc 1867 code active 2006 Jun anonymous BTree 2006 Jun 1 3 Access Violation after set a new page_size An access violation occurred on W2K when I try to create a new table in the empty database. There was a following sequence of SQL commands select count(*)==2 as cnt from sqlite_master where type='table' and tbl_name in ('tbl1', 'tbl2'); so if cnt is equal 0 then I execute command pragma page_size=4096; and then create a new table. I guess that some of internal structures by this time have been initialized and so when I try to create new table the page_size is lower than needed. we overwrite memory in the function zeroPage in instruction: memset(&data[hdr], 0, pBt->usableSize - hdr); Size of structure data less than pBt->usableSize Below result after memset 0:000> dt MemPage 004c3cf0
+0x000 isInit : 0 ''
+0x001 idxShift : 0 ''
+0x002 nOverflow : 0 ''
+0x003 intKey : 0x1 ''
+0x004 leaf : 0x1 ''
+0x005 zeroData : 0 ''
+0x006 leafData : 0x1 ''
+0x007 hasData : 0 ''
+0x008 hdrOffset : 0 ''
+0x009 childPtrSize : 0 ''
+0x00a maxLocal : 0
+0x00c minLocal : 0
+0x00e cellOffset : 0
+0x010 idxParent : 0
+0x012 nFree : 0xf94
+0x014 nCell : 0
+0x018 aOvfl : [5] _OvflCell
+0x040 pBt : (null)
+0x044 aData : (null)
+0x048 pgno : 0
+0x04c pParent : (null)
0012ea50 10006861 004c3cf0 0000000d 00000064 dblited!decodeFlags+0x80 [D:\sqllite\sqlite-3.3.6\btree.c @ 1349]
0012ea70 10006710 004c3cf0 0000000d 004c3cf0 dblited!zeroPage+0xd0 [D:\sqllite\sqlite-3.3.6\btree.c @ 1466]
0012ea8c 10006215 002fd390 002fd390 00000000 dblited!newDatabase+0xf9 [D:\sqllite\sqlite-3.3.6\btree.c @ 2061]
0012eaa0 10052ba0 002f7c30 00000001 0012f0e4 dblited!sqlite3BtreeBeginTrans+0xd6 [D:\sqllite\sqlite-3.3.6\btree.c @ 2141]
0012f0a4 10057cf5 004c3d80 0012f13c 0012f478 dblited!sqlite3VdbeExec+0x2c6d [D:\sqllite\sqlite-3.3.6\vdbe.c @ 2386]
0012f0e4 00412801 004c3d80 0012f1d4 0012f478 dblited!sqlite3_step+0x1db [D:\sqllite\sqlite-3.3.6\vdbeapi.c @ 223]
#e8e8bd 1864 new active 2006 Jun anonymous 2006 Jun drh 3 3 List available SQL functions Once LoadableExtensions are implemented it would be really nice to be able to get a list of all available SQL functions and number[s] of arguments.{linebreak} Mostly I'm thinking about a scenario where you'd load someone else's library from SQLite shell. Hopefully those will become common once infrastructure is in place. I guess SQLite already keeps track of all available functions internally? _2006-Jun-21 19:48:07 by drh:_ {linebreak} Perhaps this could be done using a {link: wiki?p=VirtualTables virtual table}. ---- _2006-Jun-21 20:15:27 by anonymous:_ {linebreak} Yes, this seems to be a really nice fit for virtual tables. I lurked through the sources briefly, and it seems like all required info is already there, in =struct sqlite3=. Right?
#f2dcdc 1862 code active 2006 Jun anonymous TclLib 2006 Jun tclguy 1 1 SQLite cannot load/import data from file I found the problem when I tried to load a data file into a table. To reproduce the problem, I got a mini testcase. DATA FILE - test.dat --------------------------- 1 0 0 2 90000 0 3 366000 0 --------------------------- Log from SQLite: ------------------------------------------------------ khronos-yajun>sqlite3 test SQLite version 3.3.6 Enter ".help" for instructions sqlite> create table test (id INT, x1 INT, x2 INT); sqlite> .import test.dat test test.dat line 1: expected 3 columns of data but found 1 sqlite> .exit ------------------------------------------------------- The problem also exists when I use tcl wrapper (sql copy abort test test.dat). I looked into the code in src/tclsqlite.c, In Lines 1045 nByte = strlen(zSql); 1046 rc = sqlite3_prepare(pDb->db, zSql, 0, &pStmt, 0); 1047 sqlite3_free(zSql); Is the third argument of sqlite3_prepare supposed to be the length of zSql, hence nByte? Also in lines 1070 zSql[j++] = ')'; 1071 zSql[j] = 0; 1072 rc = sqlite3_prepare(pDb->db, zSql, 0, &pStmt, 0); 1073 free(zSql); If I change these two places to reflect the length of zSql, I seem to succeed. Yajun _2006-Sep-27 16:25:47 by anonymous:_ {linebreak} This is a duplicate of #1797
#f2dcdc 1861 code active 2006 Jun anonymous Pager 2006 Jun 1 1 Problem in using Triggers and multithreading I am using SQLite3 database with triggers. This database is used by my processing engine which is having 10 threads accessing the same database. Trigger is used to update and insert records in a table and that very table is also updated by threads. Processing engine crashes whenever a trigger updates or inserts a record in the table. Can you tell me how to configure my existing engine to avoid crashing? Is it safe to use trigger?
#e8e8bd 1860 doc active 2006 Jun anonymous Pager 2006 Jun danielk1977 1 1 Problem in using SQLite3 with trigger and multithreading I am using SQLite3 database with triggers. This database is used by my processing engine which is having 10 threads accessing the same database. Trigger is used to update and insert records in a table and that very table is also updated by threads. Processing engine crashes whenever a trigger updates or inserts a record in the table.
#e8e8bd 1858 doc active 2006 Jun anonymous 2006 Jun 5 5 Typo: Pearl -> Perl sqlite/www/index.tcl 1.139 s/Pearl/Perl/g
#f2dcdc 1857 code active 2006 Jun anonymous 2006 Jun 3 4 Can't use ISO 8601 dates with time zone designator 'Z' Date-times of the format 'YYYY-MM-DDTHH:MM:SS.sssZ' do not return a valid date. e.g. datetime('2006-06-19T15:44:07.466940Z'). The 'Z' indicates UTC.{linebreak} This is the format generated by 'svn log --xml'. More details of the format at {link: http://www.w3.org/TR/NOTE-datetime}. This 'svn diff' fixes the problem: --- date.c (revision 656) +++ date.c (working copy) @@ -136,7 +136,7 @@ ** ** If the parse is successful, write the number of minutes ** of change in *pnMin and return 0. If a parser error occurs, -** return 0. +** return 1. ** ** A missing specifier is not considered an error. */ @@ -200,7 +200,10 @@ p->h = h; p->m = m; p->s = s + ms; - if( parseTimezone(zDate, p) ) return 1; + if (*zDate == 'Z') + p->tz = 0; + else if( parseTimezone(zDate, p) ) + return 1; p->validTZ = p->tz!=0; return 0; }
PS Should probably add a test case to 'sqlite/test/date.test'.
#f2dcdc 1856 code active 2006 Jun anonymous 2006 Jun 2 3 SQLITE_OMIT_UTF16 breaks 'make test' When compiling sqlite 3.3.6 with -DSQLITE_OMIT_UTF16 and you say 'make test' it fails: make test ./libtool --mode=link gcc -g -O2 -DOS_UNIX=1 -DHAVE_USLEEP=1 -DHAVE_FDATASYNC=1 -I. -I./src -DSQLITE_DEBUG=2 -DSQLITE_MEMDEBUG=2 -DSQLITE_OMIT_UTF16 -I/usr/include -DTHREADSAFE=1 -DSQLITE_THREAD_OVERRIDE_LOCK=-1 -DSQLITE_OMIT_CURSOR -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ -DTEMP_STORE=1 -o testfixture ./src/btree.c ./src/date.c ./src/func.c ./src/os.c ./src/os_unix.c ./src/os_win.c ./src/os_os2.c ./src/pager.c ./src/pragma.c ./src/printf.c ./src/test1.c ./src/test2.c ./src/test3.c ./src/test4.c ./src/test5.c ./src/test6.c ./src/test7.c ./src/test_async.c ./src/test_md5.c ./src/test_server.c ./src/utf.c ./src/util.c ./src/vdbe.c ./src/where.c ./src/tclsqlite.c \ libsqlite3.la -L/usr/lib -ltcl8.4 -ldl -lpthread -lieee -lm gcc -g -O2 -DOS_UNIX=1 -DHAVE_USLEEP=1 -DHAVE_FDATASYNC=1 -I. -I./src -DSQLITE_DEBUG=2 -DSQLITE_MEMDEBUG=2 -DSQLITE_OMIT_UTF16 -I/usr/include -DTHREADSAFE=1 -DSQLITE_THREAD_OVERRIDE_LOCK=-1 -DSQLITE_OMIT_CURSOR -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 -DTEMP_STORE=1 -o .libs/testfixture ./src/btree.c ./src/date.c ./src/func.c ./src/os.c ./src/os_unix.c ./src/os_win.c ./src/os_os2.c ./src/pager.c ./src/pragma.c ./src/printf.c ./src/test1.c ./src/test2.c ./src/test3.c ./src/test4.c ./src/test5.c ./src/test6.c ./src/test7.c ./src/test_async.c ./src/test_md5.c ./src/test_server.c ./src/utf.c ./src/util.c ./src/vdbe.c ./src/where.c ./src/tclsqlite.c ./.libs/libsqlite3.so -L/usr/lib -ltcl8.4 -ldl -lpthread -lieee -lm -Wl,--rpath -Wl,/home/cla/proj/caissadb/sqlite/sqlite/lib ./src/test1.c: In function 'Sqlitetest1_Init': ./src/test1.c:3742: error: 'unaligned_string_counter' undeclared (first use in this function) ./src/test1.c:3742: error: (Each undeclared identifier is reported only once ./src/test1.c:3742: error: for each function it appears in.) 
make: *** [testfixture] Error 1 Maybe there is a '#ifndef SQLITE_OMIT_UTF16' / '#endif' needed around Tcl_LinkVar(interp, "unaligned_string_counter", (char*)&unaligned_string_counter, TCL_LINK_INT); in Line 3742 in file src/test1.c? Regards.
#f2dcdc 1853 code active 2006 Jun anonymous 2006 Jun 3 4 sqlite3_analyzer-3.3.4 integer overflows on large database sqlite3_analyzer-3.3.4 reports senseless values (negative bytes and percents) for some fields of my 28 GB database: Examples: Bytes of storage consumed............. 291456000 Bytes of payload...................... -1382746251 -474.4% Average payload per entry............. -3.80 (I'll attached the full output separatly.) I'm using the pre-built Linux binary of sqlite3_analyzer-3.3.4 from the sqlite homepage. My system is cn@jehova:~/misc/jibladze> uname -a Linux jehova 2.6.13-15.10-smp #1 SMP Fri May 12 16:11:24 UTC 2006 x86_64 x86_64 x86_64 GNU/Linux _2006-Jun-18 17:28:20 by anonymous:_ {linebreak} Here's the full output: cn@jehova:~> du -ah /playground/cn/yacop-data/gfr/steenrod-5-E-1Rfull 28G /playground/cn/yacop-data/gfr/steenrod-5-E-1Rfull cn@jehova:~> sqlite-analyzer /playground/cn/yacop-data/gfr/steenrod-5-E-1Rfull Analyzing table chartinfo... Analyzing table complog... Analyzing table fragments... Analyzing table generators... Analyzing table header... Analyzing table sqlite_master... Analyzing table worklog... Analyzing index sdegind of table fragments... Analyzing index sqlite_autoindex_header_1 of table header... Analyzing index sqlite_autoindex_worklog_1 of table worklog... /** Disk-Space Utilization Report For /playground/cn/yacop-data/gfr/steenrod-5-E-1Rfull *** As of 2006-Jun-18 18:55:01 Page size in bytes.................... 1024 Pages in the whole file (measured).... 28439973 Pages in the whole file (calculated).. 28439972 Pages that store data................. 28439972 100.000% Pages on the freelist (per header).... 0 0.0% Pages on the freelist (calculated).... 1 0.0% Pages of auto-vacuum overhead......... 0 0.0% Number of tables in the database...... 7 Number of indices..................... 3 Number of named indices............... 1 Automatically generated indices....... 2 Size of the file in bytes............. 
29122532352 Bytes of user payload stored.......... -1818729787 -6.2% *** Page counts for all tables with their indices ******************** FRAGMENTS............................. 28409448 99.89% COMPLOG............................... 13559 0.048% CHARTINFO............................. 11151 0.039% GENERATORS............................ 5797 0.020% WORKLOG............................... 10 0.0% SQLITE_MASTER......................... 5 0.0% HEADER................................ 2 0.0% *** All tables and indices ******************************************* Percentage of total database.......... 100.000% Number of entries..................... 728140955 Bytes of storage consumed............. -942239744 Bytes of payload...................... 1093495310 -116.1% Average payload per entry............. 1.50 Average unused bytes per entry........ -2.73 Average fanout........................ 88.00 Maximum payload per entry............. 1157 Entries that use overflow............. 351 0.0% Index pages used...................... 270372 Primary pages used.................... 28169249 Overflow pages used................... 351 Total pages used...................... 28439972 Unused bytes on index pages........... 32954241 11.9% Unused bytes on primary pages......... -2022053395 165.815% Unused bytes on overflow pages........ 37488 10.4% Unused bytes on all pages............. -1989061666 211.099% *** All tables ******************************************************* Percentage of total database.......... 84.3% Number of entries..................... 364670985 Bytes of storage consumed............. -1233700864 Bytes of payload...................... -1818727376 147.420% Average payload per entry............. -4.99 Average unused bytes per entry........ 4.87 Average fanout........................ 88.00 Maximum payload per entry............. 1157 Entries that use overflow............. 351 0.0% Index pages used...................... 270372 Primary pages used.................... 
23690315 Overflow pages used................... 351 Total pages used...................... 23961038 Unused bytes on index pages........... 32954241 11.9% Unused bytes on primary pages......... 1742865277 -115.4% Unused bytes on overflow pages........ 37488 10.4% Unused bytes on all pages............. 1775857006 -143.9% *** All indices ****************************************************** Percentage of total database.......... 15.7% Number of entries..................... 363469970 Bytes of storage consumed............. 291461120 Bytes of payload...................... -1382744610 -474.4% Average payload per entry............. -3.80 Average unused bytes per entry........ 1.46 Maximum payload per entry............. 17 Entries that use overflow............. 0 0.0% Primary pages used.................... 4478934 Overflow pages used................... 0 Total pages used...................... 4478934 Unused bytes on primary pages......... 530048624 181.859% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 530048624 181.859% *** Table CHARTINFO ************************************************** Percentage of total database.......... 0.039% Number of entries..................... 560603 Bytes of storage consumed............. 11418624 Bytes of payload...................... 7708145 67.5% Average payload per entry............. 13.75 Average unused bytes per entry........ 0.31 Average fanout........................ 99.00 Maximum payload per entry............. 14 Entries that use overflow............. 0 0.0% Index pages used...................... 112 Primary pages used.................... 11039 Overflow pages used................... 0 Total pages used...................... 11151 Unused bytes on index pages........... 14280 12.5% Unused bytes on primary pages......... 160371 1.4% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 
174651 1.5% *** Table COMPLOG **************************************************** Percentage of total database.......... 0.048% Number of entries..................... 322413 Bytes of storage consumed............. 13884416 Bytes of payload...................... 11350473 81.7% Average payload per entry............. 35.20 Average unused bytes per entry........ 1.20 Average fanout........................ 98.00 Maximum payload per entry............. 45 Entries that use overflow............. 0 0.0% Index pages used...................... 138 Primary pages used.................... 13421 Overflow pages used................... 0 Total pages used...................... 13559 Unused bytes on index pages........... 19411 13.7% Unused bytes on primary pages......... 367295 2.7% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 386706 2.8% *** Table FRAGMENTS and all its indices ****************************** Percentage of total database.......... 99.89% Number of entries..................... 726939530 Bytes of storage consumed............. -973496320 Bytes of payload...................... 1070540938 -110.0% Average payload per entry............. 1.47 Average unused bytes per entry........ -2.74 Average fanout........................ 88.00 Maximum payload per entry............. 1157 Entries that use overflow............. 351 0.0% Index pages used...................... 270061 Primary pages used.................... 28139036 Overflow pages used................... 351 Total pages used...................... 28409448 Unused bytes on index pages........... 32910337 11.9% Unused bytes on primary pages......... -2022636857 161.759% Unused bytes on overflow pages........ 37488 10.4% Unused bytes on all pages............. -1989689032 204.386% *** Table FRAGMENTS w/o any indices ********************************** Percentage of total database.......... 84.1% Number of entries..................... 363469765 Bytes of storage consumed............. 
-1264952320 Bytes of payload...................... -1841680107 145.593% Average payload per entry............. -5.07 Average unused bytes per entry........ 4.88 Average fanout........................ 88.00 Maximum payload per entry............. 1157 Entries that use overflow............. 351 0.0% Index pages used...................... 270061 Primary pages used.................... 23660107 Overflow pages used................... 351 Total pages used...................... 23930519 Unused bytes on index pages........... 32910337 11.9% Unused bytes on primary pages......... 1742284627 -113.0% Unused bytes on overflow pages........ 37488 10.4% Unused bytes on all pages............. 1775232452 -140.3% *** Indices of table FRAGMENTS *************************************** Percentage of total database.......... 15.7% Number of entries..................... 363469765 Bytes of storage consumed............. 291456000 Bytes of payload...................... -1382746251 -474.4% Average payload per entry............. -3.80 Average unused bytes per entry........ 1.46 Maximum payload per entry............. 9 Entries that use overflow............. 0 0.0% Primary pages used.................... 4478929 Overflow pages used................... 0 Total pages used...................... 4478929 Unused bytes on primary pages......... 530045812 181.861% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 530045812 181.861% *** Table GENERATORS ************************************************* Percentage of total database.......... 0.020% Number of entries..................... 317989 Bytes of storage consumed............. 5936128 Bytes of payload...................... 3889213 65.5% Average payload per entry............. 12.23 Average unused bytes per entry........ 0.18 Average fanout........................ 98.00 Maximum payload per entry............. 14 Entries that use overflow............. 0 0.0% Index pages used...................... 
59 Primary pages used.................... 5738 Overflow pages used................... 0 Total pages used...................... 5797 Unused bytes on index pages........... 8350 13.8% Unused bytes on primary pages......... 49171 0.84% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 57521 0.97% *** Table HEADER and all its indices ********************************* Percentage of total database.......... 0.0% Number of entries..................... 10 Bytes of storage consumed............. 2048 Bytes of payload...................... 187 9.1% Average payload per entry............. 18.70 Average unused bytes per entry........ 180.70 Maximum payload per entry............. 50 Entries that use overflow............. 0 0.0% Primary pages used.................... 2 Overflow pages used................... 0 Total pages used...................... 2 Unused bytes on primary pages......... 1807 88.2% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 1807 88.2% *** Table HEADER w/o any indices ************************************* Percentage of total database.......... 0.0% Number of entries..................... 5 Bytes of storage consumed............. 1024 Bytes of payload...................... 124 12.1% Average payload per entry............. 24.80 Average unused bytes per entry........ 173.80 Maximum payload per entry............. 50 Entries that use overflow............. 0 0.0% Primary pages used.................... 1 Overflow pages used................... 0 Total pages used...................... 1 Unused bytes on primary pages......... 869 84.9% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 869 84.9% *** Indices of table HEADER ****************************************** Percentage of total database.......... 0.0% Number of entries..................... 5 Bytes of storage consumed............. 1024 Bytes of payload...................... 63 6.2% Average payload per entry............. 
12.60 Average unused bytes per entry........ 187.60 Maximum payload per entry............. 17 Entries that use overflow............. 0 0.0% Primary pages used.................... 1 Overflow pages used................... 0 Total pages used...................... 1 Unused bytes on primary pages......... 938 91.6% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 938 91.6% *** Table SQLITE_MASTER ********************************************** Percentage of total database.......... 0.0% Number of entries..................... 10 Bytes of storage consumed............. 5120 Bytes of payload...................... 2411 47.1% Average payload per entry............. 241.10 Average unused bytes per entry........ 249.70 Average fanout........................ 4.00 Maximum payload per entry............. 483 Entries that use overflow............. 0 0.0% Index pages used...................... 1 Primary pages used.................... 4 Overflow pages used................... 0 Total pages used...................... 5 Unused bytes on index pages........... 891 87.0% Unused bytes on primary pages......... 1606 39.2% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 2497 48.8% *** Table WORKLOG and all its indices ******************************** Percentage of total database.......... 0.0% Number of entries..................... 400 Bytes of storage consumed............. 10240 Bytes of payload...................... 3943 38.5% Average payload per entry............. 9.86 Average unused bytes per entry........ 10.46 Average fanout........................ 5.00 Maximum payload per entry............. 13 Entries that use overflow............. 0 0.0% Index pages used...................... 1 Primary pages used.................... 9 Overflow pages used................... 0 Total pages used...................... 10 Unused bytes on index pages........... 972 94.9% Unused bytes on primary pages......... 
3212 34.9% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 4184 40.9% *** Table WORKLOG w/o any indices ************************************ Percentage of total database.......... 0.0% Number of entries..................... 200 Bytes of storage consumed............. 6144 Bytes of payload...................... 2365 38.5% Average payload per entry............. 11.82 Average unused bytes per entry........ 11.55 Average fanout........................ 5.00 Maximum payload per entry............. 13 Entries that use overflow............. 0 0.0% Index pages used...................... 1 Primary pages used.................... 5 Overflow pages used................... 0 Total pages used...................... 6 Unused bytes on index pages........... 972 94.9% Unused bytes on primary pages......... 1338 26.1% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 2310 37.6% *** Indices of table WORKLOG ***************************************** Percentage of total database.......... 0.0% Number of entries..................... 200 Bytes of storage consumed............. 4096 Bytes of payload...................... 1578 38.5% Average payload per entry............. 7.89 Average unused bytes per entry........ 9.37 Maximum payload per entry............. 9 Entries that use overflow............. 0 0.0% Primary pages used.................... 4 Overflow pages used................... 0 Total pages used...................... 4 Unused bytes on primary pages......... 1874 45.8% Unused bytes on overflow pages........ 0 Unused bytes on all pages............. 1874 45.8% *** Definitions ****************************************************** Page size in bytes The number of bytes in a single page of the database file. Usually 1024. 
Number of pages in the whole file The number of 1024-byte pages that go into forming the complete database Pages that store data The number of pages that store data, either as primary B*Tree pages or as overflow pages. The number at the right is the data pages divided by the total number of pages in the file. Pages on the freelist The number of pages that are not currently in use but are reserved for future use. The percentage at the right is the number of freelist pages divided by the total number of pages in the file. Pages of auto-vacuum overhead The number of pages that store data used by the database to facilitate auto-vacuum. This is zero for databases that do not support auto-vacuum. Number of tables in the database The number of tables in the database, including the SQLITE_MASTER table used to store schema information. Number of indices The total number of indices in the database. Number of named indices The number of indices created using an explicit CREATE INDEX statement. Automatically generated indices The number of indices used to implement PRIMARY KEY or UNIQUE constraints on tables. Size of the file in bytes The total amount of disk space used by the entire database files. Bytes of user payload stored The total number of bytes of user payload stored in the database. The schema information in the SQLITE_MASTER table is not counted when computing this number. The percentage at the right shows the payload divided by the total file size. Percentage of total database The amount of the complete database file that is devoted to storing information described by this category. Number of entries The total number of B-Tree key/value pairs stored under this category. Bytes of storage consumed The total amount of disk space required to store all B-Tree entries under this category. The is the total number of pages used times the pages size. Bytes of payload The amount of payload stored under this category. 
Payload is the data part of table entries and the key part of index entries. The percentage at the right is the bytes of payload divided by the bytes of storage consumed. Average payload per entry The average amount of payload on each entry. This is just the bytes of payload divided by the number of entries. Average unused bytes per entry The average amount of free space remaining on all pages under this category on a per-entry basis. This is the number of unused bytes on all pages divided by the number of entries. Maximum payload per entry The largest payload size of any entry. Entries that use overflow The number of entries that user one or more overflow pages. Total pages used This is the number of pages used to hold all information in the current category. This is the sum of index, primary, and overflow pages. Index pages used This is the number of pages in a table B-tree that hold only key (rowid) information and no data. Primary pages used This is the number of B-tree pages that hold both key and data. Overflow pages used The total number of overflow pages used for this category. Unused bytes on index pages The total number of bytes of unused space on all index pages. The percentage at the right is the number of unused bytes divided by the total number of bytes on index pages. Unused bytes on primary pages The total number of bytes of unused space on all primary pages. The percentage at the right is the number of unused bytes divided by the total number of bytes on primary pages. Unused bytes on overflow pages The total number of bytes of unused space on all overflow pages. The percentage at the right is the number of unused bytes divided by the total number of bytes on overflow pages. Unused bytes on all pages The total number of bytes of unused space on all primary and overflow pages. The percentage at the right is the number of unused bytes divided by the total number of bytes. 
********************************************************************** The entire text of this report can be sourced into any SQL database engine for further analysis. All of the text above is an SQL comment. The data used to generate this report follows: */ BEGIN; CREATE TABLE space_used( name clob, -- Name of a table or index in the database file tblname clob, -- Name of associated table is_index boolean, -- TRUE if it is an index, false for a table nentry int, -- Number of entries in the BTree leaf_entries int, -- Number of leaf entries payload int, -- Total amount of data stored in this table or index ovfl_payload int, -- Total amount of data stored on overflow pages ovfl_cnt int, -- Number of entries that use overflow mx_payload int, -- Maximum payload size int_pages int, -- Number of interior pages used leaf_pages int, -- Number of leaf pages used ovfl_pages int, -- Number of overflow pages used int_unused int, -- Number of unused bytes on interior pages leaf_unused int, -- Number of unused bytes on primary pages ovfl_unused int -- Number of unused bytes on overflow pages ); INSERT INTO space_used VALUES('chartinfo','chartinfo',0,571641,560603,7708145,0,0,14,112,11039,0,14280,160371,0); INSERT INTO space_used VALUES('complog','complog',0,335833,322413,11350473,0,0,45,138,13421,0,19411,367295,0); INSERT INTO space_used VALUES('fragments','fragments',0,387129871,363469765,19633156373,320532,351,1157,270061,23660107,351,32910337,1742284627,37488); INSERT INTO space_used VALUES('generators','generators',0,323726,317989,3889213,0,0,14,59,5738,0,8350,49171,0); INSERT INTO space_used VALUES('header','header',0,5,5,124,0,0,50,0,1,0,0,869,0); INSERT INTO space_used VALUES('sqlite_master','sqlite_master',0,13,10,2411,0,0,483,1,4,0,891,1606,0); INSERT INTO space_used VALUES('worklog','worklog',0,204,200,2365,0,0,13,1,5,0,972,1338,0); INSERT INTO space_used VALUES('sdegind','fragments',1,363469765,363469765,2912221045,0,0,9,0,4478929,0,0,530045812,0); INSERT INTO 
space_used VALUES('sqlite_autoindex_header_1','header',1,5,5,63,0,0,17,0,1,0,0,938,0); INSERT INTO space_used VALUES('sqlite_autoindex_worklog_1','worklog',1,200,200,1578,0,0,9,0,4,0,0,1874,0); COMMIT;
#f2dcdc 1851 code active 2006 Jun anonymous Unknown 2006 Jun 2 1 USE "ORDER BY" error on uClinux when I use "ORDER BY" function in a "select" on uClinux 2.4.24, I get an error:"SQL error or missing database" but the same program runs on windows or Linux OK. _2006-Jun-16 11:20:15 by drh:_ {linebreak} This is certainly a strange error. Combined with #1850, it suggests a problem with your build, not a problem in SQLite. I have no ability to use or run uCLinux. So if the error cannot be reproduced on a desktop system, there is not much I can do to address the problem. I am afraid you are on your own on this one. ---- _2006-Jun-19 03:12:31 by anonymous:_ {linebreak} I'm just wondering that SQLite 3.2.8 runs on this uClinux system OK but SQLite 3.3.5 is error.
#f2dcdc 1850 code active 2006 Jun anonymous Unknown 2006 Jun 2 1 NUMERIC data type ERROR when read on uClinux I have updated some data of a table's NUMERIC TYPE column on Windows or Linux,but when I use "select *......." to read on uClinux 2.4.24,I get the wrong value,example:the data I've written is 12.5,but readback is 2.3534826093695e -18.5(use the sqlite3_column_text API). I tried to get the value using the "sqlite3_column_double" API,but the result is also wrong; But when I update some data with this column on uClinux,I can read the data right! _2006-Jun-16 01:53:24 by drh:_ {linebreak} What CPU is this happening on? SQLite assumes that floating point values are stored as IEEE 64-bit floats in the same byte order as a 64-bit integer. If your chip does not match this expectation, then floating point won't work.
#f2dcdc 1838 code active 2006 Jun anonymous 2006 Jun 5 5 test types3-1.3 fails on 64-bit Linux I just compiled 3.3.6 on my 64-bit Linux system (OpenSuse 10, using a self-compiled Tcl 8.4.11) and got one failure from "make test": types3-1.3... Expected: [wideInt integer] Got: [int integer] This seems to be an error in the test suite itself: Changing set V [expr {1+123456789012345}] to set V [expr {wide(1+123456789012345)}] gets rid of the failure.
#e8e8bd 1837 new active 2006 Jun anonymous Pager 2006 Jun 4 4 Deadlock detection would be best reported using explicit error code. As discussed in email (*), an explicit deadlock error code might be beneficial for an application to detect and recover from deadlock. Deadlock is different to SQLITE_BUSY, and should be notified as such (IMHO). (*) http://www.mail-archive.com/sqlite-users%40sqlite.org/msg15979.html _2006-Jun-12 11:23:49 by drh:_ {linebreak} Changing the integer return code would not be a backwards compatible change, so that approach must be rejected. But we will consider some mechanism to make it easier for programmers to figure out that they are dealing with deadlock and not a temporary delay. ---- _2006-Jun-12 15:43:59 by anonymous:_ {linebreak} Is there a fatal return code? If so, perhaps it could be overloaded with an additional function to determine if this 'fatal' error is actually a deadlock. Deadlocks are a common return code from almost every database I've ever programmed for. I am not sure how you can work around it otherwise.
#e8e8bd 1833 doc active 2006 Jun anonymous Unknown 2006 Jun drh 4 3 PRAGMA legacy_file_format not documented [2922] introduces the *legacy_file_format* pragma, but it's not documented anywhere. At the very least, it should be mentioned in /sqlite/www/pragma.tcl, unless there's some better way to have a SQLite 3.3.5 (or so) generate databases usable by older versions of SQLite 3 (Debian stable, for example, ships with 3.2.1).
#e8e8bd 1902 new active 2006 Jul anonymous Parser 2006 Jul 5 3 allow alternate INSERT syntax using SET foo=bar Please support the alternate INSERT statement syntax in your parser that several other database products (and possibly the SQL standard) supports where you can say things like: INSERT INTO foo SET bar='hello', baz='world'; The main part of the statement, which maps field names to values, then is a lot easier for users to work with, either manually or in SQL generators, and can be reused between INSERT and UPDATE statements. This should be simple to add, and I strongly suggest having it in before the next release (eg, as part of 3.3.7). Thank you. -- Darren Duncan
#f2dcdc 1901 code active 2006 Jul anonymous Unknown 2006 Jul adamd 2 2 problem in select request with a alias table I have a table with 3 columns : c0, c1 and c2 My request is: select * from (select *, 'test' as new_col from table) as tmp inner join (select 'test' as new_col) as tmp1 on tmp.new_col = tmp1.new_col; The column's name as a result of this request (sqlite 3-3.3.6) is: |tmp.table.c0|tmp.table.c1|tmp.table.c2|tmp.new_col|tmp1.new_col In sqlite 3-3.2.7, the column's name is: |c0|c1|c2|collected|new_col|new_col Before this version, my request ran on mysql, postgresql and sqlite. Now I don't have the possibility of using this request with the new sqlite version. _2006-Jul-31 10:11:59 by anonymous:_ {linebreak} sorry in In sqlite 3-3.2.7, the column's name is: |c0|c1|c2|new_col|new_col ---- _2007-Jan-08 14:52:43 by anonymous:_ {linebreak} I had a similar problem with SQLite in PHP, see my bug report here: http://bugs.php.net/bug.php?id=40064
#f2dcdc 1900 code active 2006 Jul anonymous Unknown 2006 Jul a.rottmann 1 1 CURRENT_TIMESTAMP keyword not inserting UTC date in column This is the schema for my table. create table char (player varchar(64) NOT NULL default '~', name varchar(64) NOT NULL default '~', date timestamp NOT NULL default current_timestamp) Whenever an insert is made to the table the column 'date' doesn't get a UTC timestamp, it gets a string value 'current_timestamp'. Is my schema wrong? _2006-Jul-30 22:31:06 by anonymous:_ {linebreak} *doesnt get a UTC timestap ---- _2006-Jul-31 00:38:49 by anonymous:_ {linebreak} Works fine for me. What's the exact syntax of your INSERT statement?
#e8e8bd 1898 doc active 2006 Jul anonymous 2006 Jul 5 4 sqlite3_progress_handler still marked experimental in documentation According to DRH's posting on the sqlite-user mailing list, =sqlite3_progress_handler= is no longer experimental and the note in the documentation should be removed. Here's the ticket to track this issue...
#e8e8bd 1896 new active 2006 Jul drh 2006 Jul 1 1 All extensions to be statically linked The new LoadableExtensions mechanism is great for loading new extensions out of external files. But we also need the ability to statically link extensions with an application and load them into database connections as needed. One possible solution would be a new API that takes a fake filename and a function pointer. Any attempt to call sqlite3_load_extension() on that fake filename by any SQLite connection invokes the function pointer rather than actually opening a shared library with that filename. _2006-Jul-26 11:30:28 by anonymous:_ {linebreak} In order to statically link extensions to the application, they'll all need to have unique entry function names. Besides meaning that extensions couldn't be statically linked without possible modification (I somehow suspect "sqlite3_extension_init" will be a popular name), it ensures that the extensions are uniquely identified in the application function namespace. So I _think_ it should just be necessary to have a function to register these function names and pointers, and sqlite3_load_extension() with a NULL =zFile= and appropriate =zProc= would have no trouble finding them in the "registry". A trivial modification to the SQL =load_extension()= function to take just the function name would be a bonus. c. ---- _2006-Jul-26 11:39:59 by anonymous:_ {linebreak} Something else which would be handy for statically linking extensions... It might be useful for a developer to directly call the extension entry function instead of formally registering it (i.e. they only want the extension available to a specific DB handle), in which case there should be a way to provide the =sqlite3_api_routines= structure to the entry function. Just making sure it's mentioned and formally documented in =sqlite.h= should be sufficient.
#e8e8bd 1895 doc active 2006 Jul anonymous 2006 Jul anonymous 4 3 IS operator not documented Hello, I tried to work with varchar fields having NULL values. However, I was not able to find the right operator to catch such values, and none of the operators mentioned in datatype3.html seemed to help. Finally I tried "is null" (or IS NOT NULL) and well, that seems to do what I want. But there are questions: *: is this an intended feature? *: if yes, can I rely on having it in the future versions? *: if this is not an intended feature, what is the right way to match NULL values? *: if that's just a documentation problem, will the documentation be updated? _2006-Jul-19 19:02:17 by anonymous:_ {linebreak} It's part of the SQL standard. "IS" isn't really an operator; it's an optional token that's part of the NULL and NOT NULL predicates.
#f2dcdc 1890 code active 2006 Jul anonymous Unknown 2006 Jul 3 4 double quotes ("") in a query are ambiguous sqlite> SELECT "uuid" FROM objects LIMIT 1;{linebreak} b43c9cdc-0dc8-11db-9475-080020a846a9{linebreak} sqlite> SELECT "uuidx" FROM objects LIMIT 1;{linebreak} uuidx{linebreak} The objects table has a column named uuid; it does not have a column named uuidx. The behaviour of "" depends on whether the contents are a valid column name, and I cannot see when this is desirable behaviour. (I know that `` and '' are the right quotes to use, by the way - I'm just pointing out that "" can surprise people a lot and should probably be fixed or removed) _2006-Jul-13 13:39:37 by anonymous:_ {linebreak} The current behaviour is an SQL standard thing, so SQLite implements it for the sake of compatibility. Certainly most people agree with you that it's a bad thing. ---- _2006-Jul-13 16:56:05 by anonymous:_ {linebreak} Perhaps what we need then is a big fat warning in the manual :) - Peter ---- _2006-Jul-17 00:06:41 by drh:_ {linebreak} Please suggest a specific location in the documentation where I should put a warning about the use of " instead of ' and I will add it. ---- _2006-Jul-27 10:30:12 by anonymous:_ {linebreak} lang_expr.html seems like an obvious choice; perhaps also a sidenote on FAQ question 16 - Peter
#f2dcdc 1885 code active 2006 Jul anonymous Shell 2006 Jul 2 3 sqlite3 .mode insert and .dump do not list column names for selects In sqlite3 .mode insert does not list column names for selects - it should. This makes dumping selected columns from tables when intending to add or delete columns problematic. .dump doesn't list column names either, IMHO it should. Consider sqlite> .mode tabs{linebreak} sqlite> select * from users;{linebreak} ed 2006-07-05 52{linebreak} sqlite> .mode insert{linebreak} sqlite> select abs_tgt from users;{linebreak} INSERT INTO table VALUES(52);{linebreak} sqlite> Obviously the workaround is to hand edit the output SQL _2006-Jul-11 10:20:08 by anonymous:_ {linebreak} I've just noticed it doesn't include the table name in the INSERT statements either.
#f2dcdc 1884 code active 2006 Jul anonymous 2006 Jul 3 2 pragma table_info caches results from previous query this problem is observed with pysqlite's latest windows build 2.3.2 and others. it does not occur on unix-based builds, which is why I suspect the issue is in sqlite, since pysqlite's code is platform-neutral. if you get a result from a "pragma table_info()" call, and do not consume all the results, then a subsequent call to the same statement does not return up-to-date results, i.e. if the table had been dropped in between. it behaves as though the results of "pragma table_info" are globally cached somewhere, ignoring the fact that is was executed again. this test program illustrates the problem: from pysqlite2 import dbapi2 as sqlite connection = sqlite.connect(':memory:') # check for a nonexistent table c = connection.execute("pragma table_info(users)") row = c.fetchone() assert row is None # its good. # now create the table connection.execute(""" create table users ( foo VARCHAR(10), name VARCHAR(40) ) """) # do the table_info pragma. returns two rows c = connection.execute("pragma table_info(users)") # get the first row row = c.fetchone() print row # but then dont get the second, close out the cursor instead. #row2 = c.fetchone() # uncomment to fully consume both rows, then it works c.close() c = None # rollback too. connection.rollback() # now drop the table connection.execute("DROP TABLE users") print "dropped" # now it should be gone, right? well it is, but the pragma # call starts off with the former result set c = connection.execute("pragma table_info(users)") row = c.fetchone() print row assert row is None # fails.
#f2dcdc 1882 code active 2006 Jul anonymous 2006 Jul 1 1 Wrong algorithm of SQLITE_VERSION_NUMBER calculation The sqlite3.h comment describing how numeric version number is calculated is as follows: "The SQLITE_VERSION_NUMBER is an integer with the value (X*100000 + Y*1000 + Z). For example, for version "3.1.1beta", SQLITE_VERSION_NUMBER is set to 3001001." But the value of SQLITE_VERSION_NUMBER is greater than the equation above suggests. The value X*100000 should be changed to X*1000000 (one million).
#e8e8bd 1647 doc active 2006 Jan anonymous Unknown 2006 Jan paul 3 1 i want to use this lib in my project HI, I want to use this sqlite source in my project for quick firing of queries. I want detailed doc of source or how to use this source in my project. Hoping for quick reply, Bye thanks & Regards Sumant Kadam 9422615104 _2006-Jan-30 20:43:26 by anonymous:_ {linebreak} This is not a bug report. Please use the mailing list for this type of question.
#e8e8bd 867 new active 2004 Aug anonymous Parser 2006 Jan 3 2 update on multiple tables I would like to have update working on multi tables; It is not written as possible thus it is a request for enhancement. basic example: create table a ( id integer primary key, val integer ); create table b ( id integer primary key, val integer ); insert into a (val) values (314); insert into a (val) values (315); insert into b (val) values (314); insert into b (val) values (314); update a, b set b.val = a.val; _2006-Jan-24 19:01:31 by anonymous:_ {linebreak} I would prefer another syntax - using a SELECT as the source for data to be updated. UPDATE SET =... WHERE = ... SELECT FROM WHERE =... This is the update syntax used in PostgreSQL and other databases. I also believe this is easier to understand and easier to implement (since the select is just the source for data) ---- _2006-Jan-24 19:21:15 by anonymous:_ {linebreak} Already supported via correlated subquery: update b set val = (select a.val from a where a.id = b.id); See: http://www.sqlite.org/lang_update.html
#e8e8bd 1638 warn active 2006 Jan anonymous 2006 Jan 3 2 rows place change and some row element missing there is a problem with my table row order. I miss one row header and the next one comes to its place. Also there is a problem like this in the columns too. It does not occur when the table is list or line, but when I turn it into column mode the problem happens. My table becomes puzzling.
#f2dcdc 1637 code active 2006 Jan anonymous Parser 2006 Jan 4 4 Multiple JOIN USING() doesn't work. CREATE TABLE T1(T1Id, Name); INSERT INTO T1(T1Id, Name) VALUES (0,"titi"); INSERT INTO T1(T1Id, Name) VALUES (1,"toto"); INSERT INTO T1(T1Id, Name) VALUES (2,"tutu"); INSERT INTO T1(T1Id, Name) VALUES (3,"hat"); INSERT INTO T1(T1Id, Name) VALUES (4,"socks"); CREATE TABLE T2(T2Id, Name); INSERT INTO T2(T2Id, Name) VALUES(0,"Black"); INSERT INTO T2(T2Id, Name) VALUES(1,"Red"); INSERT INTO T2(T2Id, Name) VALUES(2,"Blue"); INSERT INTO T2(T2Id, Name) VALUES(3,"Green"); INSERT INTO T2(T2Id, Name) VALUES(4,"Yellow"); INSERT INTO T2(T2Id, Name) VALUES(5,"Brown"); INSERT INTO T2(T2Id, Name) VALUES(6,"White"); CREATE TABLE T3(T3Id,T1Id,T2Id,Number); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (1,4,0,5); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (2,4,1,4); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (3,4,2,3); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (4,4,3,2); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (5,4,4,1); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (6,4,5,0); INSERT INTO T3(T3Id,T1Id,T2Id,Number) VALUES (7,3,0,10); SELECT main.Number AS Number, prod.Name AS product, col.Name AS color, Main.T3Id AS Id FROM T3 AS main LEFT JOIN T1 AS prod USING(T1Id) LEFT JOIN T2 AS col USING(T2Id);
The second USING doesn't work. _2006-Jan-23 16:03:17 by drh:_ {linebreak} SQLite only looks for columns to satisfy the USING clause in the two tables immediately tot he left and right of the join. In the example above, it is trying to resolve "USING(t2id)" by looking for columns t1.t2id and t2.t2id. ---- _2006-Jan-23 23:12:32 by anonymous:_ {linebreak} I'm not sure if you are saying that is the way SQLite should work, or just explaining the source of the error. I believe that JOINS are supposed to combine two tables to produce a third table in a left associative manner. The sample query should join T3 and T1 using T1Id to produce an intermediate table, lets call it simply T. Then T should be joined with T2 using T2Id to produce the final result table. As shown below, T does in fact have a column named T2Id, and this column should be used for the second join. sqlite> CREATE table t AS SELECT * ...> FROM T3 AS main ...> LEFT JOIN T1 AS prod USING(T1Id); sqlite> sqlite> PRAGMA table_info(t); 0|T3Id|numeric|0||0 1|T1Id|numeric|0||0 2|T2Id|numeric|0||0 3|Number|numeric|0||0 4|Name|numeric|0||0 sqlite> sqlite> SELECT T2ID from t; 0 1 2 3 4 5 0 One area where SQLite may have a standards compliance problem is with the name of the result set columns with column name joins (i.e. joins with a USING() clause). The standard says that the result set column that was used to join the two tables can not have a qualifier. That means it is NOT the column from either of the two input tables, and can't be qualified using either table name. The USING clause "projects out" the columns from both of the input tables and replaces them with a single column with the same name. The two columns could have different types for example, and the result set column could be yet another type. The first two otuput columns below should generate an error. Only the third is legal according to the SQL standard. 
sqlite> SELECT T3.T2Id, T2.T2Id, T2Id from T3 LEFT JOIN T2 USING(T2Id); T2Id|T2Id|T2Id 0|0|0 1|1|1 2|2|2 3|3|3 4|4|4 5|5|5 0|0|0 The type differences between the different columns are displayed in the following example. In one table the values of column a are integers and in the other they are text. The type of the result column depends upon the order of the tables in the join. sqlite> select a from tt1 join tt2 using(a); a 1 2 sqlite> select typeof(a) from tt1 join tt2 using(a); typeof(a) integer integer sqlite> select typeof(a), typeof(tt1.a), typeof(tt2.a) from tt1 join tt2 using(a); typeof(a)|typeof(tt1.a)|typeof(tt2.a) integer|integer|text integer|integer|text sqlite> select * from tt1 join tt2 using(a); a|b|c 1|1|-1 2|4|-2 sqlite> select typeof(a), typeof(tt1.a), typeof(tt2.a) from tt2 join tt1 using(a); typeof(a)|typeof(tt1.a)|typeof(tt2.a) text|integer|text text|integer|text The same rules also apply to NATURAL joins. The result columns cannot legally be qualified by either table name. ---- _2006-Jan-23 23:45:04 by drh:_ {linebreak} My previous remark is describe what SQLite does. You are probably right in pointing out that what it currently does is not correct and ought to be fixed. I merely observet that SQLite has never worked that way and has (up until now) caused no serious concern. So, I'm not going to consider this a high-priority bug. I will get to it as I am able. But I need to get 3.3.x out the door first.
#e8e8bd 1636 todo active 2006 Jan anonymous Shell 2006 Jan drh 4 3 stdev does not work When trying to calculate standard deviation I get the following error message: SQL error: no such function: stdev How does SQLite support statistical function stdev? What is the correct name of the function? The same with sqr and sqrt. What are the names for square and square-root? Is there any other way to use SQLite for statistical calculations?
#e8e8bd 1626 new active 2006 Jan anonymous Unknown 2006 Jan 4 4 ALTER TABLE BUG/MISHEAVIOR I have observed a "bug" in sqlite (3.2.8). when a table is created, and then renamed via the ALTER TABLE statement, the create statement gets adjusted (which is correct), but the new name appears in single quotes. according to sql syntax, table and field names are quoted by double quotes, so this creates subsequent problems for parsing tools and such. to test: create table x(id) select * from sqlite_master (observe the create statememt) alter table x rename to x1 select * from sqlite_master (observe the create statememt again, the name is now 'x1' in quotes) IMHO, the name should not be quoted by default, but only if it contains spaces, special characters and such (in other words, follow the quoting rules for tables that sqlite already has) _2006-Jan-23 00:24:34 by drh:_ {linebreak} The result that SQLite produces is technically correct, even if it fails to meet the aesthetic expectations of this tickets author. Changes SQLite as suggest will make the code base larger, which is something we are loath to do. So I am converting this to a low-priority feature request. ---- _2006-Feb-07 07:36:19 by anonymous:_ {linebreak} ok, lets not do "intelligent" quoting. what about the quote format ? should it not be DOUBLE quotes, instead of single ?
#e8e8bd 1635 doc active 2006 Jan anonymous 2006 Jan 4 4 SQLite Ticket "Version" field The "Version" field in the CVSTrac ticket is ambiguous (particularly for "fixed" bugs). Can it be renamed to "Version Bug Appears" or something to that effect? Also, it would be very useful to see the SQLite version in which the ticket is fixed in addition to the SQLite version which the bug first appears. Manually correlating the date of the fix with the SQLite version is awkward.
#e8e8bd 1627 warn active 2006 Jan anonymous 2006 Jan 4 4 warnings on BCB and how to resolve them + couple of minor fixes I took 3.3.1 sources (the preprocessed version) and tried to compile them on Borland C++ Builder 6.4. I wanted to keep all compiler warnings switched on. I got some warnings from sqlite source. Some due to strlen() returning unsigned, some due signed/unsigned comparison, some because BCB requires if (a = b) ... to be rewritten as if ((a = b) != 0) ... to avoid warnings. List of places generating warnings and ways to shut then up are bellow. Perhaps these fixes may be applied to the codebase since the warnings may happen with other C++ compilers as well. I cannot promise the fixes will work with 64bit architectures but it seems likely. ----------------------------------------- Some macros, like MASTER_NAME, may cause collisions. Perhaps they all can be prefixed with SQLITE3 or so to avoid such accidents. -------------------------------------------- The trick to enable NDEBUG in sqliteInt.h: #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif has unfortunate property of changing NDEBUG settings in those parts of a project using sqlite and not in the others. It is hard to detect reason for possible problems. I suggest: 1) Put on top of each sqlite source file #define THIS_IS_SQLITE_SOURCE 2) Change the trick to: #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) && (defined THIS_IS_SQLITE_SOURCE) # define NDEBUG 1 #endif -------------------------------------------- Small wish for the documentation: could there be information how to switch off all sqlite memory management and checking and leave it all to host's malloc/realloc/free? I have very optimized dmalloc allocator and want to be sure there are no other layers above this. My allocator also does boundary checking and error detection. 
-------------------------------------------- ---- FIXES TO GET RID OF WARNINGS ON BCB --- -------------------------------------------- vdbmem.c, line 725: assert( strlen(pMem->z)<=pMem->n ); ==>> assert( (int)strlen(pMem->z)<=pMem->n ); -------------------------------------------- vdbeaux.c: line 545: if( strlen(zTemp)+strlen(zNum)+1<=nTemp ){ ==>> if( (int)strlen(zTemp)+(int)strlen(zNum)+1<=nTemp ){ line 1740: if( d1>=nKey1 && sqlite3VdbeSerialTypeLen(serial_type1)>0 ) break; ==>> if( (int)d1>=nKey1 && sqlite3VdbeSerialTypeLen(serial_type1)>0 ) break; line 1742: if( d2>=nKey2 && sqlite3VdbeSerialTypeLen(serial_type2)>0 ) break; ==>> if( (int)d2>=nKey2 && sqlite3VdbeSerialTypeLen(serial_type2)>0 ) break; line 1768: }else if( d1> }else if( (int)d1> }else if( (int)d2hdrOffset+3]) ); ==>> assert( iCell<(int)get2byte(&data[pPage->hdrOffset+3]) ); line 932: if( nPayload<=pPage->maxLocal ){ ==>> if( (int)nPayload<=pPage->maxLocal ){ line 1150: assert( nCell==get2byte(&data[hdr+3]) ); ==>> assert( nCell==(int)get2byte(&data[hdr+3]) ); line 2351: if( origSize>=PENDING_BYTE_PAGE(pBt) && finSize<=PENDING_BYTE_PAGE(pBt) ){ ==>> if( (int)origSize>=PENDING_BYTE_PAGE(pBt) && (int)finSize<=PENDING_BYTE_PAGE(pBt) ){ line 2367: if( PTRMAP_ISPAGE(pgsz, iDbPage) || iDbPage==PENDING_BYTE_PAGE(pBt) ){ ==>> if( PTRMAP_ISPAGE(pgsz, iDbPage) || (int)iDbPage==PENDING_BYTE_PAGE(pBt) ){ line 2916: if( offset+amt > nKey+pCur->info.nData ){ ==>> if( offset+amt > (int)(nKey+pCur->info.nData) ){ line 3056: if( nLocal>nKey ){ ==>> if( nLocal>(int)nKey ){ line 3737: if( *pPgno>sqlite3pager_pagecount(pBt->pPager) ){ ==> if( *pPgno>(Pgno)sqlite3pager_pagecount(pBt->pPager) ){ line 3774: assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); ==>> assert( *pPgno!=(Pgno)PENDING_BYTE_PAGE(pBt) ); line 3779: assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); ==>> assert( *pPgno!=(Pgno)PENDING_BYTE_PAGE(pBt) ); line 3789: assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); ==>> assert( *pPgno!=(Pgno)PENDING_BYTE_PAGE(pBt) ); line 3879: 
if( ovflPgno>sqlite3pager_pagecount(pBt->pPager) ){ ==>> if( ovflPgno>(Pgno)sqlite3pager_pagecount(pBt->pPager) ){ line 3774: assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); ==>> assert( *pPgno!=(Pgno)PENDING_BYTE_PAGE(pBt) ); line 3938: assert( info.nData==nData ); ==>> assert( (int)info.nData==nData ); line 4166: assert( end <= get2byte(&data[hdr+5]) ); ==>> assert( end <= (int)get2byte(&data[hdr+5]) ); line 4972: assert( pgnoChild<=sqlite3pager_pagecount(pPage->pBt->pPager) ); ==>> assert( pgnoChild<=(Pgno)sqlite3pager_pagecount(pPage->pBt->pPager) ); line 5399: pgnoRoot==PENDING_BYTE_PAGE(pBt) ){ ==>> pgnoRoot==(Pgno)PENDING_BYTE_PAGE(pBt) ){ line 5491: if( pgno>sqlite3pager_pagecount(pBt->pPager) ){ ==>> if( pgno>(Pgno)sqlite3pager_pagecount(pBt->pPager) ){ line 5616: if( iTable==maxRootPgno ){ ==>> if( iTable==(int)maxRootPgno ){ line 5659: if( maxRootPgno==PENDING_BYTE_PAGE(pBt) ){ ==>> if( maxRootPgno==(Pgno)PENDING_BYTE_PAGE(pBt) ){ line 5665: assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) ); ==>> assert( maxRootPgno!=(Pgno)PENDING_BYTE_PAGE(pBt) ); if( (rc = restoreOrClearCursorPosition(pCur, 1)) || (rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur)) || (rc = sqlite3pager_write(pPage->aData)) ){ ==>> if( (rc = restoreOrClearCursorPosition(pCur, 1)) != 0 || (rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur)) != 0|| (rc = sqlite3pager_write(pPage->aData)) != 0 ){ (to get rid "possibly incorrect assigment" warning) line 3353: int dummy; pCell += getVarint32(pCell, &dummy); ==>> u32 dummy; pCell += getVarint32(pCell, &dummy); line 3342: i64 nCellKey; ==>> u64 nCellKey; because it causes warning in line 3353 in getVarint(pCell, &nCellKey); -------------------------------------------- build.c: line 622: if( (!OMIT_TEMPDB || i!=1 ) && n==strlen(pDb->zName) && ==>> if( (!OMIT_TEMPDB || i!=1 ) && n==(int)strlen(pDb->zName) && line 2367: pIndex->aiRowEst = (int *)(&pIndex->aiColumn[nCol]); ==>> pIndex->aiRowEst = (unsigned *)(&pIndex->aiColumn[nCol]); (the type is defined as 
unsigned in sqliteInt.h) -------------------------------------------- vdbe.c: line 1967: assert( p2> assert( p2<(int)nField ); line 1967: if( avail>=payloadSize ){ ==>> if( avail>=(int)payloadSize ){ line 2019: if( !zRec && avail> if( !zRec && avail<(int)offset ){ line 2034: for(i=0; i> for(i=0; i<(int)nField; i++){ -------------------------------------------- select.c, line 113: if( p->n==keywords[j].nChar ==>> if( (int)(p->n)==keywords[j].nChar -------------------------------------------- expr.c, line 350: && pE->token.n==n ==>> && (int)(pE->token.n)==n -------------------------------------------- shell.c: functions isatty() and access() are in on Borland. I suggest to add: #if (defined __BORLANDC__) # include #endif and at the same time to comment out shell.c, line 59: #if !(defined __BORLANDC__) extern int isatty(); #endif Prototype on BCB is int isatty(int handle); -------------------------------------------- pager.c: line 600: for(i=0; i> for(i=0; i<(int)len; i++){ line 1016: if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ ==>> if( pgno==0 || pgno==(Pgno)PAGER_MJ_PGNO(pPager) ){ line 1341: assert( pPager->origDbSize==0 || pPager->origDbSize==mxPg ); ==>> assert( pPager->origDbSize==0 || (Pgno)(pPager->origDbSize)==mxPg ); line 1354: for(i=0; i> for(i=0; i<(int)nRec; i++){ line 1935: if( pPg->pgno<=dbSize ){ ==>> if( pPg->pgno<=(Pgno)dbSize ){ line 2308: if( pList->pgno<=pPager->dbSize ){ ==>> if( pList->pgno<=(Pgno)(pPager->dbSize) ){ line 2560: if( pgno>PAGER_MAX_PGNO || pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ ==>> if( pgno>PAGER_MAX_PGNO || pgno==0 || pgno==(Pgno)PAGER_MJ_PGNO(pPager) ){ line 3043: assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); ==>> assert( pPg->pgno!=(Pgno)PAGER_MJ_PGNO(pPager) ); line 3672: for( i=nTrunc+1; i<=pPager->origDbSize; i++ ){ ==>> for( i=nTrunc+1; i<=(Pgno)(pPager->origDbSize); i++ ){ line 3673: if( !(pPager->aInJournal[i/8] & (1<<(i&7))) && i!=iSkip ){ ==>> if( !(pPager->aInJournal[i/8] & (1<<(i&7))) && i!=(Pgno)iSkip ){ 
-------------------------------------------- printf.c, line 409: *(--bufpt) = cset[longvalue%base]; ==>> *(--bufpt) = cset[(unsigned)(longvalue%base)]; Here BCB complains on using 64 bit integer as array index with warning "suspcious pointer conversion". -------------------------------------------- Lot of code complains on memory debug functions not compiled in but used within asserts: util.c: line 1360: assert( sqlite3ThreadData()->mallocDisallowed>=0 ); sqlite3ThreadData()->mallocDisallowed++; ==>> #ifdef SQLITE_MEMDEBUG assert( sqlite3ThreadData()->mallocDisallowed>=0 ); sqlite3ThreadData()->mallocDisallowed++; #endif line 1369: assert( sqlite3ThreadData()->mallocDisallowed>0 ); sqlite3ThreadData()->mallocDisallowed--; ==>> #ifdef SQLITE_MEMDEBUG assert( sqlite3ThreadData()->mallocDisallowed>0 ); sqlite3ThreadData()->mallocDisallowed--; #endif line 556: while( !(p = OSMALLOC(n)) && sqlite3_release_memory(n) ); ==>> while( (p = OSMALLOC(n))==0 && sqlite3_release_memory(n) ); This is officially BCB recommended trick to shut up "possibly incorrect assignement" message. The docs says: if (a = b) ... should be rewritten as if ((a = b) != 0) ... line 586: while( !(np = OSREALLOC(p, n)) && sqlite3_release_memory(n) ); ==>> while( (np = OSREALLOC(p, n))==0 && sqlite3_release_memory(n) ); line 739: if( db && (db->pErr || (db->pErr = sqlite3ValueNew()))!=0 ){ ==>> if( db && (db->pErr || ((db->pErr = sqlite3ValueNew())!=0))!=0 ){ -------------------------------------------- There are several more warnings when BCB compiler claims a function has no prototype where it has. It is knon Borland bug and the only way to fix it is to add #ifdef __BORLANDC__ # pragma -warn ... #endif ...statements... #ifdef __BORLANDC__ # pragma .warn ... #endif inside sqlite code. I do not know whether you wish to pollute the code with such specific workarounds. If yes, I may provide more details. EOF
#f2dcdc 1622 code active 2006 Jan danielk1977 2006 Jan 1 1 Compiling with OMIT_PRAGMA causes an error in the test suite Compiling with OMIT_PRAGMA causes an error in the test suite. The error is a Tcl level error thrown by a [db eval] command when it encounters the unknown SQL keyword "PRAGMA".
#f2dcdc 1621 code active 2006 Jan danielk1977 2006 Jan 5 5 Compiling with OMIT_FLOATING_POINT causes a segfault in the test suite Compiling with OMIT_FLOATING_POINT causes a segfault in the test suite.
#e8e8bd 1591 build active 2006 Jan anonymous 2006 Jan 3 2 Missing TEXE suffix in linking and install rules for sqlite3 Suffix $(TEXE) is missing at some places in Makefile.in. It makes dependencies and rules inconsistent, when TEXE is not null (e.g. .exe when compiling with MingW for MSWin target). Those rules have to be fixed: *: When linking sqlite3 executable: use -o sqlite3$(TEXE) instead of -o sqlite3 in linker command line to use (especially since $(TEXE) suffix is present in rule) *: When installing: use{linebreak} $(LTINSTALL) sqlite3$(TEXE) $(DESTDIR)$(exec_prefix)/bin{linebreak} instead of{linebreak} $(LTINSTALL) sqlite3 $(DESTDIR)$(exec_prefix)/bin{linebreak} Similar fixes need to be applied to tclsqlite3 (missing in both rule and linker command line), testfixture (in linker cmdline), crashtest (in linker cmdline), lemon (in linker cmdline, with $(BEXE) suffix).
#e8e8bd 1588 new active 2006 Jan anonymous 2006 Jan 4 4 common subexpression elimination not performed in SELECT SQLite could perform common subexpression elimination to significantly speed up certain SELECT statements. Consider the following: CREATE TABLE t6(a); INSERT INTO "t6" VALUES(1); INSERT INTO "t6" VALUES(2); -- ...imagine 5000 more rows inserted... INSERT INTO "t6" VALUES(4999); INSERT INTO "t6" VALUES(5000); CREATE VIEW v6 as SELECT x.a as xa, y.a as yb, (123.45*x.a*y.a-x.a*x.a) AS expr FROM t6 x, t6 y; With the following SELECT statement: explain select expr s1, (expr/789.3) s2 from v6 order by s1; You can see that SQLite repeats the calculation of 'expr' three times below. If the common subexpression 'expr' was calculated just once per inner-loop iteration this query would be significantly faster. 0|OpenVirtual|3|3|keyinfo(1,BINARY) 1|Goto|0|58| 2|Integer|0|0| 3|OpenRead|1|2| 4|SetNumColumns|1|1| 5|Integer|0|0| 6|OpenRead|2|2| 7|SetNumColumns|2|1| 8|Rewind|1|46| 9|Rewind|2|45| -- first expr 10|Real|0|0|123.45 11|Column|1|0| 12|Multiply|0|0| 13|Column|2|0| 14|Multiply|0|0| 15|Column|1|0| 16|Column|1|0| 17|Multiply|0|0| 18|Subtract|0|0| -- second expr 19|Real|0|0|123.45 20|Column|1|0| 21|Multiply|0|0| 22|Column|2|0| 23|Multiply|0|0| 24|Column|1|0| 25|Column|1|0| 26|Multiply|0|0| 27|Subtract|0|0| 28|Real|0|0|789.3 29|Divide|0|0| 30|MakeRecord|2|0| -- third expr 31|Real|0|0|123.45 32|Column|1|0| 33|Multiply|0|0| 34|Column|2|0| 35|Multiply|0|0| 36|Column|1|0| 37|Column|1|0| 38|Multiply|0|0| 39|Subtract|0|0| 40|Sequence|3|0| 41|Pull|2|0| 42|MakeRecord|3|0| 43|IdxInsert|3|0| 44|Next|2|10| 45|Next|1|9| 46|Close|1|0| 47|Close|2|0| 48|Sort|3|57| 49|Column|3|2| 50|Integer|2|0| 51|Pull|1|0| 52|Column|-1|0| 53|Column|-2|1| 54|Callback|2|0| 55|Pop|2|0| 56|Next|3|49| 57|Halt|0|0| 58|Transaction|0|0| 59|VerifyCookie|0|2| 60|Goto|0|2| 61|Noop|0|0| The common sub-expression elimination may have to take into account user-defined functions that do not return the same 
value when given the same inputs (such as random()). Some databases fold such values into a single expression, while others evaluate each such function in a seperate expression. _2006-Jan-08 18:55:03 by drh:_ {linebreak} Common Subexpression Eliminationi (CSE) is a planned enhancement to occur after the VDBE is converted from a 3-operand stack machine to a 4-operand register machine. Perhaps this year sometime. ---- _2006-Jan-08 19:56:47 by anonymous:_ {linebreak} I think it would be desirable to have a seperate expression optimization pass before (and without knowledge of) the VDBE code generation phase to simplify and optimize the SQL abstract syntax tree. Such high level AST transformation appears to already happen in select.c in such routines as flattenSubquery(), but it would be great if all non-VDBE-specific expression tree manipulation code could be factored out into seperate .c files so that it would be completely independent of the code generation. This could allow you support a variety of backends.
#e8e8bd 1683 new active 2006 Feb anonymous Unknown 2006 Feb anonymous 4 3 .mode html produces uppercase tags Quote from one of sqlite docs: "The last output mode is "html". In this mode, sqlite writes the results of the query as an XHTML table. The beginning are not written, but all of the intervening s, s, and | s are. The html output mode is envisioned as being useful for CGI." I tried the ".mode html" and the result is just like the doc said. But there is one oddity. AFAIK, XHTML discourages using uppercase tags. Inspite of that, sqlite produces uppercase tags. Why?? Now, this is a suggestion from a newbie point of view just for the sake of sqlite's consistency: Use lowercase tags for ".mode hmtl" result. I know browsers produce the same result for |
...
as well as for ...
. We all know xhtml 1.0 is just a more strict version of html 4.0 and xhtml is based on xml whilst html 4.o is based on sgml. Not make much any difference to me and the browsers. But following the xhtml standard and rules -that sqlite WANTS- doesn't hurt right? I hope you can consider more seriously this lite suggestion :) Keep up the great work! ps:sorry for my broken english _2006-Feb-21 14:24:54 by anonymous:_ {linebreak} Capitals tags aren't xhtml. {linebreak} Why not offering an export may be also import mode in true xml ? With xsd schema file creation as useful for direct import into spread sheet and other popular programs. {linebreak} Keep on going with this beautiful program.
#e8e8bd 1679 new active 2006 Feb anonymous 2006 Feb 4 4 SQLite requires code modification to replace memory allocation funcs sqliteOsMalloc is a C macro that points to a 'generic' malloc routine by default. The same for realloc and free. This forces a SQLite user to modify the code of each release to support their own memory functions. It also means that the user can no longer use a system-wide SQLite library and has to ship their own. If those memory functions were function pointers then the calling program could set them without modifying the source code. These function pointers could be global variables or set by a sqlite_memory_funcs() API function.
#e8e8bd 1675 new active 2006 Feb anonymous 2006 Feb 5 4 sqlite3_db_dup() ? I don't need this myself, as I don't write multithreaded code now, but it seems a useful addition for all those people doing multithreaded code: Duplicate a db connection for use in another thread. The new db connection should NOT be connected to any shared caches for the current thread, so that the new connection can be moved to any thread. The idea might be a server, which starts some worker thread and wants to give that new thread a connection to the db.
#e8e8bd 1663 new active 2006 Feb anonymous Unknown 2006 Feb 4 4 Suggestion: DATETYPE field type using David Tribble's work? How about a new field (/data) type, DATETIME, using David Tribble's excellent proposal? See here: http://david.tribble.com/text/c0xlongtime.html _2006-Feb-09 02:58:34 by drh:_ {linebreak} What advantage does David Tribble's datetype design have over the existing date and time support in SQLite? Why should we change?
#e8e8bd 1653 new active 2006 Feb anonymous CodeGen 2006 Feb 4 5 SQLITE_OMIT_COST_REORDERING ? Embedded SQLite installations could benefit from conditionally compiling out the logic of the cost-based reordering of tables (i.e., always assume cross join). It might save a few kilobytes of code when the join order is always known in advance.
#e8e8bd 931 new active 2004 Sep anonymous 2006 Dec 5 4 Makefile for mingw on Win32, makes DLL, EXE and docs This is a more compact and easier to understand makefile which will work out-of-the box (well, almost) for people using mingw32 on Windows platforms. MSYS is not needed, but it does require gawk, sed and grep (well, and GNU make too). _2005-Aug-27 01:07:30 by anonymous:_ {linebreak} Has anybody compiled successfully sqlite3 3.2.x with mingw? how? ---- _2005-Aug-27 02:13:20 by anonymous:_ {linebreak} From version 2.8.14 to 3.2.4 I have no problems. ---- _2005-Aug-27 12:24:58 by anonymous:_ {linebreak} 1. installed msys -c:\msys {linebreak} 2. installed mingw - c:\msys\mingw {linebreak} 3. installed msys toolkit and mingw make. see http://mingw.org/download.shtml {linebreak} 4. run msys. {linebreak} 5. go to sqlite directory. {linebreak} 6. ./configure {linebreak} 7. make {linebreak} So easy so I can't tell more. ---- _2005-Aug-27 13:14:58 by drh:_ {linebreak} The precompiled windows binaries on the SQLite website have always been generated using mingw running as a cross-compiler on a linux host. ---- _2005-Aug-27 15:14:09 by anonymous:_ {linebreak} Ok, I got it working. There is just one discrepancy - I end up with the following: 291840 sqlite3.dll{linebreak} 1280564 sqlite3.exe{linebreak} {linebreak} Whereas the downloadable "precompiled" binaries are just:{linebreak} {linebreak} 248320 sqlite3.dll{linebreak} 330203 sqlite3.exe{linebreak} Is there anything else I need to do to get the exact same byte-size? (this is 3.2.4) ---- _2005-Aug-27 15:18:55 by anonymous:_ {linebreak} Is it possible that you're compiling with readline support (which isn't present in the distributed binary due to licensing issues)? ---- _2005-Aug-27 15:27:43 by anonymous:_ {linebreak} I do not know how to enable/disable readline support. I have not changed or added any switch to the above. Besides, I think the .dll does not use readline. 
---- _2005-Dec-27 10:55:55 by anonymous:_ {linebreak} see also BuildOnWindowsWithoutTcl ---- _2006-Dec-28 19:18:55 by anonymous:_ {linebreak} Does this also generate a .a for use in applications? Still needing the .dll that is. I don't need sqlite.exe at all... ---- _2006-Dec-28 19:51:44 by anonymous:_ {linebreak} I assume you mean .a for MinGW or Cygwin. Using Cygwin or MinGW/MSYS just download and untar the sqlite3 source and issue these commands: cd sqlite ./configure make sqlite3.dll The .a file will be located in sqlite/.libs/libsqlite3.a
#f2dcdc 2132 code active 2006 Dec anonymous 2006 Dec 3 4 table_info, index_list, index_info & friends should be virtual tables The following meta data should be accessible via read-only virtual tables in some sort of sqlite_meta_info table: PRAGMA database_list; PRAGMA foreign_key_list(table-name); PRAGMA index_info(index-name); PRAGMA index_list(table-name); PRAGMA table_info(table-name); This would allow complex querying and joins of the meta data of the database. (Pragmas cannot be used as a subquery). The underlying introspection mechanism can actually be PRAGMA-based for backwards compatability.
#f2dcdc 2131 code active 2006 Dec anonymous 2006 Dec 2 3 Add substring() function (Part of SQL 99) sqlite> SELECT substring('foobar.class',-6,6) = '.class'; SQL error: no such function: substring sqlite> SELECT SUBSTRING('foobar.class',-6,6) = '.class'; SQL error: no such function: SUBSTRING sqlite> SELECT SUBSTR('foobar.class',-6,6) = '.class'; 1 sqlite> SELECT substring('foobar.class' FROM -6 FOR 6) = '.class'; SQL error: near "FROM": syntax error Looking at: http://www.oreilly.com/catalog/sqlnut/chapter/ch04.html SQL99 Syntax SUBSTRING(extraction_string FROM starting_position [FOR length] [COLLATE collation_name]) It would be useful for sqlite to support this syntax too to make the SQL more portable. _2006-Dec-28 16:03:03 by anonymous:_ {linebreak} sqlite has the substr() routine (func.c code): { "substr", 3, 0, SQLITE_UTF8, 0, substrFunc }, #ifndef SQLITE_OMIT_UTF16 { "substr", 3, 0, SQLITE_UTF16LE, 0, sqlite3utf16Substr }, #endif
this could be 'aliased' to help you using the substring() SQL99 std just doing: { "substr", 3, 0, SQLITE_UTF8, 0, substrFunc }, { "substring", 3, 0, SQLITE_UTF8, 0, substrFunc }, #ifndef SQLITE_OMIT_UTF16 { "substr", 3, 0, SQLITE_UTF16LE, 0, sqlite3utf16Substr }, { "substring", 3, 0, SQLITE_UTF16LE, 0, sqlite3utf16Substr }, #endif
#f2dcdc 2130 code active 2006 Dec anonymous 2006 Dec 4 4 replace "long long int" type with "sqlite_int64" defined in sqlite3.h As the typedef "typedef long long int sqlite_int64" is available in sqlite3.h. Please ensure that remaining references to "long long" use this typedef (for 32bit compilers): Searching for 'long long int'... sqlite3.h(89): typedef long long int sqlite_int64; sqlite3.h(90): typedef unsigned long long int sqlite_uint64; os_common.h(67):__inline__ unsigned long long int hwtime(void){ os_common.h(68): unsigned long long int x; os_common.h(74):static unsigned long long int g_start; sqliteInt.h(185):** cc '-DUINTPTR_TYPE=long long int' vdbe.c(365):__inline__ unsigned long long int hwtime(void){ vdbe.c(366): unsigned long long int x; 8 occurrence(s) have been found. thanks
#f2dcdc 2125 code new 2006 Dec anonymous 2006 Dec 4 4 SQLite strings/blobs limited to 1GB/2GB due to 'int' in api In several places it is claimed that SQLite doesn't have arbitrary limits - eg http://www.tcl.tk/community/tcl2004/Papers/D.RichardHipp/drh.html However the size of strings and blobs is limited due to using int in the APIs. Since int is 32 bit even on 64 bit platforms, that limits the size of individual strings to 2GB. For strings with a UTF16 database that means 1 billion characters. UTF8 strings are anywhere from 2 billion characters to 250 million worst case. When integrating SQLite into 64 bit code, I have to verify the sizes of all strings and blobs being passed in to ensure they don't overflow the signed 32 bit int. It would be nice if SQLite supported larger objects on 64 bit platforms. There may also be internal problems with the use of int everywhere. For example I could store a 1.5 billion byte UTF8 string and then request it back via the UTF16 api. _2006-Dec-21 23:47:22 by drh:_ {linebreak} On a 64-bit platform "int" is 64-bits. So apparently you can store blobs and strings larger than 2GiB. But SQLite is not designed for this and you would do much better to store such massive objects in a separate file then store the filename in the database. ---- _2006-Dec-22 08:37:46 by anonymous:_ {linebreak} {quote: On a 64-bit platform "int" is 64-bits} This is not true for all platforms. On MS Platforms: "int" is always 32 bit. ---- _2006-Dec-23 11:55:45 by anonymous:_ {linebreak} I don't know of *any* 64 bit platform where int is 64 bits (ie ILP64). Here are the sizes on x64 Linux with gcc: sizeof(int) = 4 sizeof(short) = 2 sizeof(long) = 8 sizeof(long long) = 8 sizeof(void*) = 8 There is a good list of what is supported on which platforms on Wikipedia. 
http://en.wikipedia.org/wiki/64-bit#64-bit_data_models Note "Most 64 bit compilers today use the LP64 model (including Solaris, AIX, HP, Linux, MacOS native compilers), Microsoft however decided to use the LLP64 model." Note that this means it is impossible to use sizes larger than 2GB with SQLite using int in the API. And while I agree that SQLite isn't the ideal solution for this much data, you do claim no arbitrary limits! Additionally 64 bit code has to ensure that it never gives more than a signed int worth of data. I also suspect that wierd things would happen if too large values are given. The usual way this is solved in other libraries is to use the size_t and ssize_t types which will always be sized appropriately for the platform. You could even define your own size types such as sqlite3_size_t (that is what Python did). ---- _2006-Dec-25 12:25:57 by anonymous:_ {linebreak} Actually I can categorically prove that the current SQLite code is broken and that *bad things will happen*. The front page of the site says "Sizes of strings and BLOBs limited only by available memory." Using a simple example, I looked at the implementation of sqlite3_value_text which takes an int. Lets assume -1 is passed in. That calls bindText which calls sqlite3VdbeMemSetStr. That ultimately executes this code: if( n<0 ){ pMem->n = strlen(z); pMem->n is of integer type, ie signed 32 bits on all current platforms (32 bit and 64 bit). The memory being pointed to could be more than 2GB on a 64 bit platform. strlen's return type is size_t which is 64 bits on 64 bit platforms. Consequently the strlen returned will have the top 32 bits ignore in the code above and SQLite will store a negative number or a number smaller than the actual length of the string. This means that data will be lost (positive truncation) or that wierd things will happen (negative truncation, several assert failures). Note that there is the potential for problems even on 32 bit machines. 
For example a 1.1GB UTF8 string can be supplied and then be requested in UTF16 which will be 2.2GB and make the ints go negative. I suggest one or more of the following fixes: *: Remove the claim on the front page about only being limited by memory since this isn't true *: Document that strings larger than 500MB (UTF8) should not be used and will result in undefined behaviour (worst case UTF8 to UTF16 expansion could be 4 bytes?) *: Document that blobs larger than 2GB should not be used and will result in undefined behaviour *: Switch to using size_t and ssize_t in the APIs. If using a signed type (eg ssize_t) for the string APIs then the 500MB limit still remains on 32 bit machines.
#f2dcdc 2128 code active 2006 Dec anonymous 2006 Dec 4 3 virtual table code doesn't verify type of rowid (calling xUpdate) The virtual tables code doesn't verify the type of rowid when calling update. For example I used the following query: UPDATE foo SET rowid='a string' WHERE 1 This results in a call to xUpdate with argv[0] equal the current rowid but argv[1] is 'a string'. While I'd be quite happy for rowids to be any SQLite type, the xRowid call only allows 64 bit integers. I believe SQLite should check the new rowid in a case like this is an integer and reject it, rather than calling xUpdate with the bad value. (I also just checked with rowid=3.4 and rowid=NULL and they get passed through as is as well) A workaround is to document that the xUpdate method must check the new rowid is an integer type.
#f2dcdc 2127 code active 2006 Dec anonymous 2006 Dec 2 3 Virtual tables do not always free zErrmsg The documentation for virtual tables and in particular the sqlite3_vtab structure says "The SQLite core will free and zero the content of zErrMsg when it delivers the error message text to the client application or when it destroys the virtual table." The latter part does not appear to be true ("when it destroys the virtual table"). I can't find any code that does actually that. (eg vtab.c:496 definitely doesn't, nor does vtab.c:76) Usually the former case happens. However some operations have their error codes ignored (eg xClose). This can result in the zErrMsg pointing to a message but no error code returned upstream (which would clear the message). Finally as far as I can tell the responsibility for freeing sqlite3_vtab is with the xDisconnect/xDestroy callbacks since the corresponding xCreate/xConnect callbacks allocated it. Consequently there is no way for SQLite to even access zErrmsg since it would be a member of a freed structure after xDisconnect/xDestroy returned.
#f2dcdc 2126 code active 2006 Dec anonymous 2006 Dec 3 1 Update hook not invoked when deleting all rows from table I was testing the update hook feature of SQLite and incidentally I noticed that the hook is not invoked for a "DELETE FROM" statement with no WHERE clause. The hook works well for a "DELETE FROM ... WHERE ..." statement. Steps to reproduce: 1: Open database, set up update hook 2: Execute:{linebreak}CREATE TABLE Test(Test INTEGER); 3: Insert some data:{linebreak}INSERT INTO TEST (Test) VALUES (1); -- update hook invoked for INSERT{linebreak}INSERT INTO TEST (Test) VALUES (2); -- update hook invoked for INSERT{linebreak}INSERT INTO TEST (Test) VALUES (3); -- update hook invoked for INSERT 4: Execute:{linebreak}DELETE FROM TEST; -- update hook IS NOT INVOKED(!) for each row. _2006-Dec-22 15:38:44 by drh:_ {linebreak} Triggers don't work either. This is a feature, not a bug. When you do "DELETE FROM table" with no WHERE clause, SQLite drops and recreates the table. Doing it this way gives a huge speed boost. If you really need the update hook to work, add a "WHERE 1" to the end of the query. ---- _2006-Dec-22 16:11:11 by anonymous:_ {linebreak} I don't really need it, but maybe other users do. You should update the documentation of _sqlite3_update_hook()_ and _triggers_ to mention this behaviour. Another solution is to perform "DELETE FROM" as if it had a WHERE clause whenever there is an update hook registered.
#e8e8bd 2120 new active 2006 Dec anonymous 2006 Dec 5 4 Date Column support 1) date column declared as 'date' or 'datestr' or similar. 2) date real type is text, stored like 'yyyy-mm-dd'. 3) sqlite3_column_type() return SQLITE_DATE 4) min, max function return type SQLITE_DATE 5) (optional) new function: sqlite3_column_date() is same as sqlite3_column_text(); 6) modify return type of sqlite3_value_type() (add SQLITE_DATE) 7) (optional) new function sqlite3_value_date() same as sqlite3_value_text().
#f2dcdc 2112 code active 2006 Dec anonymous 2006 Dec 5 4 code compatibility with C++ I need to compile SQLite with C++ compiler (GCC 3.4.5 from MingW). I also compiled the code as single translation unit, by including all relevant source files into single *.cpp file. To make it working I needed to make several dozens of small fixes. _:Problems I found: *:1. does not have guarding macros *:2. does not have guarding macros *:3. parser.y used symbol"not" which is reserved in C++ and transformed to "!" *:4. pager.c redefines macros TRACE1 ... TRACE5 which are already defined elsewhere. With several files in one TU this clashes. *:5. Inner structures and name lookup. With structures like struct SrcList { ... struct SrcList_item { ...
C++ requires full qualification, e.g. struct SrcList::SrcList_item* p = ... // OK struct SrcList_item* p = ... // ERROR
*:6. Code like char* p = sqlite3Malloc(...)
fails to compile without cast. *:7. There are a few dozen places where a signed value is compared with an unsigned one and the compiler emits a warning. If some of the points above get eliminated then the porting should be much easier - it was a few boring hours of a change-try-compile-fix cycle for me. _2006-Dec-19 16:42:35 by anonymous:_ {linebreak} this software is written in C with a lemon grammar parser (parser.y), and it's compiled with the GCC compiler suite (GCC, not G++). Also, the source code is not C++ compliant, and isn't structured for a single source file, so avoid reporting tickets like this in the trac, use the mailing list to ask things, only report errors here. ---- _2006-Dec-19 17:21:15 by anonymous:_ {linebreak} It's fine to ask for reasonable features like making the code C++ clean. Just use a lower priority (4 or 5), as the original ticket poster did.
#e8e8bd 2110 build active 2006 Dec anonymous 2006 Dec 4 3 Non-optional linking with readline makes sqlite3 binary GPL Currently, the sqlite3 binary is linked with libreadline support if it happens to be available at compile time. This may not always be desirable, because readline is licensed under GPL, and therefore the sqlite3 binary becomes GPL. Solution: There ought to be a configure script parameter --disable-readline (or something similar) to allow creating non-GPL binaries. _2006-Dec-17 16:11:21 by anonymous:_ {linebreak} Another solution would be to use editline instead, which is BSD licensed, from NetBSD project. Here is an autotool- and libtoolized port of it: {link: http://www.thrysoee.dk/editline/ }. It seems to even have a readline.h wrapper. Check the links there for {link: http://cvsweb.netbsd.org/bsdweb.cgi/src/lib/libedit/?sortby=date#dirlist upstream sources} and related projects. ---- _2006-Dec-17 16:34:45 by anonymous:_ {linebreak} This might work: env CFLAGS="-UHAVE_READLINE" ./configure ---- _2006-Dec-17 16:55:55 by anonymous:_ {linebreak} The -UHAVE_READLINE thing does not work because it is hardcoded into Makefile.in: # Compiler options needed for programs that use the readline() library. # READLINE_FLAGS = -DHAVE_READLINE=@TARGET_HAVE_READLINE@ @TARGET_READLINE_INC@ # The library that programs using readline() must link against. # LIBREADLINE = @TARGET_READLINE_LIBS@ ... sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h $(LTLINK) $(READLINE_FLAGS) $(LIBPTHREAD) \ -o $@ $(TOP)/src/shell.c libsqlite3.la \ $(LIBREADLINE) $(TLIBS) You have to manually edit the generated Makefile to remove READLINE_FLAGS and LIBREADLINE.
#f2dcdc 1946 code new 2006 Aug anonymous Unknown 2006 Dec 2 2 .read file fails on blob fields with end-of-file char I have a table with a blob field. I put binary data there that contains the 0x1a (end-of-file) symbol. It's all right until I try to dump the table to a file and then try to import that file. sqlite3 my_db {linebreak}>.output my_file {linebreak}>.dump table_with_blob {linebreak}>.exit {linebreak}del my_db sqlite3 my_db {linebreak}>.read my_file Fails with "Incomplete SQL: ..." The SQL breaks before the 0x1a char. I'm on Windows. This could possibly be solved by opening the file as a binary file. Sorry for my English
#e8e8bd 2106 doc active 2006 Dec anonymous 2006 Dec 4 4 FtsOne wiki page neglects to mention Porter stemming http://www.sqlite.org/cvstrac/wiki?p=FtsOne claims that "the module does not perform stemming of any sort" when, in fact, FTS1 (in the 3.3.8 tarball and onwards) appears to fully support Porter stemming, using the same "tokenizer porter" syntax as FTS2.
#e8e8bd 2105 new active 2006 Dec anonymous 2006 Dec 5 5 sqlite3_exec does not return modified rows SQLite does not provide a way to determine whether a statement has succeeded unless it is a select statement. For instance using the following sample database create table urls (url text unique) insert into urls(url) values('http://www.sqlite.org') insert into urls(url) values('http://www.google.com') The following statement will execute the callback from sqlite3_exec which can be used to count the rows (because it's deemed a query) select * from urls But the following statement will not execute the callback and so there is no way of knowing whether the statement was successful or not. delete from urls where url='http://news.com' In fact, it is indistinguishable from the following statement... delete from urls where url='http://www.google.com' Even using prepared statements and the sqlite3_step function does not allow the user to determine the success of a statement or count affected rows. There needs to be some way to determine the number of affected rows as a result of a particular statement, not just queries. One solution would be for sqlite3_exec to call the callback once for each affected row, and that the fields and values parameters be left blank for non queries. Of course, a better solution may be to add a new API call which can be used when SQLITE_DONE is returned to check for affected rows. This would allow the API to actually signal success or failure, and more importantly provide a way of counting the affected rows from a particular statement. Most other database APIs provide a way to do this. After submitting this bug, I saw other bug reports that were similar with the suggestion that sqlite3_changes can be used to determine the number of affected rows. So I would like to change this bug report to simply be a documentation request. Please add a link to sqlite3_changes and a description in the documentation for sqlite3_exec so that it is easier to find. 
---- _2006-Dec-11 01:06:07 by anonymous:_ {linebreak} There is still one case which I cannot resolve even using the sqlite3_changes api call. That is the following statement. select * from urls where url=''; Since this select statement returns no results, the callback is not called, and when I call the sqlite3_changes routine it simply supplies the previous change (since select is not considered a change). So how then would I determine whether a statement returned no results without knowing in advance whether it was a select? I suggest sqlite3_changes be modified to return 0 when the statement was a select statement (since this also makes sense, there were no changes). This would allow the detection of select statements which return no results. ---- _2006-Dec-11 01:23:07 by anonymous:_ {linebreak} A workaround for this problem is to always manually reset the change count before calling sqlite3_exec db->nChange = 0; Unfortunately this is not available to the user, since the sqlite3 structure is not defined in the header. Perhaps executing a statement which is known to always have 0 changes may achieve the same effect until this can be added/fixed. ---- _2006-Dec-11 19:36:38 by anonymous:_ {linebreak} You should look at the empty_result_callbacks pragma documentation at http://www.sqlite.org/pragma.html You can set this to 1 to get a callback even when your query returns no results. The other pragma you may be interested in is the count_changes pragma. It can cause insert, delete, and update statements to return a single row with the number of changes when using the sqlite3_exec API.
#e8e8bd 2104 build active 2006 Dec anonymous 2006 Dec 2 3 manual link on Mac OS X fails due to common symbol Attempting a manual link on OS X with fts1: gcc -O -fPIC -dynamiclib -o mylib sqlite-3.3.8/*.o Results in the error: ld: common symbols not allowed with MH_DYLIB output format with the -multi_module option fts1.o definition of common _sqlite3_api (size 16) /usr/bin/libtool: internal link edit command failed This error is described: http://gcc.gnu.org/ml/gcc/2005-06/msg00199.html And a fix: --- /tmp/sqlite-3.3.8/src/sqlite3ext.h 2006-09-23 21:28:30.000000000 +1000 +++ sqlite3ext.h 2006-10-09 19:20:09.000000000 +1000 @@ -276,7 +276,7 @@ #define sqlite3_overload_function sqlite3_api->overload_function #endif /* SQLITE_CORE */ -#define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api; +#define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api = 0; #define SQLITE_EXTENSION_INIT2(v) sqlite3_api = v; #endif /* _SQLITE3EXT_H_ */
#f2dcdc 2100 code active 2006 Dec anonymous 2006 Dec 1 1 Fixes for SQL lower() and upper() As acknowledged in the documentation, the SQL lower() and upper() functions might not work correctly on UTF-8 characters. This bug might show if a country specific locale is used instead of the standard C locale. Under certain circumstances, SQL lower() or upper() can even corrupt the UTF-8 string into invalid UTF-8 if the tolower() and toupper() C functions convert character values starting from 0x80. Below I propose implementations of lowerFunc() and upperFunc() which work correctly with UTF-8 characters, regardless of the implementation of the C library tolower() and toupper() functions. If these C functions are implemented to support high ASCII or even Unicode case conversion, the new SQL lower() and upper() will support them as well. The proposed C implementation applies a technique also found in sqlite3VdbeMemTranslate() in utf.c and makes use of some macros contained in that unit. To avoid duplicating existing code, it could make sense to move lowerFunc() and upperFunc() to utf.c, just as it has been done with sqlite3utf16Substr(). Finally, here is the code: /* ** Implementation of the upper() and lower() SQL functions. */ static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ const unsigned char *zIn, *zInTerm; unsigned char *z, *zOut; int c, l; if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; zIn = sqlite3_value_text(argv[0]); if( zIn==0 ) return; l = sqlite3_value_bytes(argv[0]); zInTerm = &zIn[l]; /* When converting case, the maximum growth results from ** translating a 1-byte UTF-8 character to a 4-byte UTF-8 character. 
*/ zOut = sqliteMalloc( l * 4 ); z = zOut; while( zIn #ifdef SQLITE_UNICODE_UPPERLOWERFUNCS #define WCHAR_T_SIZE sizeof(wchar_t) #if (WCHAR_T_SIZE == 2) #define MAXUPPERLOWERCHAR_AVAIL 0x0000ffff #else // (WCHAR_T_SIZE == 4) #define MAXUPPERLOWERCHAR_AVAIL 0x7fffffff #endif // (WCHAR_T_SIZE == 2) #define TOLOWERSQLFUNC(c) unicode_tolower #define TOUPPERSQLFUNC(c) unicode_toupper int unicode_tolower(const int c) { wchar_t buff [2]; if (c > MAXUPPERLOWERCHAR_AVAIL) return c; buff[0] = (wchar_t) c; buff[1] = 0; _wcslwr(buff); return (int) buff[0]; } int unicode_toupper(const int c) { wchar_t buff [2]; if (c > MAXUPPERLOWERCHAR_AVAIL) return c; buff[0] = (wchar_t) c; buff[1] = 0; _wcsupr(buff); return (int) buff[0]; } #else // SQLITE_UNICODE_UPPERLOWERFUNCS #define TOLOWERSQLFUNC(c) (c > 255 ? c : tolower(c)) #define TOUPPERSQLFUNC(c) (c > 255 ? c : toupper(c)) #endif // SQLITE_UNICODE_UPPERLOWERFUNCS /* ** Implementation of the upper() and lower() SQL functions. */ static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ const unsigned char *zIn, *zInTerm; unsigned char *z, *zOut; int c, l; if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; zIn = sqlite3_value_text(argv[0]); if( zIn==0 ) return; l = sqlite3_value_bytes(argv[0]); zInTerm = &zIn[l]; /* When converting case, the maximum growth results from ** translating a 1-byte UTF-8 character to a 4-byte UTF-8 character. */ zOut = sqliteMalloc( l * 4 ); z = zOut; while( zIn