# 2007 April 12
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for the SQLite library.  The
# focus of the tests in this file is to verify that the pager
# optimizations implemented in version 3.3.14 work.
#
# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl

ifcapable {!pager_pragmas||secure_delete} {
  finish_test
  return
}

# Run the SQL statement supplied by the argument and return
# the results.  Prepend three integers to the beginning of the
# result which are:
#
#     (1)  The number of page reads from the database
#     (2)  The number of page writes to the database
#     (3)  The number of page writes to the journal
#
proc pagercount_sql {sql {db db}} {
  global sqlite3_pager_readdb_count
  global sqlite3_pager_writedb_count
  global sqlite3_pager_writej_count
  set sqlite3_pager_readdb_count 0
  set sqlite3_pager_writedb_count 0
  set sqlite3_pager_writej_count 0
  set r [$db eval $sql]
  set cnt [list $sqlite3_pager_readdb_count \
                $sqlite3_pager_writedb_count \
                $sqlite3_pager_writej_count ]
  return [concat $cnt $r]
}
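
# For example, a query answered entirely out of the page cache touches
# neither the database file nor the journal, so a call might look like
# this (the counts shown are illustrative and depend on cache state):
#
#   pagercount_sql {SELECT count(*) FROM t1}   ;# => {0 0 0 1}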

# Set up the test database.
#
do_test pageropt-1.1 {
  sqlite3_soft_heap_limit 0
  execsql {
    PRAGMA auto_vacuum = OFF;
    PRAGMA page_size = 1024;
  }
  pagercount_sql {
    CREATE TABLE t1(x);
  }
} {0 2 0}
do_test pageropt-1.2 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(5000));
  }
} {0 6 2}
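
# A plausible accounting for the counts above: the 5000-byte row spills
# into four overflow pages (as the comment before test pageropt-2.1
# notes), so the 6 page writes are page 1 (whose change counter is
# updated at commit), the root page of t1, and the four overflow pages.
# The 2 journal writes cover the two pages that existed before the
# statement ran: page 1 and the t1 root.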

# Verify that values remain in cache for subsequent reads.
# We should not have to go back to disk.
#
do_test pageropt-1.3 {
  pagercount_sql {
    SELECT length(x) FROM t1
  }
} {0 0 0 5000}

# If another connection reads the database, the original cache
# remains valid.
#
sqlite3 db2 test.db
set blobcontent [db2 one {SELECT hex(x) FROM t1}]
do_test pageropt-1.4 {
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 0 0 0 $blobcontent]

# But if the other connection modifies the database, then the cache
# must refill.
#
do_test pageropt-1.5 {
  db2 eval {CREATE TABLE t2(y)}
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 6 0 0 $blobcontent]
do_test pageropt-1.6 {
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 0 0 0 $blobcontent]

# Verify that the last page of an overflow chain is not read from
# disk when deleting a row.  The one row of t1(x) has four pages
# of overflow.  So deleting that row from t1 should involve reading
# the sqlite_master table (1 page), the root page of t1 (1 page), and
# the first three overflow pages of t1, for a total of 5 pages.
#
# Pages written are page 1 (for the freelist pointer), the root page
# of the table, and one of the overflow chain pointers because it
# becomes the trunk of the freelist.  Total 3.
#
do_test pageropt-2.1 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    DELETE FROM t1 WHERE rowid=1
  }
} {5 3 3}

# When pulling pages off of the freelist, there is no reason
# to actually bring in the old content.
#
do_test pageropt-2.2 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {3 4 3}
do_test pageropt-2.3 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {0 4 3}

# Note the new optimization: when pulling the very last page off of the
# freelist, we do not read the content of that page.
#
do_test pageropt-2.4 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {0 5 3}

# Appending a large quantity of data does not involve writing much
# to the journal file.  Pages added past the original end of the file
# need no journalling, since a rollback simply truncates them away.
#
do_test pageropt-3.1 {
  pagercount_sql {
    INSERT INTO t2 SELECT * FROM t1;
  }
} {1 7 2}

# Once again, we do not need to read the last page of an overflow chain
# while deleting.
#
do_test pageropt-3.2 {
  pagercount_sql {
    DROP TABLE t2;
  }
} {0 2 3}
do_test pageropt-3.3 {
  pagercount_sql {
    DELETE FROM t1;
  }
} {0 3 3}

# There are now 11 pages on the freelist.  Move them all into an
# overflow chain by inserting a single large record.  Starting from
# a cold cache, only page 1, the root page of table t1, and the trunk
# of the freelist need to be read (3 pages).  And only those three
# pages need to be journalled.  But 13 pages need to be written:
# page 1, the root page of table t1, and an 11-page overflow chain.
#
do_test pageropt-4.1 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(11300))
  }
} {3 13 3}
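
# Sanity check on the arithmetic: with 1024-byte pages, each overflow
# page holds about 1020 bytes of payload after its 4-byte next-page
# pointer, so an 11300-byte blob that spills almost entirely out of the
# leaf cell needs 11 overflow pages.  That accounts for the 13 writes:
# page 1, the t1 root page, and the 11-page chain.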

# Now delete that big entry, starting from a cold cache and an
# empty freelist.  The first 10 pages of the 11-page overflow chain
# have to be read, together with page 1 and the root of the t1 table:
# 12 reads total.  But only page 1, the t1 root, and the trunk of the
# freelist need to be journalled and written back.
#
do_test pageropt-4.2 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    DELETE FROM t1
  }
} {12 3 3}

sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
catch {db2 close}
finish_test