# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Sqlite coverage data."""

# TODO: get sys_info for data class, so we can see sqlite version etc
# TODO: get rid of skip_unless_data_storage_is
# TODO: get rid of "JSON message" and "SQL message" in the tests
# TODO: factor out dataop debugging to a wrapper class?
# TODO: make sure all dataop debugging is in place somehow
# TODO: should writes be batched?
# TODO: run_info

import collections
import glob
import itertools
import os
import sqlite3
import sys

from coverage.backward import get_thread_id, iitems
from coverage.data import filename_suffix
from coverage.debug import NoDebugging, SimpleReprMixin
from coverage.files import PathAliases
from coverage.misc import CoverageException, file_be_gone


SCHEMA_VERSION = 2

SCHEMA = """
-- One row, to record the version of the schema stored in this db.
CREATE TABLE coverage_schema (
    version integer
    -- Schema versions:
    -- 1: Released in 5.0a2
    -- 2: Added contexts in 5.0a3.  This is schema 2.
);

-- One row, to record some metadata about the data
CREATE TABLE meta (
    has_lines boolean,      -- Is this data recording lines?
    has_arcs boolean,       -- .. or branches?
    sys_argv text           -- The coverage command line that recorded the data.
);

-- A row per file measured.
CREATE TABLE file (
    id integer primary key,
    path text,
    unique(path)
);

-- A row per context measured.
CREATE TABLE context (
    id integer primary key,
    context text,
    unique(context)
);

-- If recording lines, a row per context per line executed.
CREATE TABLE line (
    file_id integer,            -- foreign key to `file`.
    context_id integer,         -- foreign key to `context`.
    lineno integer,             -- the line number.
    unique(file_id, context_id, lineno)
);

-- If recording branches, a row per context per from/to line transition executed.
CREATE TABLE arc (
    file_id integer,            -- foreign key to `file`.
    context_id integer,         -- foreign key to `context`.
    fromno integer,             -- line number jumped from.
    tono integer,               -- line number jumped to.
    unique(file_id, context_id, fromno, tono)
);

-- A row per file indicating the tracer used for that file.
CREATE TABLE tracer (
    file_id integer primary key,
    tracer text
);
"""


class CoverageSqliteData(SimpleReprMixin):
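    """Manages collected coverage data, stored in a SQLite database."""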

    def __init__(self, basename=None, suffix=None, warn=None, debug=None):
        self._basename = os.path.abspath(basename or ".coverage")
        self._suffix = suffix
        self._warn = warn
        self._debug = debug or NoDebugging()

        self._choose_filename()
        self._file_map = {}
        # Maps thread ids to SqliteDb objects.
        self._dbs = {}
        self._pid = os.getpid()

        # Are we in sync with the data file?
        self._have_used = False

        self._has_lines = False
        self._has_arcs = False

        self._current_context = None
        self._current_context_id = None
        self._query_contexts = None
        self._query_context_ids = None

    def _choose_filename(self):
        self._filename = self._basename
        suffix = filename_suffix(self._suffix)
        if suffix:
            self._filename += "." + suffix

    def _reset(self):
        if self._dbs:
            for db in self._dbs.values():
                db.close()
        self._dbs = {}
        self._file_map = {}
        self._have_used = False
        self._current_context_id = None

    def _create_db(self):
        if self._debug.should('dataio'):
            self._debug.write("Creating data file {!r}".format(self._filename))
        self._dbs[get_thread_id()] = SqliteDb(self._filename, self._debug)
        with self._dbs[get_thread_id()] as db:
            for stmt in SCHEMA.split(';'):
                stmt = stmt.strip()
                if stmt:
                    db.execute(stmt)
            db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
            db.execute(
                "insert into meta (has_lines, has_arcs, sys_argv) values (?, ?, ?)",
                (self._has_lines, self._has_arcs, str(getattr(sys, 'argv', None)))
            )

    def _open_db(self):
        if self._debug.should('dataio'):
            self._debug.write("Opening data file {!r}".format(self._filename))
        self._dbs[get_thread_id()] = SqliteDb(self._filename, self._debug)
        with self._dbs[get_thread_id()] as db:
            try:
                schema_version, = db.execute("select version from coverage_schema").fetchone()
            except Exception as exc:
                raise CoverageException(
                    "Data file {!r} doesn't seem to be a coverage data file: {}".format(
                        self._filename, exc
                    )
                )
            else:
                if schema_version != SCHEMA_VERSION:
                    raise CoverageException(
                        "Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
                            self._filename, schema_version, SCHEMA_VERSION
                        )
                    )

            for row in db.execute("select has_lines, has_arcs from meta"):
                self._has_lines, self._has_arcs = row

            for path, id in db.execute("select path, id from file"):
                self._file_map[path] = id

    def _connect(self):
        """Get the SqliteDb object to use."""
        if get_thread_id() not in self._dbs:
            if os.path.exists(self._filename):
                self._open_db()
            else:
                self._create_db()
        return self._dbs[get_thread_id()]

    def __nonzero__(self):
        if (get_thread_id() not in self._dbs and not os.path.exists(self._filename)):
            return False
        try:
            with self._connect() as con:
                rows = con.execute("select * from file limit 1")
                return bool(list(rows))
        except CoverageException:
            return False

    __bool__ = __nonzero__

    def dump(self):                                         # pragma: debugging
        """Write a dump of the database."""
        if self._debug:
            with self._connect() as con:
                self._debug.write(con.dump())

    def _file_id(self, filename, add=False):
        """Get the file id for `filename`.

        If filename is not in the database yet, add it if `add` is True.
        If the file isn't in the database and `add` is not True, return None.
        """
        if filename not in self._file_map:
            if add:
                with self._connect() as con:
                    cur = con.execute("insert or replace into file (path) values (?)", (filename,))
                    self._file_map[filename] = cur.lastrowid
        return self._file_map.get(filename)

    def _context_id(self, context):
        """Get the id for a context."""
        assert context is not None
        self._start_using()
        with self._connect() as con:
            row = con.execute("select id from context where context = ?", (context,)).fetchone()
            if row is not None:
                return row[0]
            else:
                return None

    def set_context(self, context):
        """Set the current context for future `add_lines` etc."""
        if self._debug.should('dataop'):
            self._debug.write("Setting context: %r" % (context,))
        self._current_context = context
        self._current_context_id = None

    def _set_context_id(self):
        """Use the _current_context to set _current_context_id."""
        context = self._current_context or ""
        context_id = self._context_id(context)
        if context_id is not None:
            self._current_context_id = context_id
        else:
            with self._connect() as con:
                cur = con.execute("insert into context (context) values (?)", (context,))
                self._current_context_id = cur.lastrowid

    def base_filename(self):
        """The base filename for storing data."""
        return self._basename

    def data_filename(self):
        """Where is the data stored?"""
        return self._filename

    def add_lines(self, line_data):
        """Add measured line data.

        `line_data` is a dictionary mapping file names to dictionaries::

            { filename: { lineno: None, ... }, ...}

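        For example (file name and line numbers are arbitrary)::

            data.add_lines({"a.py": {12: None, 47: None}})
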
        """
        if self._debug.should('dataop'):
            self._debug.write("Adding lines: %d files, %d lines total" % (
                len(line_data), sum(len(lines) for lines in line_data.values())
            ))
        self._start_using()
        self._choose_lines_or_arcs(lines=True)
        self._set_context_id()
        with self._connect() as con:
            for filename, linenos in iitems(line_data):
                file_id = self._file_id(filename, add=True)
                data = [(file_id, self._current_context_id, lineno) for lineno in linenos]
                con.executemany(
                    "insert or ignore into line (file_id, context_id, lineno) values (?, ?, ?)",
                    data,
                )

    def add_arcs(self, arc_data):
        """Add measured arc data.

        `arc_data` is a dictionary mapping file names to dictionaries::

            { filename: { (l1,l2): None, ... }, ...}

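        For example (file name and line numbers are arbitrary)::

            data.add_arcs({"a.py": {(12, 14): None, (14, 15): None}})
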
        """
        if self._debug.should('dataop'):
            self._debug.write("Adding arcs: %d files, %d arcs total" % (
                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
            ))
        self._start_using()
        self._choose_lines_or_arcs(arcs=True)
        self._set_context_id()
        with self._connect() as con:
            for filename, arcs in iitems(arc_data):
                file_id = self._file_id(filename, add=True)
                data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
                con.executemany(
                    "insert or ignore into arc "
                    "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
                    data,
                )

    def _choose_lines_or_arcs(self, lines=False, arcs=False):
        if lines and self._has_arcs:
            raise CoverageException("Can't add lines to existing arc data")
        if arcs and self._has_lines:
            raise CoverageException("Can't add arcs to existing line data")
        if not self._has_arcs and not self._has_lines:
            self._has_lines = lines
            self._has_arcs = arcs
            with self._connect() as con:
                con.execute("update meta set has_lines = ?, has_arcs = ?", (lines, arcs))

    def add_file_tracers(self, file_tracers):
        """Add per-file plugin information.

        `file_tracers` is { filename: plugin_name, ... }

        """
        self._start_using()
        with self._connect() as con:
            for filename, plugin_name in iitems(file_tracers):
                file_id = self._file_id(filename)
                if file_id is None:
                    raise CoverageException(
                        "Can't add file tracer data for unmeasured file '%s'" % (filename,)
                    )

                existing_plugin = self.file_tracer(filename)
                if existing_plugin:
                    if existing_plugin != plugin_name:
                        raise CoverageException(
                            "Conflicting file tracer name for '%s': %r vs %r" % (
                                filename, existing_plugin, plugin_name,
                            )
                        )
                elif plugin_name:
                    con.execute(
                        "insert into tracer (file_id, tracer) values (?, ?)",
                        (file_id, plugin_name)
                    )

    def touch_file(self, filename, plugin_name=""):
        """Ensure that `filename` appears in the data, empty if needed.

        `plugin_name` is the name of the plugin responsible for this file. It is used
        to associate the right filereporter, etc.
        """
        self._start_using()
        if self._debug.should('dataop'):
            self._debug.write("Touching %r" % (filename,))
        if not self._has_arcs and not self._has_lines:
            raise CoverageException("Can't touch files in an empty CoverageSqliteData")

        self._file_id(filename, add=True)
        if plugin_name:
            # Set the tracer for this file
            self.add_file_tracers({filename: plugin_name})

    def update(self, other_data, aliases=None):
        """Update this data with data from several other `CoverageData` instances.

        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.
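
        A minimal usage sketch (the suffixed file name is arbitrary)::

            combined = CoverageSqliteData(".coverage")
            combined.update(CoverageSqliteData(".coverage.worker1"))
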
        """
        if self._has_lines and other_data._has_arcs:
            raise CoverageException("Can't combine arc data with line data")
        if self._has_arcs and other_data._has_lines:
            raise CoverageException("Can't combine line data with arc data")

        aliases = aliases or PathAliases()

        # Force the database we're writing to into existence before we start
        # nesting connection contexts.
        self._start_using()

        # Collect all the lines, arcs, and tracers from the other data file.
        other_data.read()
        with other_data._connect() as conn:
            # Get files data.
            cur = conn.execute('select path from file')
            files = {path: aliases.map(path) for (path,) in cur}
            cur.close()

            # Get contexts data.
            cur = conn.execute('select context from context')
            contexts = [context for (context,) in cur]
            cur.close()

            # Get arc data.
            cur = conn.execute(
                'select file.path, context.context, arc.fromno, arc.tono '
                'from arc '
                'inner join file on file.id = arc.file_id '
                'inner join context on context.id = arc.context_id'
            )
            arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
            cur.close()

            # Get line data.
            cur = conn.execute(
                'select file.path, context.context, line.lineno '
                'from line '
                'inner join file on file.id = line.file_id '
                'inner join context on context.id = line.context_id'
            )
            lines = [(files[path], context, lineno) for (path, context, lineno) in cur]
            cur.close()

            # Get tracer data.
            cur = conn.execute(
                'select file.path, tracer '
                'from tracer '
                'inner join file on file.id = tracer.file_id'
            )
            tracers = {files[path]: tracer for (path, tracer) in cur}
            cur.close()

        with self._connect() as conn:
            conn.isolation_level = 'IMMEDIATE'

            # Get all tracers in the DB. Files not in the tracers are assumed
            # to have an empty string tracer. Since Sqlite does not support
            # full outer joins, we have to make two queries to fill the
            # dictionary.
            this_tracers = {path: '' for path, in conn.execute('select path from file')}
            this_tracers.update({
                aliases.map(path): tracer
                for path, tracer in conn.execute(
                    'select file.path, tracer from tracer '
                    'inner join file on file.id = tracer.file_id'
                )
            })

            # Create all file and context rows in the DB.
            conn.executemany(
                'insert or ignore into file (path) values (?)',
                ((file,) for file in files.values())
            )
            file_ids = {
                path: id
                for id, path in conn.execute('select id, path from file')
            }
            conn.executemany(
                'insert or ignore into context (context) values (?)',
                ((context,) for context in contexts)
            )
            context_ids = {
                context: id
                for id, context in conn.execute('select id, context from context')
            }

            # Prepare tracers, and fail if a conflict is found.
            # tracer_map tracks the tracers to be inserted.
            tracer_map = {}
            for path in files.values():
                this_tracer = this_tracers.get(path)
                other_tracer = tracers.get(path, '')
                # A file missing from this database has no tracer yet: its
                # entry is None, which can't conflict with the other tracer.
                if this_tracer is not None and this_tracer != other_tracer:
                    raise CoverageException(
                        "Conflicting file tracer name for '%s': %r vs %r" % (
                            path, this_tracer, other_tracer
                        )
                    )
                tracer_map[path] = other_tracer

            # Prepare arc and line rows to be inserted by converting the file
            # and context strings with integer ids. Then use the efficient
            # `executemany()` to insert all rows at once.
            arc_rows = (
                (file_ids[file], context_ids[context], fromno, tono)
                for file, context, fromno, tono in arcs
            )
            line_rows = (
                (file_ids[file], context_ids[context], lineno)
                for file, context, lineno in lines
            )

            self._choose_lines_or_arcs(arcs=bool(arcs), lines=bool(lines))

            conn.executemany(
                'insert or ignore into arc '
                '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)',
                arc_rows
            )
            conn.executemany(
                'insert or ignore into line '
                '(file_id, context_id, lineno) values (?, ?, ?)',
                line_rows
            )
            conn.executemany(
                'insert or ignore into tracer (file_id, tracer) values (?, ?)',
                ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
            )

        # Update all internal cache data.
        self._reset()
        self.read()

    def erase(self, parallel=False):
        """Erase the data in this object.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        self._reset()
        if self._debug.should('dataio'):
            self._debug.write("Erasing data file {!r}".format(self._filename))
        file_be_gone(self._filename)
        if parallel:
            data_dir, local = os.path.split(self._filename)
            localdot = local + '.*'
            pattern = os.path.join(os.path.abspath(data_dir), localdot)
            for filename in glob.glob(pattern):
                if self._debug.should('dataio'):
                    self._debug.write("Erasing parallel data file {!r}".format(filename))
                file_be_gone(filename)

    def read(self):
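        """Start using an existing data file."""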
        with self._connect():       # TODO: doesn't look right
            self._have_used = True

    def write(self):
        """Write the collected coverage data to a file."""
        pass

    def _start_using(self):
        if self._pid != os.getpid():
            # Looks like we forked! Have to start a new data file.
            self._reset()
            self._choose_filename()
            self._pid = os.getpid()
        if not self._have_used:
            self.erase()
        self._have_used = True

    def has_arcs(self):
        return bool(self._has_arcs)

    def measured_files(self):
        """A set of all files that had been measured."""
        return set(self._file_map)

    def measured_contexts(self):
        """A set of all contexts that have been measured."""
        self._start_using()
        with self._connect() as con:
            contexts = set(row[0] for row in con.execute("select distinct(context) from context"))
        return contexts

    def file_tracer(self, filename):
        """Get the plugin name of the file tracer for a file.

        Returns the name of the plugin that handles this file.  If the file was
        measured, but didn't use a plugin, then "" is returned.  If the file
        was not measured, then None is returned.

        """
        self._start_using()
        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return None
            row = con.execute("select tracer from tracer where file_id = ?", (file_id,)).fetchone()
            if row is not None:
                return row[0] or ""
            return ""   # File was measured, but no tracer associated.
    def set_query_contexts(self, contexts):
        """Set query contexts for future `lines`, `arcs` etc. calls."""
        if contexts:
            self._query_context_ids = self._get_query_context_ids(contexts)
        else:
            self._query_context_ids = None
        self._query_contexts = contexts

    def _get_query_context_ids(self, contexts=None):
        if contexts is not None:
            if not contexts:
                return None
            self._start_using()
            with self._connect() as con:
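                # SQLite's "glob" operator does Unix-style wildcard matching,
                # so a context pattern like "test_*" selects every context
                # starting with "test_".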
                context_clause = ' or '.join(['context glob ?'] * len(contexts))
                cur = con.execute("select id from context where " + context_clause, contexts)
                return [row[0] for row in cur.fetchall()]
        elif self._query_contexts:
            return self._query_context_ids
        return None

    def lines(self, filename, contexts=None):
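        """Get the line numbers recorded for `filename`.

        Returns a list of line numbers, or None if the file was not measured.
        """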
        self._start_using()
        if self.has_arcs():
            arcs = self.arcs(filename, contexts=contexts)
            if arcs is not None:
                all_lines = itertools.chain.from_iterable(arcs)
                return list(set(l for l in all_lines if l > 0))

        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return None
            else:
                query = "select distinct lineno from line where file_id = ?"
                data = [file_id]
                context_ids = self._get_query_context_ids(contexts)
                if context_ids is not None:
                    ids_array = ', '.join('?'*len(context_ids))
                    query += " and context_id in (" + ids_array + ")"
                    data += context_ids
                linenos = con.execute(query, data)
                return [lineno for lineno, in linenos]

    def arcs(self, filename, contexts=None):
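        """Get the arcs recorded for `filename`.

        Returns a list of (fromno, tono) pairs, or None if the file was not
        measured.
        """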
        self._start_using()
        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return None
            else:
                query = "select distinct fromno, tono from arc where file_id = ?"
                data = [file_id]
                context_ids = self._get_query_context_ids(contexts)
                if context_ids is not None:
                    ids_array = ', '.join('?'*len(context_ids))
                    query += " and context_id in (" + ids_array + ")"
                    data += context_ids
                arcs = con.execute(query, data)
                return list(arcs)

    def contexts_by_lineno(self, filename):
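        """Get a dict mapping line numbers to lists of context names."""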
        lineno_contexts_map = collections.defaultdict(list)
        self._start_using()
        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return lineno_contexts_map
            if self.has_arcs():
                query = (
                    "select arc.fromno, arc.tono, context.context "
                    "from arc, context "
                    "where arc.file_id = ? and arc.context_id = context.id"
                )
                data = [file_id]
                context_ids = self._get_query_context_ids()
                if context_ids is not None:
                    ids_array = ', '.join('?'*len(context_ids))
                    query += " and arc.context_id in (" + ids_array + ")"
                    data += context_ids
                for fromno, tono, context in con.execute(query, data):
                    if context not in lineno_contexts_map[fromno]:
                        lineno_contexts_map[fromno].append(context)
                    if context not in lineno_contexts_map[tono]:
                        lineno_contexts_map[tono].append(context)
            else:
                query = (
                    "select line.lineno, context.context "
                    "from line, context "
                    "where line.file_id = ? and line.context_id = context.id"
                )
                data = [file_id]
                context_ids = self._get_query_context_ids()
                if context_ids is not None:
                    ids_array = ', '.join('?'*len(context_ids))
                    query += " and line.context_id in (" + ids_array + ")"
                    data += context_ids
                for lineno, context in con.execute(query, data):
                    if context not in lineno_contexts_map[lineno]:
                        lineno_contexts_map[lineno].append(context)
        return lineno_contexts_map

    def run_infos(self):
        return []   # TODO


class SqliteDb(SimpleReprMixin):
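    """A simple wrapper around a sqlite3 connection, usable as a re-entrant
    context manager.
    """
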
    def __init__(self, filename, debug):
        self.debug = debug if debug.should('sql') else None
        self.filename = filename
        self.nest = 0
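        # The number of nested context-manager entries: the connection is
        # opened at the outermost __enter__ and committed and closed at the
        # outermost __exit__.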

    def connect(self):
        # SQLite on Windows on py2 won't open a file if the filename argument
        # has non-ascii characters in it.  Opening a relative file name avoids
        # a problem if the current directory has non-ascii.
        filename = os.path.relpath(self.filename)
        # It can happen that Python switches threads while the tracer writes
        # data. The second thread will also try to write to the data,
        # effectively causing a nested context. However, given the idempotent
        # nature of the tracer operations, sharing a connection among threads
        # is not a problem.
        if self.debug:
            self.debug.write("Connecting to {!r}".format(self.filename))
        self.con = sqlite3.connect(filename, check_same_thread=False)

        # This pragma makes writing faster. It disables rollbacks, but we never need them.
        # PyPy needs the .close() calls here, or sqlite gets twisted up:
        # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
        self.execute("pragma journal_mode=off").close()
        # This pragma makes writing faster.
        self.execute("pragma synchronous=off").close()

    def close(self):
        self.con.close()

    def __enter__(self):
        if self.nest == 0:
            self.connect()
            self.con.__enter__()
        self.nest += 1
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.nest -= 1
        if self.nest == 0:
            self.con.__exit__(exc_type, exc_value, traceback)
            self.close()

    def execute(self, sql, parameters=()):
        if self.debug:
            tail = " with {!r}".format(parameters) if parameters else ""
            self.debug.write("Executing {!r}{}".format(sql, tail))
        try:
            return self.con.execute(sql, parameters)
        except sqlite3.Error as exc:
            raise CoverageException("Couldn't use data file {!r}: {}".format(self.filename, exc))

    def executemany(self, sql, data):
        if self.debug:
            # `data` may be a generator; materialize it so we can count the
            # rows for the debug message and still pass them all to sqlite.
            data = list(data)
            self.debug.write("Executing many {!r} with {} rows".format(sql, len(data)))
        return self.con.executemany(sql, data)

    def dump(self):                                         # pragma: debugging
        """Return a multi-line string, the dump of the database."""
        return "\n".join(self.con.iterdump())