summaryrefslogtreecommitdiff
path: root/lib/DBD
diff options
context:
space:
mode:
Diffstat (limited to 'lib/DBD')
-rw-r--r--lib/DBD/DBM.pm1461
-rw-r--r--lib/DBD/ExampleP.pm428
-rw-r--r--lib/DBD/File.pm1637
-rw-r--r--lib/DBD/File/Developers.pod556
-rw-r--r--lib/DBD/File/HowTo.pod270
-rw-r--r--lib/DBD/File/Roadmap.pod176
-rw-r--r--lib/DBD/Gofer.pm1292
-rw-r--r--lib/DBD/Gofer/Policy/Base.pm162
-rw-r--r--lib/DBD/Gofer/Policy/classic.pm79
-rw-r--r--lib/DBD/Gofer/Policy/pedantic.pm53
-rw-r--r--lib/DBD/Gofer/Policy/rush.pm90
-rw-r--r--lib/DBD/Gofer/Transport/Base.pm410
-rw-r--r--lib/DBD/Gofer/Transport/corostream.pm144
-rw-r--r--lib/DBD/Gofer/Transport/null.pm111
-rw-r--r--lib/DBD/Gofer/Transport/pipeone.pm253
-rw-r--r--lib/DBD/Gofer/Transport/stream.pm292
-rw-r--r--lib/DBD/NullP.pm166
-rw-r--r--lib/DBD/Proxy.pm997
-rw-r--r--lib/DBD/Sponge.pm305
19 files changed, 8882 insertions, 0 deletions
diff --git a/lib/DBD/DBM.pm b/lib/DBD/DBM.pm
new file mode 100644
index 0000000..3c621a3
--- /dev/null
+++ b/lib/DBD/DBM.pm
@@ -0,0 +1,1461 @@
+#######################################################################
+#
+# DBD::DBM - a DBI driver for DBM files
+#
+# Copyright (c) 2004 by Jeff Zucker < jzucker AT cpan.org >
+# Copyright (c) 2010 by Jens Rehsack & H.Merijn Brand
+#
+# All rights reserved.
+#
+# You may freely distribute and/or modify this module under the terms
+# of either the GNU General Public License (GPL) or the Artistic License,
+# as specified in the Perl README file.
+#
+# USERS - see the pod at the bottom of this file
+#
+# DBD AUTHORS - see the comments in the code
+#
+#######################################################################
+require 5.008;
+use strict;
+
+#################
+package DBD::DBM;
+#################
+use base qw( DBD::File );
+use vars qw($VERSION $ATTRIBUTION $drh $methods_already_installed);
+$VERSION = '0.06';
+$ATTRIBUTION = 'DBD::DBM by Jens Rehsack';
+
+# no need to have driver() unless you need private methods
+#
+# Driver-handle constructor (class method). DBI calls this when the DSN
+# names this driver; the handle is cached in $drh and reused afterwards.
+sub driver ($;$)
+{
+    my ( $class, $attr ) = @_;
+    return $drh if ($drh);
+
+    # do the real work in DBD::File
+    #
+    $attr->{Attribution} = 'DBD::DBM by Jens Rehsack';
+    $drh = $class->SUPER::driver($attr);
+
+    # install private methods
+    #
+    # this requires that dbm_ (or foo_) be a registered prefix
+    # but you can write private methods before official registration
+    # by hacking the $dbd_prefix_registry in a private copy of DBI.pm
+    #
+    # the $methods_already_installed guard keeps a repeated driver()
+    # call (e.g. after CLONE clears $drh) from re-installing the method
+    unless ( $methods_already_installed++ )
+    {
+        DBD::DBM::st->install_method('dbm_schema');
+    }
+
+    return $drh;
+}
+
+# called by perl when a new interpreter/thread is spawned; the cached
+# driver handle must not be shared across interpreters, so drop it
+sub CLONE
+{
+    $drh = undef;
+}
+
+#####################
+package DBD::DBM::dr;
+#####################
+$DBD::DBM::dr::imp_data_size = 0;
+@DBD::DBM::dr::ISA = qw(DBD::File::dr);
+
+# you could put some :dr private methods here
+
+# you may need to over-ride some DBD::File::dr methods here
+# but you can probably get away with just letting it do the work
+# in most cases
+
+#####################
+package DBD::DBM::db;
+#####################
+$DBD::DBM::db::imp_data_size = 0;
+@DBD::DBM::db::ISA = qw(DBD::File::db);
+
+sub validate_STORE_attr
+{
+    my ( $dbh, $attrib, $value ) = @_;
+
+    # accept the deprecated dbm_ext / dbm_lockfile spellings and
+    # translate them to their f_* replacements before validating
+    my %legacy_name = ( dbm_ext => "f_ext", dbm_lockfile => "f_lockfile" );
+    exists $legacy_name{$attrib} and $attrib = $legacy_name{$attrib};
+
+    return $dbh->SUPER::validate_STORE_attr( $attrib, $value );
+}
+
+sub validate_FETCH_attr
+{
+    my ( $dbh, $attrib ) = @_;
+
+    # same legacy-name translation as in validate_STORE_attr above
+    my %legacy_name = ( dbm_ext => "f_ext", dbm_lockfile => "f_lockfile" );
+    exists $legacy_name{$attrib} and $attrib = $legacy_name{$attrib};
+
+    return $dbh->SUPER::validate_FETCH_attr($attrib);
+}
+
+# record the verbose DBD::DBM version in the handle, then let
+# DBD::File add its own version information
+sub set_versions
+{
+    my ($dbh) = @_;
+    $dbh->{dbm_version} = $DBD::DBM::VERSION;
+    return $dbh->SUPER::set_versions();
+}
+
+sub init_valid_attributes
+{
+    my $dbh = shift;
+
+    # define valid private attributes
+    #
+    # attempts to set non-valid attrs in connect() or
+    # with $dbh->{attr} will throw errors
+    #
+    # the attrs here *must* start with dbm_ or foo_
+    #
+    # see the STORE methods below for how to check these attrs
+    #
+    $dbh->{dbm_valid_attrs} = {
+        dbm_type => 1,    # the global DBM type e.g. SDBM_File
+        dbm_mldbm => 1,    # the global MLDBM serializer
+        dbm_cols => 1,    # the global column names
+        dbm_version => 1,    # verbose DBD::DBM version
+        dbm_store_metadata => 1,    # column names, etc.
+        dbm_berkeley_flags => 1,    # for BerkeleyDB
+        dbm_valid_attrs => 1,    # DBD::DBM::db valid attrs
+        dbm_readonly_attrs => 1,    # DBD::DBM::db r/o attrs
+        dbm_meta => 1,    # DBD::DBM public access for f_meta
+        dbm_tables => 1,    # DBD::DBM public access for f_meta
+    };
+    # these attributes may be read but never modified by the user
+    $dbh->{dbm_readonly_attrs} = {
+        dbm_version => 1,    # verbose DBD::DBM version
+        dbm_valid_attrs => 1,    # DBD::DBM::db valid attrs
+        dbm_readonly_attrs => 1,    # DBD::DBM::db r/o attrs
+        dbm_meta => 1,    # DBD::DBM public access for f_meta
+    };
+
+    # name of the driver-public alias for DBD::File's f_meta registry
+    $dbh->{dbm_meta} = "dbm_tables";
+
+    return $dbh->SUPER::init_valid_attributes();
+}
+
+# per-handle defaults: beyond what DBD::File sets up, DBD::DBM only
+# adds a default lockfile extension
+sub init_default_attributes
+{
+    my ( $dbh, $init_phase ) = @_;
+
+    $dbh->SUPER::init_default_attributes($init_phase);
+    $dbh->{f_lockfile} = '.lck';
+
+    return $dbh;
+}
+
+# build a human-readable version string such as
+# "0.06 using SDBM_File (1.05)"; when the table (or handle) uses MLDBM,
+# the MLDBM version and serializer are appended as well
+sub get_dbm_versions
+{
+    my ( $dbh, $table ) = @_;
+    $table ||= '';
+
+    my $meta;
+    my $class = $dbh->{ImplementorClass};
+    $class =~ s/::db$/::Table/;
+    $table and ( undef, $meta ) = $class->get_table_meta( $dbh, $table, 1 );
+    # no table given (or not found): build throw-away meta with defaults
+    $meta or ( $meta = {} and $class->bootstrap_table_meta( $dbh, $meta, $table ) );
+
+    my $dver;
+    my $dtype = $meta->{dbm_type};
+    # each probe is wrapped in eval because the module may not define
+    # VERSION(); a failed probe simply leaves the string unadorned
+    eval {
+        $dver = $meta->{dbm_type}->VERSION();
+
+        # *) when we're still alive here, everything went ok - no need to check for $@
+        $dtype .= " ($dver)";
+    };
+    if ( $meta->{dbm_mldbm} )
+    {
+        $dtype .= ' + MLDBM';
+        eval {
+            $dver = MLDBM->VERSION();
+            $dtype .= " ($dver)";    # (*)
+        };
+        eval {
+            my $ser_class = "MLDBM::Serializer::" . $meta->{dbm_mldbm};
+            my $ser_mod = $ser_class;
+            $ser_mod =~ s|::|/|g;
+            $ser_mod .= ".pm";
+            require $ser_mod;
+            $dver = $ser_class->VERSION();
+            $dtype .= ' + ' . $ser_class;    # (*)
+            $dver and $dtype .= " ($dver)";    # (*)
+        };
+    }
+    return sprintf( "%s using %s", $dbh->{dbm_version}, $dtype );
+}
+
+# you may need to over-ride some DBD::File::db methods here
+# but you can probably get away with just letting it do the work
+# in most cases
+
+#####################
+package DBD::DBM::st;
+#####################
+$DBD::DBM::st::imp_data_size = 0;
+@DBD::DBM::st::ISA = qw(DBD::File::st);
+
+sub FETCH
+{
+    my ( $sth, $attr ) = @_;
+
+    if ( $attr eq "NULLABLE" )
+    {
+        # report every column as NOT NULL: only BerkeleyDB would accept
+        # NULL values for non-MLDBM databases and no flavor accepts a
+        # NULL key; fully correct answers would need per-table knowledge
+        my @colnames = $sth->sql_get_colnames();
+        return [ (0) x @colnames ];
+    }
+
+    return $sth->SUPER::FETCH($attr);
+}    # FETCH
+
+# $sth->dbm_schema($table): driver-private method (see install_method in
+# driver()) returning the stored SQL schema string for a known table
+sub dbm_schema
+{
+    my ( $sth, $tname ) = @_;
+    $tname or return $sth->set_err( $DBI::stderr, 'No table name supplied!' );
+    my $tables = $sth->{Database}->{f_meta};
+    ( $tables and $tables->{$tname} )
+      or return $sth->set_err( $DBI::stderr, "Unknown table '$tname'!" );
+    return $tables->{$tname}->{schema};
+}
+# you could put some :st private methods here
+
+# you may need to over-ride some DBD::File::st methods here
+# but you can probably get away with just letting it do the work
+# in most cases
+
+############################
+package DBD::DBM::Statement;
+############################
+
+@DBD::DBM::Statement::ISA = qw(DBD::File::Statement);
+
+########################
+package DBD::DBM::Table;
+########################
+use Carp;
+use Fcntl;
+
+@DBD::DBM::Table::ISA = qw(DBD::File::Table);
+
+my $dirfext = $^O eq 'VMS' ? '.sdbm_dir' : '.dir';
+
+# resolve a file/table name via DBD::File, then set f_dontopen because
+# DBD::DBM opens its storage through tie() in open_file() rather than
+# through DBD::File's normal filehandle path
+sub file2table
+{
+    my ( $self, $meta, $file, $file_is_table, $quoted ) = @_;
+
+    my $tbl = $self->SUPER::file2table( $meta, $file, $file_is_table, $quoted ) or return;
+
+    $meta->{f_dontopen} = 1;
+
+    return $tbl;
+}
+
+my %reset_on_modify = (
+ dbm_type => "dbm_tietype",
+ dbm_mldbm => "dbm_tietype",
+ );
+__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+
+# map old-style (pre-0.06) attribute names to their modern equivalents
+# so legacy scripts keep working
+my %compat_map = (
+    ( map { $_ => "dbm_$_" } qw(type mldbm store_metadata) ),
+    dbm_ext => 'f_ext',
+    dbm_file => 'f_file',
+    # was ' f_lockfile' - the stray leading space made the mapping
+    # point at a nonexistent attribute name
+    dbm_lockfile => 'f_lockfile',
+    );
+__PACKAGE__->register_compat_map (\%compat_map);
+
+# fill in table meta defaults before first use: DBM type, MLDBM
+# serializer, BerkeleyDB flags and - when no explicit f_ext is
+# configured anywhere - a file extension guessed from the DBM type
+sub bootstrap_table_meta
+{
+    my ( $self, $dbh, $meta, $table ) = @_;
+
+    $meta->{dbm_type} ||= $dbh->{dbm_type} || 'SDBM_File';
+    $meta->{dbm_mldbm} ||= $dbh->{dbm_mldbm} if ( $dbh->{dbm_mldbm} );
+    $meta->{dbm_berkeley_flags} ||= $dbh->{dbm_berkeley_flags};
+
+    defined $meta->{f_ext}
+      or $meta->{f_ext} = $dbh->{f_ext};
+    unless ( defined( $meta->{f_ext} ) )
+    {
+        my $ext;
+        # the "/r" suffix tells DBD::File the extension is required
+        if ( $meta->{dbm_type} eq 'SDBM_File' or $meta->{dbm_type} eq 'ODBM_File' )
+        {
+            $ext = '.pag/r';
+        }
+        elsif ( $meta->{dbm_type} eq 'NDBM_File' )
+        {
+            # XXX NDBM_File on FreeBSD (and elsewhere?) may actually be Berkeley
+            # behind the scenes and so create a single .db file.
+            if ( $^O =~ /bsd/i or lc($^O) eq 'darwin' )
+            {
+                $ext = '.db/r';
+            }
+            elsif ( $^O eq 'SunOS' or $^O eq 'Solaris' or $^O eq 'AIX' )
+            {
+                $ext = '.pag/r';    # here it's implemented like dbm - just a bit improved
+            }
+            # else wrapped GDBM
+        }
+        defined($ext) and $meta->{f_ext} = $ext;
+    }
+
+    $self->SUPER::bootstrap_table_meta( $dbh, $meta, $table );
+}
+
+# finish table meta setup: load the tie class (wrapping it in MLDBM
+# when a serializer is configured) and apply handle-level defaults for
+# dbm_store_metadata and the column names
+sub init_table_meta
+{
+    my ( $self, $dbh, $meta, $table ) = @_;
+
+    unless ( defined( $meta->{dbm_tietype} ) )
+    {
+        my $tie_type = $meta->{dbm_type};
+        $INC{"$tie_type.pm"} or require "$tie_type.pm";
+        # BerkeleyDB itself is not tie-able; use its ::Hash interface
+        $tie_type eq 'BerkeleyDB' and $tie_type = 'BerkeleyDB::Hash';
+
+        if ( $meta->{dbm_mldbm} )
+        {
+            # MLDBM becomes the tie class and wraps the real DBM module,
+            # which open_file() later passes to MLDBM via dbm_usedb
+            $INC{"MLDBM.pm"} or require "MLDBM.pm";
+            $meta->{dbm_usedb} = $tie_type;
+            $tie_type = 'MLDBM';
+        }
+
+        $meta->{dbm_tietype} = $tie_type;
+    }
+
+    unless ( defined( $meta->{dbm_store_metadata} ) )
+    {
+        # metadata storage defaults to on unless explicitly disabled
+        my $store = $dbh->{dbm_store_metadata};
+        defined($store) or $store = 1;
+        $meta->{dbm_store_metadata} = $store;
+    }
+
+    unless ( defined( $meta->{col_names} ) )
+    {
+        defined( $dbh->{dbm_cols} ) and $meta->{col_names} = $dbh->{dbm_cols};
+    }
+
+    $self->SUPER::init_table_meta( $dbh, $meta, $table );
+}
+
+# open (tie) the DBM storage for a table and, unless the table is being
+# freshly created, recover schema and column names from the hidden
+# "_metadata \0" row (or fall back to dbm_cols / the default k,v pair)
+sub open_file
+{
+    my ( $self, $meta, $attrs, $flags ) = @_;
+    # let DBD::File handle locking etc.; f_dontopen (set in file2table)
+    # keeps it from opening the data file itself
+    $self->SUPER::open_file( $meta, $attrs, $flags );
+    unless ( $flags->{dropMode} )
+    {
+        # TIEING
+        #
+        # XXX allow users to pass in a pre-created tied object
+        #
+        my @tie_args;
+        if ( $meta->{dbm_type} eq 'BerkeleyDB' )
+        {
+            # BerkeleyDB uses named arguments; user-supplied
+            # dbm_berkeley_flags may override the open flags
+            my $DB_CREATE = BerkeleyDB::DB_CREATE();
+            my $DB_RDONLY = BerkeleyDB::DB_RDONLY();
+            my %tie_flags;
+            if ( my $f = $meta->{dbm_berkeley_flags} )
+            {
+                defined( $f->{DB_CREATE} ) and $DB_CREATE = delete $f->{DB_CREATE};
+                defined( $f->{DB_RDONLY} ) and $DB_RDONLY = delete $f->{DB_RDONLY};
+                %tie_flags = %$f;
+            }
+            my $open_mode = $flags->{lockMode} || $flags->{createMode} ? $DB_CREATE : $DB_RDONLY;
+            @tie_args = (
+                -Filename => $meta->{f_fqbn},
+                -Flags => $open_mode,
+                %tie_flags
+            );
+        }
+        else
+        {
+            # classic DBM flavors take (filename, fcntl-mode, perms);
+            # read-only unless the statement needs write access
+            my $open_mode = O_RDONLY;
+            $flags->{lockMode} and $open_mode = O_RDWR;
+            $flags->{createMode} and $open_mode = O_RDWR | O_CREAT | O_TRUNC;
+
+            @tie_args = ( $meta->{f_fqbn}, $open_mode, 0666 );
+        }
+
+        if ( $meta->{dbm_mldbm} )
+        {
+            # MLDBM is configured through package globals, not tie args
+            $MLDBM::UseDB = $meta->{dbm_usedb};
+            $MLDBM::Serializer = $meta->{dbm_mldbm};
+        }
+
+        $meta->{hash} = {};
+        my $tie_class = $meta->{dbm_tietype};
+        eval { tie %{ $meta->{hash} }, $tie_class, @tie_args };
+        $@ and croak "Cannot tie(\%h $tie_class @tie_args): $@";
+        # NOTE(review): this checks f_fqfn while the tie above used
+        # f_fqbn - confirm both are set consistently by DBD::File
+        -f $meta->{f_fqfn} or croak( "No such file: '" . $meta->{f_fqfn} . "'" );
+    }
+
+    unless ( $flags->{createMode} )
+    {
+        my ( $meta_data, $schema, $col_names );
+        if ( $meta->{dbm_store_metadata} )
+        {
+            # the metadata row uses a key no normal row can collide with
+            $meta_data = $col_names = $meta->{hash}->{"_metadata \0"};
+            if ( $meta_data and $meta_data =~ m~<dbd_metadata>(.+)</dbd_metadata>~is )
+            {
+                $schema = $col_names = $1;
+                $schema =~ s~.*<schema>(.+)</schema>.*~$1~is;
+                $col_names =~ s~.*<col_names>(.+)</col_names>.*~$1~is;
+            }
+        }
+        # no stored metadata: use configured names or default to k,v
+        $col_names ||= $meta->{col_names} || [ 'k', 'v' ];
+        $col_names = [ split /,/, $col_names ] if ( ref $col_names ne 'ARRAY' );
+        if ( $meta->{dbm_store_metadata} and not $meta->{hash}->{"_metadata \0"} )
+        {
+            # first write access to a table without metadata: seed it
+            $schema or $schema = '';
+            $meta->{hash}->{"_metadata \0"} =
+                "<dbd_metadata>"
+              . "<schema>$schema</schema>"
+              . "<col_names>"
+              . join( ",", @{$col_names} )
+              . "</col_names>"
+              . "</dbd_metadata>";
+        }
+
+        $meta->{schema} = $schema;
+        $meta->{col_names} = $col_names;
+    }
+}
+
+# you must define drop
+# it is called from execute of a SQL DROP statement
+#
+sub drop ($$)
+{
+    my ( $self, $data ) = @_;
+    my $meta = $self->{meta};
+    # release the tie first so the underlying files can be unlinked
+    $meta->{hash} and untie %{ $meta->{hash} };
+    $self->SUPER::drop($data);
+    # XXX extra_files
+    # SDBM/ODBM-style storage keeps a companion ".dir" file next to the
+    # ".pag" file; DBD::File only knows the main file, so remove it here
+    -f $meta->{f_fqbn} . $dirfext
+      and $meta->{f_ext} eq '.pag/r'
+      and unlink( $meta->{f_fqbn} . $dirfext );
+    return 1;
+}
+
+# you must define fetch_row, it is called on all fetches;
+# it MUST return undef when no rows are left to fetch;
+# as Janis might say: "undef's just another word for
+# nothing left to fetch" :-)
+#
+sub fetch_row ($$)
+{
+    my ( $self, $data ) = @_;
+    my $meta = $self->{meta};
+    # fetch with %each
+    #
+    my @ary = each %{ $meta->{hash} };
+    # skip the hidden metadata row when dbm_store_metadata is on
+    $meta->{dbm_store_metadata}
+      and $ary[0]
+      and $ary[0] eq "_metadata \0"
+      and @ary = each %{ $meta->{hash} };
+
+    my ( $key, $val ) = @ary;
+    # end-of-data means each() returned an empty list; test definedness
+    # rather than truth so a legitimate key of "0" or "" does not
+    # terminate the fetch loop prematurely
+    unless ( defined $key )
+    {
+        delete $self->{row};
+        return;
+    }
+    my @row = ( ref($val) eq 'ARRAY' ) ? ( $key, @$val ) : ( $key, $val );
+    $self->{row} = @row ? \@row : undef;
+    return wantarray ? @row : \@row;
+}
+
+# you must define push_row unless insert_new_row and update_specific_row
+# are defined; those are called on inserts and updates as primitives
+#
+sub insert_new_row ($$$)
+{
+    my ( $self, $data, $row_aryref ) = @_;
+    my $meta = $self->{meta};
+    my $ncols = scalar( @{ $meta->{col_names} } );
+    my $nitems = scalar( @{$row_aryref} );
+    $ncols == $nitems
+      or croak "You tried to insert $nitems, but table is created with $ncols columns";
+
+    my $key = shift @$row_aryref;
+    my $exists;
+    # the eval guards against tie implementations that croak inside
+    # exists(); such a failure is treated as "does not exist"
+    eval { $exists = exists( $meta->{hash}->{$key} ); };
+    $exists and croak "Row with PK '$key' already exists";
+
+    # the first column is the DBM key; with MLDBM the remaining columns
+    # are stored as an array ref, without it only one value column fits
+    $meta->{hash}->{$key} = $meta->{dbm_mldbm} ? $row_aryref : $row_aryref->[0];
+
+    return 1;
+}
+
+# this is where you grab the column names from a CREATE statement
+# if you don't need to do that, it must be defined but can be empty
+#
+sub push_names ($$$)
+{
+    my ( $self, $data, $row_aryref ) = @_;
+    my $meta = $self->{meta};
+
+    # some sanity checks ...
+    my $ncols = scalar(@$row_aryref);
+    $ncols < 2 and croak "At least 2 columns are required for DBD::DBM tables ...";
+    !$meta->{dbm_mldbm}
+      and $ncols > 2
+      and croak "Without serializing with MLDBM only 2 columns are supported, you give $ncols";
+    $meta->{col_names} = $row_aryref;
+    return unless $meta->{dbm_store_metadata};
+
+    # persist schema and column names in the hidden "_metadata \0" row;
+    # prefer the SQL engine's schema_str() when available (SQL::Statement),
+    # otherwise fall back to scraping the raw CREATE statement text
+    my $stmt = $data->{sql_stmt};
+    my $col_names = join( ',', @{$row_aryref} );
+    my $schema = $data->{Database}->{Statement};
+    $schema =~ s/^[^\(]+\((.+)\)$/$1/s;
+    $schema = $stmt->schema_str() if ( $stmt->can('schema_str') );
+    $meta->{hash}->{"_metadata \0"} =
+        "<dbd_metadata>"
+      . "<schema>$schema</schema>"
+      . "<col_names>$col_names</col_names>"
+      . "</dbd_metadata>";
+}
+
+# fetch_one_row, delete_one_row, update_one_row
+# are optimized for hash-style lookup without looping;
+# they are optional, but if you omit them you may need real
+# truncate() and seek() implementations, see below
+#
+sub fetch_one_row ($$;$)
+{
+    my ( $self, $key_only, $key ) = @_;
+    my $meta = $self->{meta};
+
+    # with $key_only set, the caller only wants the key column's name
+    return $meta->{col_names}->[0] if ($key_only);
+    return unless exists $meta->{hash}->{$key};
+
+    my $val = $meta->{hash}->{$key};
+    my @row = ( $key, ref($val) eq 'ARRAY' ? @{$val} : $val );
+    return wantarray ? @row : \@row;
+}
+
+# remove a single row, addressed by its key column (primary key)
+sub delete_one_row ($$$)
+{
+    my ( $self, $data, $aryref ) = @_;
+    delete $self->{meta}->{hash}->{ $aryref->[0] };
+}
+
+# replace the non-key columns of the row whose key is $aryref->[0];
+# the key is shifted off and the remainder stored as the value
+sub update_one_row ($$$)
+{
+    my ( $self, $data, $aryref ) = @_;
+    my $meta = $self->{meta};
+    my $key = shift @$aryref;
+    defined $key or return;
+    # after the shift $aryref is normally still an array ref; the ref()
+    # check is a belt-and-braces guard for scalar callers
+    my $row = ( ref($aryref) eq 'ARRAY' ) ? $aryref : [$aryref];
+    $meta->{hash}->{$key} = $meta->{dbm_mldbm} ? $row : $row->[0];
+}
+
+# update the row identified by its original values in $origary to the
+# new values in $aryref; used when the key column itself may change
+sub update_specific_row ($$$$)
+{
+    my ( $self, $data, $aryref, $origary ) = @_;
+    my $meta = $self->{meta};
+    my $key = shift @$origary;
+    my $newkey = shift @$aryref;
+    return unless ( defined $key );
+    # when the key changed, drop the old entry so no stale row remains
+    $key eq $newkey or delete $meta->{hash}->{$key};
+    my $row = ( ref($aryref) eq 'ARRAY' ) ? $aryref : [$aryref];
+    $meta->{hash}->{$newkey} = $meta->{dbm_mldbm} ? $row : $row->[0];
+}
+
+# cleanup when the table object goes away: release the tie before
+# DBD::File's DESTROY unlocks and closes the underlying file(s)
+#
+sub DESTROY ($)
+{
+    my ($self) = @_;
+    my $tied = $self->{meta}->{hash};
+    untie %{$tied} if ($tied);
+
+    $self->SUPER::DESTROY();
+}
+
+# truncate() and seek() must be defined to satisfy DBI::SQL::Nano
+# *IF* you define the *_one_row methods above, truncate() and
+# seek() can be empty or you can use them without actually
+# truncating or seeking anything but if you don't define the
+# *_one_row methods, you may need to define these
+
+# if you need to do something after a series of
+# deletes or updates, you can put it in truncate()
+# which is called at the end of executing
+#
+sub truncate ($$)
+{
+    my ( $self, $data ) = @_;
+    return 1;    # nothing to flush for hash-tied storage
+}
+
+# seek() is only needed if you use IO::File
+# though it could be used for other non-file operations
+# that you need to do before "writes" or truncate()
+#
+sub seek ($$$$)
+{
+    my ( $self, $data, $pos, $whence ) = @_;
+    return 1;    # no file position to manage; storage is a tied hash
+}
+
+# Th, th, th, that's all folks! See DBD::File and DBD::CSV for other
+# examples of creating pure perl DBDs. I hope this helped.
+# Now it's time to go forth and create your own DBD!
+# Remember to check in with dbi-dev@perl.org before you get too far.
+# We may be able to make suggestions or point you to other related
+# projects.
+
+1;
+__END__
+
+=pod
+
+=head1 NAME
+
+DBD::DBM - a DBI driver for DBM & MLDBM files
+
+=head1 SYNOPSIS
+
+ use DBI;
+ $dbh = DBI->connect('dbi:DBM:'); # defaults to SDBM_File
+ $dbh = DBI->connect('DBI:DBM(RaiseError=1):'); # defaults to SDBM_File
+ $dbh = DBI->connect('dbi:DBM:dbm_type=DB_File'); # defaults to DB_File
+ $dbh = DBI->connect('dbi:DBM:dbm_mldbm=Storable'); # MLDBM with SDBM_File
+
+ # or
+ $dbh = DBI->connect('dbi:DBM:', undef, undef);
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ f_ext => '.db/r',
+ f_dir => '/path/to/dbfiles/',
+ f_lockfile => '.lck',
+ dbm_type => 'BerkeleyDB',
+ dbm_mldbm => 'FreezeThaw',
+ dbm_store_metadata => 1,
+ dbm_berkeley_flags => {
+ '-Cachesize' => 1000, # set a ::Hash flag
+ },
+ });
+
+and other variations on connect() as shown in the L<DBI> docs,
+L<DBD::File metadata|DBD::File/Metadata> and L</Metadata>
+shown below.
+
+Use standard DBI prepare, execute, fetch, placeholders, etc.,
+see L<QUICK START> for an example.
+
+=head1 DESCRIPTION
+
+DBD::DBM is a database management system that works right out of the
+box. If you have a standard installation of Perl and DBI you can
+begin creating, accessing, and modifying simple database tables
+without any further modules. You can add other modules (e.g.,
+SQL::Statement, DB_File etc) for improved functionality.
+
+The module uses a DBM file storage layer. DBM file storage is common on
+many platforms and files can be created with it in many programming
+languages using different APIs. That means, in addition to creating
+files with DBI/SQL, you can also use DBI/SQL to access and modify files
+created by other DBM modules and programs and vice versa. B<Note> that
+in those cases it might be necessary to use a common subset of the
+provided features.
+
+DBM files are stored in binary format optimized for quick retrieval
+when using a key field. That optimization can be used advantageously
+to make DBD::DBM SQL operations that use key fields very fast. There
+are several different "flavors" of DBM which use different storage
+formats supported by perl modules such as SDBM_File and MLDBM. This
+module supports all of the flavors that perl supports and, when used
+with MLDBM, supports tables with any number of columns and insertion
+of Perl objects into tables.
+
+DBD::DBM has been tested with the following DBM types: SDBM_File,
+NDBM_File, ODBM_File, GDBM_File, DB_File, BerkeleyDB. Each type was
+tested both with and without MLDBM and with the Data::Dumper,
+Storable, FreezeThaw, YAML and JSON serializers using the DBI::SQL::Nano
+or the SQL::Statement engines.
+
+=head1 QUICK START
+
+DBD::DBM operates like all other DBD drivers - its basic syntax and
+operation is specified by DBI. If you're not familiar with DBI, you should
+start by reading L<DBI> and the documents it points to and then come back
+and read this file. If you are familiar with DBI, you already know most of
+what you need to know to operate this module. Just jump in and create a
+test script something like the one shown below.
+
+You should be aware that there are several options for the SQL engine
+underlying DBD::DBM, see L<Supported SQL syntax>. There are also many
+options for DBM support, see especially the section on L<Adding
+multi-column support with MLDBM>.
+
+But here's a sample to get you started.
+
+ use DBI;
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->{RaiseError} = 1;
+ for my $sql( split /;\n+/,"
+ CREATE TABLE user ( user_name TEXT, phone TEXT );
+ INSERT INTO user VALUES ('Fred Bloggs','233-7777');
+ INSERT INTO user VALUES ('Sanjay Patel','777-3333');
+ INSERT INTO user VALUES ('Junk','xxx-xxxx');
+ DELETE FROM user WHERE user_name = 'Junk';
+ UPDATE user SET phone = '999-4444' WHERE user_name = 'Sanjay Patel';
+ SELECT * FROM user
+ "){
+ my $sth = $dbh->prepare($sql);
+ $sth->execute;
+ $sth->dump_results if $sth->{NUM_OF_FIELDS};
+ }
+ $dbh->disconnect;
+
+=head1 USAGE
+
+This section will explain some usage cases in more detail. To get an
+overview about the available attributes, see L</Metadata>.
+
+=head2 Specifying Files and Directories
+
+DBD::DBM will automatically supply an appropriate file extension for the
+type of DBM you are using. For example, if you use SDBM_File, a table
+called "fruit" will be stored in two files called "fruit.pag" and
+"fruit.dir". You should B<never> specify the file extensions in your SQL
+statements.
+
+DBD::DBM recognizes following default extensions for following types:
+
+=over 4
+
+=item .pag/r
+
+Chosen for dbm_type C<< SDBM_File >>, C<< ODBM_File >> and C<< NDBM_File >>
+when an implementation is detected which wraps C<< -ldbm >> for
+C<< NDBM_File >> (e.g. Solaris, AIX, ...).
+
+For those types, the C<< .dir >> extension is recognized, too (for being
+deleted when dropping a table).
+
+=item .db/r
+
+Chosen for dbm_type C<< NDBM_File >> when an implementation is detected
+which wraps BerkeleyDB 1.x for C<< NDBM_File >> (typically BSD's, Darwin).
+
+=back
+
+C<< GDBM_File >>, C<< DB_File >> and C<< BerkeleyDB >> don't usually
+use a file extension.
+
+If your DBM type uses an extension other than one of the recognized
+types of extensions, you should set the I<f_ext> attribute to the
+extension B<and> file a bug report as described in DBI with the name
+of the implementation and extension so we can add it to DBD::DBM.
+Thanks in advance for that :-).
+
+ $dbh = DBI->connect('dbi:DBM:f_ext=.db'); # .db extension is used
+ $dbh = DBI->connect('dbi:DBM:f_ext='); # no extension is used
+
+ # or
+ $dbh->{f_ext}='.db'; # global setting
+ $dbh->{f_meta}->{'qux'}->{f_ext}='.db'; # setting for table 'qux'
+
+By default files are assumed to be in the current working directory.
+To use other directories specify the I<f_dir> attribute in either the
+connect string or by setting the database handle attribute.
+
+For example, this will look for the file /foo/bar/fruit (or
+/foo/bar/fruit.pag for DBM types that use that extension)
+
+ my $dbh = DBI->connect('dbi:DBM:f_dir=/foo/bar');
+ # and this will too:
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->{f_dir} = '/foo/bar';
+ # but this is recommended
+ my $dbh = DBI->connect('dbi:DBM:', undef, undef, { f_dir => '/foo/bar' } );
+
+ # now you can do
+ my $ary = $dbh->selectall_arrayref(q{ SELECT x FROM fruit });
+
+You can also use delimited identifiers to specify paths directly in SQL
+statements. This looks in the same place as the two examples above but
+without setting I<f_dir>:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ my $ary = $dbh->selectall_arrayref(q{
+ SELECT x FROM "/foo/bar/fruit"
+ });
+
+You can also tell DBD::DBM to use a specified path for a specific table:
+
+ $dbh->{dbm_tables}->{f}->{file} = q(/foo/bar/fruit);
+
+Please be aware that you cannot specify this during connection.
+
+If you have SQL::Statement installed, you can use table aliases:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ my $ary = $dbh->selectall_arrayref(q{
+ SELECT f.x FROM "/foo/bar/fruit" AS f
+ });
+
+See the L<GOTCHAS AND WARNINGS> for using DROP on tables.
+
+=head2 Table locking and flock()
+
+Table locking is accomplished using a lockfile which has the same
+basename as the table's file but with the file extension '.lck' (or a
+lockfile extension that you supply, see below). This lock file is
+created with the table during a CREATE and removed during a DROP.
+Every time the table itself is opened, the lockfile is flocked(). For
+SELECT, this is a shared lock. For all other operations, it is an
+exclusive lock (except when you specify something different using the
+I<f_lock> attribute).
+
+Since the locking depends on flock(), it only works on operating
+systems that support flock(). In cases where flock() is not
+implemented, DBD::DBM will simply behave as if the flock() had
+occurred although no actual locking will happen. Read the
+documentation for flock() for more information.
+
+Even on those systems that do support flock(), locking is only
+advisory - as is always the case with flock(). This means that if
+another program tries to access the table file while DBD::DBM has the
+table locked, that other program will *succeed* at opening unless
+it is also using flock on the '.lck' file. As a result DBD::DBM's
+locking only really applies to other programs using DBD::DBM or other
+program written to cooperate with DBD::DBM locking.
+
+=head2 Specifying the DBM type
+
+Each "flavor" of DBM stores its files in a different format and has
+different capabilities and limitations. See L<AnyDBM_File> for a
+comparison of DBM types.
+
+By default, DBD::DBM uses the C<< SDBM_File >> type of storage since
+C<< SDBM_File >> comes with Perl itself. If you have other types of
+DBM storage available, you can use any of them with DBD::DBM. It is
+strongly recommended to use at least C<< DB_File >>, because C<<
+SDBM_File >> has quirks and limitations and C<< ODBM_File >>, C<<
+NDBM_File >> and C<< GDBM_File >> are not always available.
+
+You can specify the DBM type using the I<dbm_type> attribute which can
+be set in the connection string or with C<< $dbh->{dbm_type} >> and
+C<< $dbh->{f_meta}->{$table_name}->{type} >> for per-table settings in
+cases where a single script is accessing more than one kind of DBM
+file.
+
+In the connection string, just set C<< dbm_type=TYPENAME >> where
+C<< TYPENAME >> is any DBM type such as GDBM_File, DB_File, etc. Do I<not>
+use MLDBM as your I<dbm_type> as that is set differently, see below.
+
+ my $dbh=DBI->connect('dbi:DBM:'); # uses the default SDBM_File
+ my $dbh=DBI->connect('dbi:DBM:dbm_type=GDBM_File'); # uses the GDBM_File
+
+ # You can also use $dbh->{dbm_type} to set the DBM type for the connection:
+ $dbh->{dbm_type} = 'DB_File'; # set the global DBM type
+ print $dbh->{dbm_type}; # display the global DBM type
+
+If you have several tables in your script that use different DBM
+types, you can use the $dbh->{dbm_tables} hash to store different
+settings for the various tables. You can even use this to perform
+joins on files that have completely different storage mechanisms.
+
+ # sets global default of GDBM_File
+ my $dbh = DBI->connect('dbi:DBM:dbm_type=GDBM_File');
+
+ # overrides the global setting, but only for the tables called
+ # I<foo> and I<bar>
+ my $dbh->{f_meta}->{foo}->{dbm_type} = 'DB_File';
+ my $dbh->{f_meta}->{bar}->{dbm_type} = 'BerkeleyDB';
+
+ # prints the dbm_type for the table "foo"
+ print $dbh->{f_meta}->{foo}->{dbm_type};
+
+B<Note> that you must change the I<dbm_type> of a table before you access
+it for first time.
+
+=head2 Adding multi-column support with MLDBM
+
+Most of the DBM types only support two columns and even if it would
+support more, DBD::DBM would only use two. However a CPAN module
+called MLDBM overcomes this limitation by allowing more than two
+columns. MLDBM does this by serializing the data - basically it puts
+a reference to an array into the second column. It can also put almost
+any kind of Perl object or even B<Perl coderefs> into columns.
+
+If you want more than two columns, you B<must> install MLDBM. It's available
+for many platforms and is easy to install.
+
+MLDBM is by default distributed with three serializers - Data::Dumper,
+Storable, and FreezeThaw. Data::Dumper is the default and Storable is the
+fastest. MLDBM can also make use of user-defined serialization methods or
+other serialization modules (e.g. L<YAML::MLDBM> or
+L<MLDBM::Serializer::JSON>. You select the serializer using the
+I<dbm_mldbm> attribute.
+
+Some examples:
+
+ $dbh=DBI->connect('dbi:DBM:dbm_mldbm=Storable'); # use MLDBM with Storable
+ $dbh=DBI->connect(
+ 'dbi:DBM:dbm_mldbm=MySerializer' # use MLDBM with a user defined module
+ );
+ $dbh=DBI->connect('dbi:DBM:', undef,
+ undef, { dbm_mldbm => 'YAML' }); # use 3rd party serializer
+ $dbh->{dbm_mldbm} = 'YAML'; # same as above
+ print $dbh->{dbm_mldbm} # show the MLDBM serializer
+ $dbh->{f_meta}->{foo}->{dbm_mldbm}='Data::Dumper'; # set Data::Dumper for table "foo"
+ print $dbh->{f_meta}->{foo}->{dbm_mldbm}; # show serializer for table "foo"
+
+MLDBM works on top of other DBM modules so you can also set a DBM type
+along with setting dbm_mldbm. The examples above would default to using
+SDBM_File with MLDBM. If you wanted GDBM_File instead, here's how:
+
+ # uses DB_File with MLDBM and Storable
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ dbm_type => 'DB_File',
+ dbm_mldbm => 'Storable',
+ });
+
+SDBM_File, the default I<dbm_type> is quite limited, so if you are going to
+use MLDBM, you should probably use a different type, see L<AnyDBM_File>.
+
+See below for some L<GOTCHAS AND WARNINGS> about MLDBM.
+
+=head2 Support for Berkeley DB
+
+The Berkeley DB storage type is supported through two different Perl
+modules - DB_File (which supports only features in old versions of Berkeley
+DB) and BerkeleyDB (which supports all versions). DBD::DBM supports
+specifying either "DB_File" or "BerkeleyDB" as a I<dbm_type>, with or
+without MLDBM support.
+
+The "BerkeleyDB" dbm_type is experimental and it's interface is likely to
+change. It currently defaults to BerkeleyDB::Hash and does not currently
+support ::Btree or ::Recno.
+
+With BerkeleyDB, you can specify initialization flags by setting them in
+your script like this:
+
+ use BerkeleyDB;
+ my $env = new BerkeleyDB::Env -Home => $dir; # and/or other Env flags
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ dbm_type => 'BerkeleyDB',
+ dbm_mldbm => 'Storable',
+ dbm_berkeley_flags => {
+ 'DB_CREATE' => DB_CREATE, # pass in constants
+ 'DB_RDONLY' => DB_RDONLY, # pass in constants
+ '-Cachesize' => 1000, # set a ::Hash flag
+ '-Env' => $env, # pass in an environment
+ },
+ });
+
+Do I<not> set the -Flags or -Filename flags as those are determined and
+overwritten by the SQL (e.g. -Flags => DB_RDONLY is set automatically
+when you issue a SELECT statement).
+
+Time has not permitted us to provide support in this release of DBD::DBM
+for further Berkeley DB features such as transactions, concurrency,
+locking, etc. We will be working on these in the future and would value
+suggestions, patches, etc.
+
+See L<DB_File> and L<BerkeleyDB> for further details.
+
+=head2 Optimizing the use of key fields
+
+Most "flavors" of DBM have only two physical columns (but can contain
+multiple logical columns as explained above in
+L<Adding multi-column support with MLDBM>). They work similarly to a
+Perl hash with the first column serving as the key. Like a Perl hash, DBM
+files permit you to do quick lookups by specifying the key and thus avoid
+looping through all records (supported by DBI::SQL::Nano only). Also like
+a Perl hash, the keys must be unique. It is impossible to create two
+records with the same key. To put this more simply and in SQL terms,
+the key column functions as the I<PRIMARY KEY> or UNIQUE INDEX.
+
+In DBD::DBM, you can take advantage of the speed of keyed lookups by using
+DBI::SQL::Nano and a WHERE clause with a single equal comparison on the key
+field. For example, the following SQL statements are optimized for keyed
+lookup:
+
+ CREATE TABLE user ( user_name TEXT, phone TEXT);
+ INSERT INTO user VALUES ('Fred Bloggs','233-7777');
+ # ... many more inserts
+ SELECT phone FROM user WHERE user_name='Fred Bloggs';
+
+The "user_name" column is the key column since it is the first
+column. The SELECT statement uses the key column in a single equal
+comparison - "user_name='Fred Bloggs'" - so the search will find it
+very quickly without having to loop through all the names which were
+inserted into the table.
+
+In contrast, these searches on the same table are not optimized:
+
+ 1. SELECT phone FROM user WHERE user_name < 'Fred';
+ 2. SELECT user_name FROM user WHERE phone = '233-7777';
+
+In #1, the operation uses a less-than (<) comparison rather than an equals
+comparison, so it will not be optimized for key searching. In #2, the key
+field "user_name" is not specified in the WHERE clause, and therefore the
+search will need to loop through all rows to find the requested row(s).
+
+B<Note> that the underlying DBM storage needs to loop over all I<key/value>
+pairs when the optimized fetch is used. SQL::Statement has a massively
+improved where clause evaluation which costs around 15% of the evaluation
+in DBI::SQL::Nano - combined with the loop in the DBM storage the speed
+improvement isn't so impressive.
+
+Even if lookups are faster by around 50%, DBI::SQL::Nano and
+SQL::Statement can benefit from the key field optimizations on
+updating and deleting rows - and here the improved where clause
+evaluation of SQL::Statement might beat DBI::SQL::Nano every time the
+where clause contains not only the key field (or more than one).
+
+=head2 Supported SQL syntax
+
+DBD::DBM uses a subset of SQL. The robustness of that subset depends on
+what other modules you have installed. Both options support basic SQL
+operations including CREATE TABLE, DROP TABLE, INSERT, DELETE, UPDATE, and
+SELECT.
+
+B<Option #1:> By default, this module inherits its SQL support from
+DBI::SQL::Nano that comes with DBI. Nano is, as its name implies, a *very*
+small SQL engine. Although limited in scope, it is faster than option #2
+for some operations (especially single I<primary key> lookups). See
+L<DBI::SQL::Nano> for a description of the SQL it supports and comparisons
+of it with option #2.
+
+B<Option #2:> If you install the pure Perl CPAN module SQL::Statement,
+DBD::DBM will use it instead of Nano. This adds support for table aliases,
+functions, joins, and much more. If you're going to use DBD::DBM
+for anything other than very simple tables and queries, you should install
+SQL::Statement. You don't have to change DBD::DBM or your scripts in any
+way, simply installing SQL::Statement will give you the more robust SQL
+capabilities without breaking scripts written for DBI::SQL::Nano. See
+L<SQL::Statement> for a description of the SQL it supports.
+
+To find out which SQL module is working in a given script, you can use the
+dbm_versions() method or, if you don't need the full output and version
+numbers, just do this:
+
+ print $dbh->{sql_handler}, "\n";
+
+That will print out either "SQL::Statement" or "DBI::SQL::Nano".
+
+Bearing the section about optimized access to the DBM storage in mind,
+comparing the benefits of both engines:
+
+ # DBI::SQL::Nano is faster
+ $sth = $dbh->prepare( "update foo set value='new' where key=15" );
+ $sth->execute();
+ $sth = $dbh->prepare( "delete from foo where key=27" );
+ $sth->execute();
+ $sth = $dbh->prepare( "select * from foo where key='abc'" );
+
+ # SQL::Statement might be faster (depending on DB size)
+ $sth = $dbh->prepare( "update foo set value='new' where key=?" );
+ $sth->execute(15);
+ $sth = $dbh->prepare( "update foo set value=? where key=15" );
+ $sth->execute('new');
+ $sth = $dbh->prepare( "delete from foo where key=?" );
+ $sth->execute(27);
+
+ # SQL::Statement is faster
+ $sth = $dbh->prepare( "update foo set value='new' where value='old'" );
+ $sth->execute();
+ # must be expressed using "where key = 15 or key = 27 or key = 42 or key = 'abc'"
+ # in DBI::SQL::Nano
+ $sth = $dbh->prepare( "delete from foo where key in (15,27,42,'abc')" );
+ $sth->execute();
+ # must be expressed using "where key > 10 and key < 90" in DBI::SQL::Nano
+ $sth = $dbh->prepare( "select * from foo where key between (10,90)" );
+ $sth->execute();
+
+ # only SQL::Statement can handle
+ $sth->prepare( "select * from foo,bar where foo.name = bar.name" );
+ $sth->execute();
+ $sth->prepare( "insert into foo values ( 1, 'foo' ), ( 2, 'bar' )" );
+ $sth->execute();
+
+=head2 Specifying Column Names
+
+DBM files don't have a standard way to store column names. DBD::DBM gets
+around this issue with a DBD::DBM specific way of storing the column names.
+B<If you are working only with DBD::DBM and not using files created by or
+accessed with other DBM programs, you can ignore this section.>
+
+DBD::DBM stores column names as a row in the file with the key I<_metadata
+\0>. So this code
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->do("CREATE TABLE baz (foo CHAR(10), bar INTEGER)");
+ $dbh->do("INSERT INTO baz (foo,bar) VALUES ('zippy',1)");
+
+Will create a file that has a structure something like this:
+
+ _metadata \0 | <dbd_metadata><schema></schema><col_names>foo,bar</col_names></dbd_metadata>
+ zippy | 1
+
+The next time you access this table with DBD::DBM, it will treat the
+I<_metadata \0> row as a header rather than as data and will pull the column
+names from there. However, if you access the file with something other
+than DBD::DBM, the row will be treated as a regular data row.
+
+If you do not want the column names stored as a data row in the table you
+can set the I<dbm_store_metadata> attribute to 0.
+
+ my $dbh = DBI->connect('dbi:DBM:', undef, undef, { dbm_store_metadata => 0 });
+
+ # or
+ $dbh->{dbm_store_metadata} = 0;
+
+ # or for per-table setting
+ $dbh->{f_meta}->{qux}->{dbm_store_metadata} = 0;
+
+By default, DBD::DBM assumes that you have two columns named "k" and "v"
+(short for "key" and "value"). So if you have I<dbm_store_metadata> set to
+1 and you want to use alternate column names, you need to specify the
+column names like this:
+
+ my $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ dbm_store_metadata => 0,
+ dbm_cols => [ qw(foo bar) ],
+ });
+
+ # or
+ $dbh->{dbm_store_metadata} = 0;
+ $dbh->{dbm_cols} = 'foo,bar';
+
+ # or to set the column names on per-table basis, do this:
+ # sets the column names only for table "qux"
+ $dbh->{f_meta}->{qux}->{dbm_store_metadata} = 0;
+ $dbh->{f_meta}->{qux}->{col_names} = [qw(foo bar)];
+
+If you have a file that was created by another DBM program or created with
+I<dbm_store_metadata> set to zero and you want to convert it to using
+DBD::DBM's column name storage, just use one of the methods above to name
+the columns but *without* specifying I<dbm_store_metadata> as zero. You
+only have to do that once - thereafter you can get by without setting
+either I<dbm_store_metadata> or setting I<dbm_cols> because the names will
+be stored in the file.
+
+=head1 DBI database handle attributes
+
+=head2 Metadata
+
+=head3 Statement handle ($sth) attributes and methods
+
+Most statement handle attributes such as NAME, NUM_OF_FIELDS, etc. are
+available only after an execute. The same is true of $sth->rows which is
+available after the execute but does I<not> require a fetch.
+
+=head3 Driver handle ($dbh) attributes
+
+It is not supported anymore to use dbm-attributes without the dbm_-prefix.
+Currently, if a DBD::DBM private attribute is accessed without an
+underscore in its name, dbm_ is prepended to that attribute and it is
+processed further. If the resulting attribute name is invalid, an error is
+thrown.
+
+=head4 dbm_cols
+
+Contains a comma separated list of column names or an array reference to
+the column names.
+
+=head4 dbm_type
+
+Contains the DBM storage type. Currently known supported type are
+C<< ODBM_File >>, C<< NDBM_File >>, C<< SDBM_File >>, C<< GDBM_File >>,
+C<< DB_File >> and C<< BerkeleyDB >>. It is not recommended to use one
+of the first three types - even if C<< SDBM_File >> is the most commonly
+available I<dbm_type>.
+
+=head4 dbm_mldbm
+
+Contains the serializer for DBM storage (value column). Requires the
+CPAN module L<MLDBM> installed. Currently known supported serializers
+are:
+
+=over 8
+
+=item Data::Dumper
+
+Default serializer. Deployed with Perl core.
+
+=item Storable
+
+Faster serializer. Deployed with Perl core.
+
+=item FreezeThaw
+
+Pure Perl serializer, requires L<FreezeThaw> to be installed.
+
+=item YAML
+
+Portable serializer (between languages but not architectures).
+Requires L<YAML::MLDBM> installation.
+
+=item JSON
+
+Portable, fast serializer (between languages but not architectures).
+Requires L<MLDBM::Serializer::JSON> installation.
+
+=back
+
+=head4 dbm_store_metadata
+
+Boolean value which determines if the metadata in DBM is stored or not.
+
+=head4 dbm_berkeley_flags
+
+Hash reference with additional flags for BerkeleyDB::Hash instantiation.
+
+=head4 dbm_version
+
+Readonly attribute containing the version of DBD::DBM.
+
+=head4 f_meta
+
+In addition to the attributes L<DBD::File> recognizes, DBD::DBM knows
+about the (public) attributes C<col_names> (B<Note> not I<dbm_cols>
+here!), C<dbm_type>, C<dbm_mldbm>, C<dbm_store_metadata> and
+C<dbm_berkeley_flags>. As in DBD::File, there are undocumented,
+internal attributes in DBD::DBM. Be very careful when modifying
+attributes you do not know; the consequence might be a destroyed or
+corrupted table.
+
+=head4 dbm_tables
+
+This attribute provides restricted access to the table meta data. See
+L<f_meta> and L<DBD::File/f_meta> for attribute details.
+
+dbm_tables is a tied hash providing the internal table names as keys
+(accessing unknown tables might create an entry) and their meta
+data as another tied hash. The table meta storage is obtained via
+the C<get_table_meta> method from the table implementation (see
+L<DBD::File::Developers>). Attribute setting and getting within the
+table meta data is handled via the methods C<set_table_meta_attr> and
+C<get_table_meta_attr>.
+
+=head3 Following attributes are no longer handled by DBD::DBM:
+
+=head4 dbm_ext
+
+This attribute is silently mapped to DBD::File's attribute I<f_ext>.
+Later versions of DBI might show a deprecated warning when this attribute
+is used and eventually it will be removed.
+
+=head4 dbm_lockfile
+
+This attribute is silently mapped to DBD::File's attribute I<f_lockfile>.
+Later versions of DBI might show a deprecated warning when this attribute
+is used and eventually it will be removed.
+
+=head1 DBI database handle methods
+
+=head2 The $dbh->dbm_versions() method
+
+The private method dbm_versions() returns a summary of what other modules
+are being used at any given time. DBD::DBM can work with or without many
+other modules - it can use either SQL::Statement or DBI::SQL::Nano as its
+SQL engine, it can be run with DBI or DBI::PurePerl, it can use many kinds
+of DBM modules, and many kinds of serializers when run with MLDBM. The
+dbm_versions() method reports all of that and more.
+
+ print $dbh->dbm_versions; # displays global settings
+ print $dbh->dbm_versions($table_name); # displays per table settings
+
+An important thing to note about this method is that when it is called
+with no arguments, it displays the *global* settings. If you override
+these by setting per-table attributes, these will I<not> be shown
+unless you specify a table name as an argument to the method call.
+
+=head2 Storing Objects
+
+If you are using MLDBM, you can use DBD::DBM to take advantage of its
+serializing abilities to serialize any Perl object that MLDBM can handle.
+To store objects in columns, you should (but don't absolutely need to)
+declare it as a column of type BLOB (the type is *currently* ignored by
+the SQL engine, but it's good form).
+
+=head1 EXTENSIBILITY
+
+=over 8
+
+=item C<SQL::Statement>
+
+Improved SQL engine compared to the built-in DBI::SQL::Nano - see
+L<Supported SQL syntax>.
+
+=item C<DB_File>
+
+Berkeley DB version 1. This database library is available on many
+systems without additional installation and most systems are
+supported.
+
+=item C<GDBM_File>
+
+Simple dbm type (comparable to C<DB_File>) under the GNU license.
+Typically not available (or requires extra installation) on non-GNU
+operating systems.
+
+=item C<BerkeleyDB>
+
+Berkeley DB version up to v4 (and maybe higher) - requires additional
+installation but is easier than GDBM_File on non-GNU systems.
+
+db4 comes with many tools which allow repairing and migrating
+databases. This is the B<recommended> dbm type for production use.
+
+=item C<MLDBM>
+
+Serializer wrapper to support more than one column for the files.
+Comes with serializers using C<Data::Dumper>, C<FreezeThaw> and
+C<Storable>.
+
+=item C<YAML::MLDBM>
+
+Additional serializer for MLDBM. YAML is very portable between languages.
+
+=item C<MLDBM::Serializer::JSON>
+
+Additional serializer for MLDBM. JSON is very portable between languages,
+probably more than YAML.
+
+=back
+
+=head1 GOTCHAS AND WARNINGS
+
+Using the SQL DROP command will remove any file that has the name specified
+in the command with either '.pag' and '.dir', '.db' or your {f_ext} appended
+to it. So this can be dangerous if you aren't sure what file it refers to:
+
+ $dbh->do(qq{DROP TABLE "/path/to/any/file"});
+
+Each DBM type has limitations. SDBM_File, for example, can only store
+values of less than 1,000 characters. *You* as the script author must
+ensure that you don't exceed those bounds. If you try to insert a value
+that is larger than DBM can store, the results will be unpredictable.
+See the documentation for whatever DBM you are using for details.
+
+Different DBM implementations return records in different orders.
+That means that you I<should not> rely on the order of records unless
+you use an ORDER BY statement.
+
+DBM data files are platform-specific. To move them from one platform to
+another, you'll need to do something along the lines of dumping your data
+to CSV on platform #1 and then dumping from CSV to DBM on platform #2.
+DBD::AnyData and DBD::CSV can help with that. There may also be DBM
+conversion tools for your platforms which would probably be quicker.
+
+When using MLDBM, there is a very powerful serializer - it will allow
+you to store Perl code or objects in database columns. When these get
+de-serialized, they may be eval'ed - in other words MLDBM (or actually
+Data::Dumper when used by MLDBM) may take the values and try to
+execute them in Perl. Obviously, this can present dangers, so if you
+do not know what is in a file, be careful before you access it with
+MLDBM turned on!
+
+See the entire section on L<Table locking and flock()> for gotchas and
+warnings about the use of flock().
+
+=head1 BUGS AND LIMITATIONS
+
+This module uses hash interfaces of two column file databases. While
+none of the supported SQL engines have support for indices, the following
+statements really do the same (even if they mean something completely
+different) for each dbm type which lacks C<EXISTS> support:
+
+ $sth->do( "insert into foo values (1, 'hello')" );
+
+ # this statement does ...
+ $sth->do( "update foo set v='world' where k=1" );
+ # ... the same as this statement
+ $sth->do( "insert into foo values (1, 'world')" );
+
+This is considered to be a bug and might change in a future release.
+
+Known affected dbm types are C<ODBM_File> and C<NDBM_File>. We highly
+recommend you use a more modern dbm type such as C<DB_File>.
+
+=head1 GETTING HELP, MAKING SUGGESTIONS, AND REPORTING BUGS
+
+If you need help installing or using DBD::DBM, please write to the DBI
+users mailing list at dbi-users@perl.org or to the
+comp.lang.perl.modules newsgroup on usenet. I cannot always answer
+every question quickly but there are many on the mailing list or in
+the newsgroup who can.
+
+DBD developers for DBD's which rely on DBD::File or DBD::DBM or use
+one of them as an example are suggested to join the DBI developers
+mailing list at dbi-dev@perl.org and strongly encouraged to join our
+IRC channel at L<irc://irc.perl.org/dbi>.
+
+If you have suggestions, ideas for improvements, or bugs to report, please
+report a bug as described in DBI. Do not mail any of the authors directly,
+you might not get an answer.
+
+When reporting bugs, please send the output of $dbh->dbm_versions($table)
+for a table that exhibits the bug and as small a sample as you can make of
+the code that produces the bug. And of course, patches are welcome, too
+:-).
+
+If you need enhancements quickly, you can get commercial support as
+described at L<http://dbi.perl.org/support/> or you can contact Jens Rehsack
+at rehsack@cpan.org for commercial support in Germany.
+
+Please don't bother Jochen Wiedmann or Jeff Zucker for support - they
+handed over further maintenance to H.Merijn Brand and Jens Rehsack.
+
+=head1 ACKNOWLEDGEMENTS
+
+Many, many thanks to Tim Bunce for prodding me to write this, and for
+copious, wise, and patient suggestions all along the way. (Jeff Zucker)
+
+I send my thanks and acknowledgements to H.Merijn Brand for his
+initial refactoring of DBD::File and his strong and ongoing support of
+SQL::Statement. Without him, the current progress would never have
+been made. And I have to name Martin J. Evans for each laugh (and
+correction) of all those funny word creations I (as non-native
+speaker) made to the documentation. And - of course - I have to thank
+all those unnamed contributors and testers from the Perl
+community. (Jens Rehsack)
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is written by Jeff Zucker < jzucker AT cpan.org >, who also
+maintained it till 2007. After that, in 2010, Jens Rehsack & H.Merijn Brand
+took over maintenance.
+
+ Copyright (c) 2004 by Jeff Zucker, all rights reserved.
+ Copyright (c) 2010 by Jens Rehsack & H.Merijn Brand, all rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=head1 SEE ALSO
+
+L<DBI>,
+L<SQL::Statement>, L<DBI::SQL::Nano>,
+L<AnyDBM_File>, L<DB_File>, L<BerkeleyDB>,
+L<MLDBM>, L<YAML::MLDBM>, L<MLDBM::Serializer::JSON>
+
+=cut
diff --git a/lib/DBD/ExampleP.pm b/lib/DBD/ExampleP.pm
new file mode 100644
index 0000000..0bbace0
--- /dev/null
+++ b/lib/DBD/ExampleP.pm
@@ -0,0 +1,428 @@
+{
+ package DBD::ExampleP;
+
+ use Symbol;
+
+ use DBI qw(:sql_types);
+
+ require File::Spec;
+
+ @EXPORT = qw(); # Do NOT @EXPORT anything.
+ $VERSION = sprintf("12.%06d", q$Revision: 14310 $ =~ /(\d+)/o);
+
+
+# $Id: ExampleP.pm 14310 2010-08-02 06:35:25Z REHSACK $
+#
+# Copyright (c) 1994,1997,1998 Tim Bunce
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+ @statnames = qw(dev ino mode nlink
+ uid gid rdev size
+ atime mtime ctime
+ blksize blocks name);
+ @statnames{@statnames} = (0 .. @statnames-1);
+
+ @stattypes = (SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
+ SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
+ SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
+ SQL_INTEGER, SQL_INTEGER, SQL_VARCHAR);
+ @stattypes{@statnames} = @stattypes;
+ @statprec = ((10) x (@statnames-1), 1024);
+ @statprec{@statnames} = @statprec;
+ die unless @statnames == @stattypes;
+ die unless @statprec == @stattypes;
+
+ $drh = undef; # holds driver handle once initialised
+ #$gensym = "SYM000"; # used by st::execute() for filehandles
+
+ sub driver{
+ return $drh if $drh;
+ my($class, $attr) = @_;
+ $class .= "::dr";
+ ($drh) = DBI::_new_drh($class, {
+ 'Name' => 'ExampleP',
+ 'Version' => $VERSION,
+ 'Attribution' => 'DBD Example Perl stub by Tim Bunce',
+ }, ['example implementors private data '.__PACKAGE__]);
+ $drh;
+ }
+
+ sub CLONE {
+ undef $drh;
+ }
+}
+
+
+{ package DBD::ExampleP::dr; # ====== DRIVER ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Default connect() implementation; real drivers usually override
+    # this. Creates the inner/outer database handle pair, seeds the
+    # canned get_info() answers and marks the handle active.
+    sub connect {
+        my ($drh, $dbname, $user, $auth) = @_;
+
+        my ($outer, $dbh) = DBI::_new_dbh($drh, {
+            Name                        => $dbname,
+            examplep_private_dbh_attrib => 42, # an example, for testing
+        });
+
+        # Canned answers served by DBD::ExampleP::db::get_info().
+        $dbh->{examplep_get_info} = {
+            29  => '"', # SQL_IDENTIFIER_QUOTE_CHAR
+            41  => '.', # SQL_CATALOG_NAME_SEPARATOR
+            114 => 1,   # SQL_CATALOG_LOCATION
+        };
+
+        $dbh->STORE('Active', 1);
+        return $outer;
+    }
+
+    # A single, possibly usefully meaningless, data source.
+    sub data_sources {
+        return ("dbi:ExampleP:dir=.");
+    }
+
+}
+
+
+{ package DBD::ExampleP::db; # ====== DATABASE ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Prepare either a "select <fields> from <dir>" statement (a
+    # directory stands in for a table; each file's lstat() fields act
+    # as columns) or a dummy "set ..." statement used by the test
+    # suite to exercise non-select code paths.
+    sub prepare {
+        my($dbh, $statement)= @_;
+        my @fields;
+        my($fields, $dir) = $statement =~ m/^\s*select\s+(.*?)\s+from\s+(\S*)/i;
+
+        if (defined $fields and defined $dir) {
+            @fields = ($fields eq '*')
+                        ? keys %DBD::ExampleP::statnames
+                        : split(/\s*,\s*/, $fields);
+        }
+        else {
+            return $dbh->set_err($DBI::stderr, "Syntax error in select statement (\"$statement\")")
+                unless $statement =~ m/^\s*set\s+/;
+            # the SET syntax is just a hack so the ExampleP driver can
+            # be used to test non-select statements.
+            # Now we have DBI::DBM etc., ExampleP should be deprecated
+        }
+
+        my ($outer, $sth) = DBI::_new_sth($dbh, {
+            'Statement'     => $statement,
+            examplep_private_sth_attrib => 24, # an example, for testing
+            }, ['example implementors private data '.__PACKAGE__]);
+
+        # Reject any requested column that isn't a known stat field.
+        my @bad = map {
+            defined $DBD::ExampleP::statnames{$_} ? () : $_
+        } @fields;
+        return $dbh->set_err($DBI::stderr, "Unknown field names: @bad")
+                if @bad;
+
+        $outer->STORE('NUM_OF_FIELDS' => scalar(@fields));
+
+        # Remember the directory unless it's a placeholder ("?");
+        # count the placeholders to set NUM_OF_PARAMS.
+        $sth->{examplep_ex_dir} = $dir if defined($dir) && $dir !~ /\?/;
+        $outer->STORE('NUM_OF_PARAMS' => ($dir) ? $dir =~ tr/?/?/ : 0);
+
+        if (@fields) {
+            $outer->STORE('NAME' => \@fields);
+            $outer->STORE('NULLABLE' => [ (0) x @fields ]);
+            $outer->STORE('SCALE' => [ (0) x @fields ]);
+        }
+
+        $outer;
+    }
+
+
+    # List the subdirectories of $catalog (default: current directory)
+    # as TABLE rows, plus a few hard-coded VIEW rows used by the test
+    # harness. Results are returned via a DBD::Sponge statement handle.
+    sub table_info {
+        my $dbh = shift;
+        my ($catalog, $schema, $table, $type) = @_;
+
+        my @types = split(/["']*,["']/, $type || 'TABLE');
+        my %types = map { $_=>$_ } @types;
+
+        # Return a list of all subdirectories
+        my $dh = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym;
+        my $dir = $catalog || File::Spec->curdir();
+        my @list;
+        if ($types{VIEW}) { # for use by test harness
+            push @list, [ undef, "schema", "table", 'VIEW', undef ];
+            push @list, [ undef, "sch-ema", "table", 'VIEW', undef ];
+            push @list, [ undef, "schema", "ta-ble", 'VIEW', undef ];
+            push @list, [ undef, "sch ema", "table", 'VIEW', undef ];
+            push @list, [ undef, "schema", "ta ble", 'VIEW', undef ];
+        }
+        if ($types{TABLE}) {
+            no strict 'refs';
+            opendir($dh, $dir)
+                or return $dbh->set_err(int($!), "Failed to open directory $dir: $!");
+            while (defined(my $item = readdir($dh))) {
+                if ($^O eq 'VMS') {
+                    # if on VMS then avoid warnings from catdir if you use a file
+                    # (not a dir) as the item below
+                    next if $item !~ /\.dir$/oi;
+                }
+                my $file = File::Spec->catdir($dir,$item);
+                next unless -d $file;
+                my($dev, $ino, $mode, $nlink, $uid) = lstat($file);
+                my $pwnam = undef; # eval { scalar(getpwnam($uid)) } || $uid;
+                push @list, [ $dir, $pwnam, $item, 'TABLE', undef ];
+            }
+            # Was close($dh): that closes the (never opened) filehandle
+            # slot of the glob and leaks the directory handle. A handle
+            # opened with opendir() must be closed with closedir().
+            closedir($dh);
+        }
+        # We would like to simply do a DBI->connect() here. However,
+        # this is wrong if we are in a subclass like DBI::ProxyServer.
+        $dbh->{'dbd_sponge_dbh'} ||= DBI->connect("DBI:Sponge:", '','')
+            or return $dbh->set_err($DBI::err,
+                    "Failed to connect to DBI::Sponge: $DBI::errstr");
+
+        my $attr = {
+            'rows' => \@list,
+            'NUM_OF_FIELDS' => 5,
+            'NAME' => ['TABLE_CAT', 'TABLE_SCHEM', 'TABLE_NAME',
+                    'TABLE_TYPE', 'REMARKS'],
+            'TYPE' => [DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(),
+                    DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR() ],
+            'NULLABLE' => [1, 1, 1, 1, 1]
+        };
+        my $sdbh = $dbh->{'dbd_sponge_dbh'};
+        my $sth = $sdbh->prepare("SHOW TABLES FROM $dir", $attr)
+            or return $dbh->set_err($sdbh->err(), $sdbh->errstr());
+        $sth;
+    }
+
+
+    # Describe the two column types this driver pretends to support
+    # (VARCHAR for "name", INTEGER for everything else).
+    sub type_info_all {
+        my ($dbh) = @_;
+        my $ti = [
+            { TYPE_NAME => 0,
+              DATA_TYPE => 1,
+              COLUMN_SIZE => 2,
+              LITERAL_PREFIX => 3,
+              LITERAL_SUFFIX => 4,
+              CREATE_PARAMS => 5,
+              NULLABLE => 6,
+              CASE_SENSITIVE => 7,
+              SEARCHABLE => 8,
+              UNSIGNED_ATTRIBUTE=> 9,
+              FIXED_PREC_SCALE=> 10,
+              AUTO_UNIQUE_VALUE => 11,
+              LOCAL_TYPE_NAME => 12,
+              MINIMUM_SCALE => 13,
+              MAXIMUM_SCALE => 14,
+            },
+            [ 'VARCHAR', DBI::SQL_VARCHAR, 1024, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ],
+            [ 'INTEGER', DBI::SQL_INTEGER, 10, "","", undef, 0, 0, 1, 0, 0,0,undef,0,0 ],
+        ];
+        return $ti;
+    }
+
+
+    sub ping {
+        (shift->FETCH('Active')) ? 2 : 0; # the value 2 is checked for by t/80proxy.t
+    }
+
+
+    sub disconnect {
+        shift->STORE(Active => 0);
+        return 1;
+    }
+
+
+    # Serve the canned get_info() answers seeded in dr::connect().
+    sub get_info {
+        my ($dbh, $info_type) = @_;
+        return $dbh->{examplep_get_info}->{$info_type};
+    }
+
+
+    sub FETCH {
+        my ($dbh, $attrib) = @_;
+        # In reality this would interrogate the database engine to
+        # either return dynamic values that cannot be precomputed
+        # or fetch and cache attribute values too expensive to prefetch.
+        # else pass up to DBI to handle
+        return $INC{"DBD/ExampleP.pm"} if $attrib eq 'example_driver_path';
+        return $dbh->SUPER::FETCH($attrib);
+    }
+
+
+    sub STORE {
+        my ($dbh, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        if ($attrib eq 'AutoCommit') {
+            # convert AutoCommit values to magic ones to let DBI
+            # know that the driver has 'handled' the AutoCommit attribute
+            $value = ($value) ? -901 : -900;
+        }
+        return $dbh->{$attrib} = $value if $attrib =~ /^examplep_/;
+        return $dbh->SUPER::STORE($attrib, $value);
+    }
+
+    sub DESTROY {
+        my $dbh = shift;
+        $dbh->disconnect if $dbh->FETCH('Active');
+        undef
+    }
+
+
+    # This is an example to demonstrate the use of driver-specific
+    # methods via $dbh->func().
+    # Use it as follows:
+    #   my @tables = $dbh->func($re, 'examplep_tables');
+    #
+    # Returns all the tables that match the regular expression $re.
+    sub examplep_tables {
+        my $dbh = shift; my $re = shift;
+        grep { $_ =~ /$re/ } $dbh->tables();
+    }
+
+    # Map a few driver-private trace flag names to bit values; defer
+    # everything else to DBI's standard flags.
+    sub parse_trace_flag {
+        my ($h, $name) = @_;
+        return 0x01000000 if $name eq 'foo';
+        return 0x02000000 if $name eq 'bar';
+        return 0x04000000 if $name eq 'baz';
+        return 0x08000000 if $name eq 'boo';
+        return 0x10000000 if $name eq 'bop';
+        return $h->SUPER::parse_trace_flag($name);
+    }
+
+    sub private_attribute_info {
+        return { example_driver_path => undef };
+    }
+}
+
+
+{ package DBD::ExampleP::st; # ====== STATEMENT ======
+    $imp_data_size = 0;
+    use strict; no strict 'refs'; # cause problems with filehandles
+
+    # Bind a value to a 1-based placeholder position.
+    sub bind_param {
+        my($sth, $param, $value, $attribs) = @_;
+        $sth->{'dbd_param'}->[$param-1] = $value;
+        return 1;
+    }
+
+
+    # Open the directory named by the first bound parameter (or by the
+    # literal directory captured at prepare() time) so fetch() can walk
+    # its entries. Returns 0 for non-select statements.
+    sub execute {
+        my($sth, @dir) = @_;
+        my $dir;
+
+        if (@dir) {
+            $sth->bind_param($_, $dir[$_-1]) or return
+                foreach (1..@dir);
+        }
+
+        my $dbd_param = $sth->{'dbd_param'} || [];
+        return $sth->set_err(2, @$dbd_param." values bound when $sth->{NUM_OF_PARAMS} expected")
+            unless @$dbd_param == $sth->{NUM_OF_PARAMS};
+
+        return 0 unless $sth->{NUM_OF_FIELDS}; # not a select
+
+        $dir = $dbd_param->[0] || $sth->{examplep_ex_dir};
+        return $sth->set_err(2, "No bind parameter supplied")
+            unless defined $dir;
+
+        $sth->finish;
+
+        #
+        # If the users asks for directory "long_list_4532", then we fake a
+        # directory with files "file4351", "file4350", ..., "file0".
+        # This is a special case used for testing, especially DBD::Proxy.
+        #
+        if ($dir =~ /^long_list_(\d+)$/) {
+            $sth->{dbd_dir} = [ $1 ]; # array ref indicates special mode
+            $sth->{dbd_datahandle} = undef;
+        }
+        else {
+            $sth->{dbd_dir} = $dir;
+            my $sym = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym;
+            opendir($sym, $dir)
+                or return $sth->set_err(2, "opendir($dir): $!");
+            $sth->{dbd_datahandle} = $sym;
+        }
+        $sth->STORE(Active => 1);
+        return 1;
+    }
+
+
+    # Return the next row: the lstat() fields of the next directory
+    # entry (or fake data in long_list mode), reduced to just the
+    # columns the query asked for.
+    sub fetch {
+        my $sth = shift;
+        my $dir = $sth->{dbd_dir};
+        my %s;
+
+        if (ref $dir) { # special fake-data test mode
+            my $num = $dir->[0]--;
+            unless ($num > 0) {
+                $sth->finish();
+                return;
+            }
+            my $time = time;
+            @s{@DBD::ExampleP::statnames} =
+                ( 2051, 1000+$num, 0644, 2, $>, $), 0, 1024,
+                  $time, $time, $time, 512, 2, "file$num")
+        }
+        else { # normal mode
+            my $dh = $sth->{dbd_datahandle}
+                or return $sth->set_err($DBI::stderr, "fetch without successful execute");
+            my $f = readdir($dh);
+            # Test definedness, not truth: a file named "0" is a valid
+            # directory entry but evaluates as false.
+            unless (defined $f) {
+                $sth->finish;
+                return;
+            }
+            # untaint $f so that we can use this for DBI taint tests
+            ($f) = ($f =~ m/^(.*)$/);
+            my $file = File::Spec->catfile($dir, $f);
+            # put in all the data fields
+            @s{ @DBD::ExampleP::statnames } = (lstat($file), $f);
+        }
+
+        # return just what fields the query asks for
+        my @new = @s{ @{$sth->{NAME}} };
+
+        return $sth->_set_fbav(\@new);
+    }
+    *fetchrow_arrayref = \&fetch;
+
+
+    # Release the directory handle and per-execute state.
+    sub finish {
+        my $sth = shift;
+        closedir($sth->{dbd_datahandle}) if $sth->{dbd_datahandle};
+        $sth->{dbd_datahandle} = undef;
+        $sth->{dbd_dir} = undef;
+        $sth->SUPER::finish();
+        return 1;
+    }
+
+
+    sub FETCH {
+        my ($sth, $attrib) = @_;
+        # In reality this would interrogate the database engine to
+        # either return dynamic values that cannot be precomputed
+        # or fetch and cache attribute values too expensive to prefetch.
+        if ($attrib eq 'TYPE'){
+            return [ @DBD::ExampleP::stattypes{ @{ $sth->FETCH(q{NAME_lc}) } } ];
+        }
+        elsif ($attrib eq 'PRECISION'){
+            return [ @DBD::ExampleP::statprec{ @{ $sth->FETCH(q{NAME_lc}) } } ];
+        }
+        elsif ($attrib eq 'ParamValues') {
+            my $dbd_param = $sth->{dbd_param} || [];
+            my %pv = map { $_ => $dbd_param->[$_-1] } 1..@$dbd_param;
+            return \%pv;
+        }
+        # else pass up to DBI to handle
+        return $sth->SUPER::FETCH($attrib);
+    }
+
+
+    sub STORE {
+        my ($sth, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        return $sth->{$attrib} = $value
+            if $attrib eq 'NAME' or $attrib eq 'NULLABLE' or $attrib eq 'SCALE' or $attrib eq 'PRECISION';
+        return $sth->SUPER::STORE($attrib, $value);
+    }
+
+    *parse_trace_flag = \&DBD::ExampleP::db::parse_trace_flag;
+}
+
+1;
+# vim: sw=4:ts=8
diff --git a/lib/DBD/File.pm b/lib/DBD/File.pm
new file mode 100644
index 0000000..d4d57ae
--- /dev/null
+++ b/lib/DBD/File.pm
@@ -0,0 +1,1637 @@
+# -*- perl -*-
+#
+# DBD::File - A base class for implementing DBI drivers that
+# act on plain files
+#
+# This module is currently maintained by
+#
+# H.Merijn Brand & Jens Rehsack
+#
+# The original author is Jochen Wiedmann.
+#
+# Copyright (C) 2009,2010 by H.Merijn Brand & Jens Rehsack
+# Copyright (C) 2004 by Jeff Zucker
+# Copyright (C) 1998 by Jochen Wiedmann
+#
+# All rights reserved.
+#
+# You may distribute this module under the terms of either the GNU
+# General Public License or the Artistic License, as specified in
+# the Perl README file.
+
+require 5.008;
+
+use strict;
+use warnings;
+
+use DBI ();
+
package DBD::File;

use strict;
use warnings;

use base qw(DBI::DBD::SqlEngine);
use Carp;
use vars qw(@ISA $VERSION $drh);

$VERSION = "0.40";

$drh = undef;		# holds driver handle(s) once initialized

# Meta accessor methods that driver() injects (prefixed with the
# driver's registered prefix) into each subclass's ::db package,
# mapped to the DBD::File::db functions implementing them.
my %accessors = (
    get_meta => "get_file_meta",
    set_meta => "set_file_meta",
    clear_meta => "clear_file_meta",
    );
+
# Driver constructor/singleton.  Fills in Name/Version/Attribution
# defaults from the subclass's package globals, builds the $drh via
# DBI::DBD::SqlEngine, and - for subclasses registered with a driver
# prefix - injects prefixed meta accessor methods (see %accessors)
# into the subclass's ::db package.
sub driver ($;$)
{
    my ($class, $attr) = @_;

    # Drivers typically use a singleton object for the $drh
    # We use a hash here to have one singleton per subclass.
    # (Otherwise DBD::CSV and DBD::DBM, for example, would
    # share the same driver object which would cause problems.)
    # An alternative would be not to cache the $drh here at all
    # and require that subclasses do that. Subclasses should do
    # their own caching, so caching here just provides extra safety.
    $drh->{$class} and return $drh->{$class};

    $attr ||= {};
    { no strict "refs";
	# pull defaults from the subclass's package globals
	unless ($attr->{Attribution}) {
	    $class eq "DBD::File" and
		$attr->{Attribution} = "$class by Jeff Zucker";
	    $attr->{Attribution} ||= ${$class . "::ATTRIBUTION"} ||
		"oops the author of $class forgot to define this";
	    }
	$attr->{Version} ||= ${$class . "::VERSION"};
	$attr->{Name} or ($attr->{Name} = $class) =~ s/^DBD\:\://;
	}

    $drh->{$class} = $class->SUPER::driver ($attr);

    # install prefixed accessor methods (e.g. csv_get_meta) unless the
    # subclass already provides them
    my $prefix = DBI->driver_prefix ($class);
    if ($prefix) {
	my $dbclass = $class . "::db";
	while (my ($accessor, $funcname) = each %accessors) {
	    my $method = $prefix . $accessor;
	    $dbclass->can ($method) and next;
	    # string eval: generate a forwarding stub in the ::db package
	    my $inject = sprintf <<'EOI', $dbclass, $method, $dbclass, $funcname;
sub %s::%s
{
    my $func = %s->can (q{%s});
    goto &$func;
    }
EOI
	    eval $inject;
	    $dbclass->install_method ($method);
	    }
	}

    # XXX inject DBD::XXX::Statement unless exists

    return $drh->{$class};
    } # driver
+
# Thread-clone hook: discard the cached driver handles so each new
# interpreter thread builds its own via driver().
sub CLONE
{
    undef $drh;
    } # CLONE
+
+# ====== DRIVER ================================================================
+
+package DBD::File::dr;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+@DBD::File::dr::ISA = qw(DBI::DBD::SqlEngine::dr);
+$DBD::File::dr::imp_data_size = 0;
+
# Escape a value for embedding into a DSN string: backslash-quote the
# DSN metacharacters ";", ":" and "\".  References and undef collapse
# to the empty string.
sub dsn_quote
{
    my ($str) = @_;
    return "" if ref $str or not defined $str;
    (my $quoted = $str) =~ s/([;:\\])/\\$1/g;
    return $quoted;
    } # dsn_quote
+
# List available data sources: every sub-directory of f_dir (or the
# current directory) becomes a "DBI:<driver>:f_dir=..." DSN.  Any other
# attributes in $attr are appended, dsn_quoted, to each DSN.
# Fix: the directory handle was never closed before - close it and
# report a failing closedir (matching get_avail_tables).
sub data_sources ($;$)
{
    my ($drh, $attr) = @_;
    my $dir = $attr && exists $attr->{f_dir}
	? $attr->{f_dir}
	: File::Spec->curdir ();
    my %attrs;
    $attr and %attrs = %$attr;
    delete $attrs{f_dir};
    my $dsnextra = join ";", map { $_ . "=" . dsn_quote ($attrs{$_}) } keys %attrs;
    my ($dirh) = Symbol::gensym ();
    unless (opendir $dirh, $dir) {
	$drh->set_err ($DBI::stderr, "Cannot open directory $dir: $!");
	return;
	}

    my ($file, @dsns, %names, $driver);
    # derive the driver name from the implementor class, e.g. DBD::CSV::dr -> CSV
    $driver = $drh->{ImplementorClass} =~ m/^dbd\:\:([^\:]+)\:\:/i ? $1 : "File";

    while (defined ($file = readdir ($dirh))) {
	my $d = File::Spec->catdir ($dir, $file);
	# allow current dir ... it can be a data_source too
	$file ne File::Spec->updir () && -d $d and
	    push @dsns, "DBI:$driver:f_dir=" . dsn_quote ($d) . ($dsnextra ? ";$dsnextra" : "");
	}
    closedir $dirh or
	$drh->set_err ($DBI::stderr, "Cannot close directory $dir: $!");
    return @dsns;
    } # data_sources
+
# Nothing driver-global to tear down; per-connection cleanup happens
# in DBD::File::db::disconnect.
sub disconnect_all
{
    } # disconnect_all

# Explicit empty destructor - there is no driver-level state to
# release here.
sub DESTROY
{
    undef;
    } # DESTROY
+
+# ====== DATABASE ==============================================================
+
+package DBD::File::db;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+use Carp;
+require File::Spec;
+require Cwd;
+use Scalar::Util qw(refaddr); # in CORE since 5.7.3
+
+@DBD::File::db::ISA = qw(DBI::DBD::SqlEngine::db);
+$DBD::File::db::imp_data_size = 0;
+
# Record the DBD::File layer version on the handle, then let the
# SqlEngine base class add its own version information.
sub set_versions
{
    my ($dbh) = @_;

    $dbh->{f_version} = $DBD::File::VERSION;
    return $dbh->SUPER::set_versions ();
    } # set_versions
+
# Declare the f_* dbh attributes this layer accepts (f_valid_attrs)
# and the subset the user may not modify (f_readonly_attrs), then let
# the SqlEngine base class register its own sets.
sub init_valid_attributes
{
    my ($dbh) = @_;

    my @valid = qw(
	f_version f_dir f_ext f_schema f_meta f_meta_map
	f_lock f_lockfile f_encoding f_valid_attrs f_readonly_attrs
	);
    # the attributes describing this layer itself are not writable
    my @readonly = qw( f_version f_valid_attrs f_readonly_attrs );

    $dbh->{f_valid_attrs}    = { map { $_ => 1 } @valid };
    $dbh->{f_readonly_attrs} = { map { $_ => 1 } @readonly };

    return $dbh->SUPER::init_valid_attributes ();
    } # init_valid_attributes
+
# Populate default dbh attribute values.  Phase 0 (driver defaults)
# sets f_dir/f_meta/f_meta_map and, when the subclass declared a
# prefixed "meta" attribute, installs the tied compatibility hash for
# it (skipped inside a Gofer server, see validate_FETCH_attr).
sub init_default_attributes
{
    my ($dbh, $phase) = @_;

    # must be done first, because setting flags implicitly calls $dbdname::db->STORE
    $dbh->SUPER::init_default_attributes ($phase);

    # DBI::DBD::SqlEngine::dr::connect will detect old-style drivers and
    # don't call twice
    unless (defined $phase) {
	# we have an "old" driver here - recover the phase from the handle
	$phase = defined $dbh->{sql_init_phase};
	$phase and $phase = $dbh->{sql_init_phase};
	}

    if (0 == $phase) {
	# check whether we're running in a Gofer server or not (see
	# validate_FETCH_attr for details)
	# NOTE(review): relies on a fixed call depth - (caller(5)) - to
	# spot DBI::Gofer::Execute; confirm when the call chain changes
	$dbh->{f_in_gofer} = (defined $INC{"DBD/Gofer.pm"} && (caller(5))[0] eq "DBI::Gofer::Execute");
	# f_ext should not be initialized
	# f_map is deprecated (but might return)
	$dbh->{f_dir} = Cwd::abs_path (File::Spec->curdir ());
	$dbh->{f_meta} = {};
	$dbh->{f_meta_map} = {}; # choose new name because it contains other keys

	# complete derived attributes, if required
	(my $drv_class = $dbh->{ImplementorClass}) =~ s/::db$//;
	my $drv_prefix = DBI->driver_prefix ($drv_class);
	my $valid_attrs = $drv_prefix . "valid_attrs";
	my $ro_attrs = $drv_prefix . "readonly_attrs";

	# if the subclass declares e.g. csv_meta, expose it as a tied
	# hash backed by this dbh (outside of Gofer only)
	my @comp_attrs = ();
	if (exists $dbh->{$drv_prefix . "meta"} and !$dbh->{f_in_gofer}) {
	    my $attr = $dbh->{$drv_prefix . "meta"};
	    defined $attr and defined $dbh->{$valid_attrs} and
		!defined $dbh->{$valid_attrs}{$attr} and
		$dbh->{$valid_attrs}{$attr} = 1;

	    my %h;
	    tie %h, "DBD::File::TieTables", $dbh;
	    $dbh->{$attr} = \%h;

	    push @comp_attrs, "meta";
	    }

	# register the compatibility attributes as valid and read-only
	foreach my $comp_attr (@comp_attrs) {
	    my $attr = $drv_prefix . $comp_attr;
	    defined $dbh->{$valid_attrs} and !defined $dbh->{$valid_attrs}{$attr} and
		$dbh->{$valid_attrs}{$attr} = 1;
	    defined $dbh->{$ro_attrs} and !defined $dbh->{$ro_attrs}{$attr} and
		$dbh->{$ro_attrs}{$attr} = 1;
	    }
	}

    return $dbh;
    } # init_default_attributes
+
# Drop all cached per-table meta data, then run the SqlEngine
# disconnect.
sub disconnect ($)
{
    my $dbh = shift;
    %{$dbh->{f_meta}} = ();
    return $dbh->SUPER::disconnect ();
    } # disconnect
+
# When running inside a Gofer server, FETCHing the driver's tied
# compatibility meta hash would force Gofer to serialize the tie
# object including its private $dbh reference, which results in nasty
# exceptions.  Redirect such a FETCH to the plain f_meta hash - the
# data source the tie hash is built from - so the caller at least gets
# a usable copy of the structure.
sub validate_FETCH_attr
{
    my ($dbh, $attrib) = @_;

    if ($dbh->{f_in_gofer}) {
	(my $drv_class = $dbh->{ImplementorClass}) =~ s/::db$//;
	my $meta_key = DBI->driver_prefix ($drv_class) . "meta";
	$attrib = "f_meta"
	    if exists $dbh->{$meta_key} and $attrib eq $dbh->{$meta_key};
	}

    return $attrib;
    } # validate_FETCH_attr
+
# Hook called before a dbh attribute is stored.
# - f_dir must name an existing directory and is normalized to an
#   absolute path.
# - f_ext only gets a plausibility warning when it does not look like
#   ".ext" optionally followed by "/r"-style flags.
# - A store into the driver's compatibility meta attribute is merged
#   entry by entry (through the tie layer) instead of replacing the
#   whole structure.
sub validate_STORE_attr
{
    my ($dbh, $attrib, $value) = @_;

    if ($attrib eq "f_dir") {
	unless (-d $value) {
	    return $dbh->set_err ($DBI::stderr, "No such directory '$value'");
	    }
	unless (File::Spec->file_name_is_absolute ($value)) {
	    $value = Cwd::abs_path ($value);
	    }
	}
    elsif ($attrib eq "f_ext") {
	unless ($value eq "" || $value =~ m{^\.\w+(?:/[rR]*)?$}) {
	    carp "'$value' doesn't look like a valid file extension attribute\n";
	    }
	}

    (my $drv_class = $dbh->{ImplementorClass}) =~ s/::db$//;
    my $meta_key = DBI->driver_prefix ($drv_class) . "meta";

    if (exists $dbh->{$meta_key}) {
	my $meta_attr = $dbh->{$meta_key};
	if ($attrib eq $meta_attr) {
	    # merge per table so every entry passes the tie layer
	    foreach my $tname (keys %$value) {
		$dbh->{$attrib}{$tname} = $value->{$tname};
		}
	    }
	}

    return $dbh->SUPER::validate_STORE_attr ($attrib, $value);
    } # validate_STORE_attr
+
# Build the version string for this driver layer: the DBD::File
# version plus the IO::File version in use and, when the (optional)
# table has one configured, its encoding.
sub get_f_versions
{
    my ($dbh, $table) = @_;

    (my $tbl_class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;

    my $meta;
    (undef, $meta) = $tbl_class->get_table_meta ($dbh, $table, 1) if $table;
    unless ($meta) {
	$meta = {};
	$tbl_class->bootstrap_table_meta ($dbh, $meta, $table);
	}

    my $dtype = "IO::File";
    eval {
	my $dver = IO::File->VERSION ();
	# still alive here means VERSION () worked - no need to check $@
	$dtype .= " ($dver)";
	};

    $dtype .= " + " . $meta->{f_encoding} . " encoding" if $meta->{f_encoding};

    return sprintf "%s using %s", $dbh->{f_version}, $dtype;
    } # get_f_versions
+
# Fetch one meta attribute for one table.  The special table name "."
# refers to the dbh itself (a plain attribute FETCH).
sub get_single_table_meta
{
    my ($dbh, $table, $attr) = @_;

    return $dbh->FETCH ($attr) if $table eq ".";

    (my $tbl_class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
    my $meta;
    (undef, $meta) = $tbl_class->get_table_meta ($dbh, $table, 1);
    $meta or croak "No such table '$table'";

    # go through the accessor so absent attributes are not autovivified
    return $tbl_class->get_table_meta_attr ($meta, $attr);
    } # get_single_table_meta
+
# Public accessor (installed as ${prefix}get_meta): read meta
# attributes for one or many tables.  $table may be a name, "." (the
# dbh itself), "*" (dbh plus all known tables), "+" (all tables with
# simple word-character names), a Regexp (all matching tables) or an
# array ref of names.  $attr may be a single name or an array ref.
# Scalar/scalar returns the plain value; any list form returns
# { table => { attr => value } }.
sub get_file_meta
{
    my ($dbh, $table, $attr) = @_;

    my $gstm = $dbh->{ImplementorClass}->can ("get_single_table_meta");

    # expand the wildcard table selectors first
    $table eq "*" and
	$table = [ ".", keys %{$dbh->{f_meta}} ];
    $table eq "+" and
	$table = [ grep { m/^[_A-Za-z0-9]+$/ } keys %{$dbh->{f_meta}} ];
    ref $table eq "Regexp" and
	$table = [ grep { $_ =~ $table } keys %{$dbh->{f_meta}} ];

    # simple scalar/scalar case: return the single value directly
    ref $table || ref $attr or
	return &$gstm ($dbh, $table, $attr);

    ref $table or $table = [ $table ];
    ref $attr or $attr = [ $attr ];
    "ARRAY" eq ref $table or
	croak "Invalid argument for \$table - SCALAR, Regexp or ARRAY expected but got " . ref $table;
    "ARRAY" eq ref $attr or
	croak "Invalid argument for \$attr - SCALAR or ARRAY expected but got " . ref $attr;

    my %results;
    foreach my $tname (@{$table}) {
	my %tattrs;
	foreach my $aname (@{$attr}) {
	    $tattrs{$aname} = &$gstm ($dbh, $tname, $aname);
	    }
	$results{$tname} = \%tattrs;
	}

    return \%results;
    } # get_file_meta
+
# Set one meta attribute for one table.  The special table name "."
# stores the attribute on the dbh itself.  Returns the dbh for
# chaining.
sub set_single_table_meta
{
    my ($dbh, $table, $attr, $value) = @_;

    return $dbh->STORE ($attr, $value) if $table eq ".";

    (my $tbl_class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
    my $meta;
    (undef, $meta) = $tbl_class->get_table_meta ($dbh, $table, 1);
    $meta or croak "No such table '$table'";
    $tbl_class->set_table_meta_attr ($meta, $attr, $value);

    return $dbh;
    } # set_single_table_meta
+
# Public accessor (installed as ${prefix}set_meta): set meta
# attributes for one or many tables.  $table accepts the same selector
# forms as get_file_meta ("*", "+", a Regexp, a name or an array ref);
# $attr is either a single attribute name (with $value) or a hash ref
# of attribute => value pairs.  Returns the dbh.
# Fixes: dropped the unused per-table %tattrs variable and replaced
# the shared each() iterator on %$attr (fragile when the same hash is
# walked again in the next outer iteration) with keys().
sub set_file_meta
{
    my ($dbh, $table, $attr, $value) = @_;

    my $sstm = $dbh->{ImplementorClass}->can ("set_single_table_meta");

    $table eq "*" and
	$table = [ ".", keys %{$dbh->{f_meta}} ];
    $table eq "+" and
	$table = [ grep { m/^[_A-Za-z0-9]+$/ } keys %{$dbh->{f_meta}} ];
    ref ($table) eq "Regexp" and
	$table = [ grep { $_ =~ $table } keys %{$dbh->{f_meta}} ];

    # simple scalar/scalar case: set the single value directly
    ref $table || ref $attr or
	return &$sstm ($dbh, $table, $attr, $value);

    ref $table or $table = [ $table ];
    ref $attr or $attr = { $attr => $value };
    "ARRAY" eq ref $table or
	croak "Invalid argument for \$table - SCALAR, Regexp or ARRAY expected but got " . ref $table;
    "HASH" eq ref $attr or
	croak "Invalid argument for \$attr - SCALAR or HASH expected but got " . ref $attr;

    foreach my $tname (@{$table}) {
	foreach my $aname (keys %{$attr}) {
	    &$sstm ($dbh, $tname, $aname, $attr->{$aname});
	    }
	}

    return $dbh;
    } # set_file_meta
+
# Public accessor (installed as ${prefix}clear_meta): wipe all cached
# meta data for the given table.  The meta hash itself stays
# registered, but empty.
sub clear_file_meta
{
    my ($dbh, $table) = @_;

    (my $tbl_class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
    my $meta;
    (undef, $meta) = $tbl_class->get_table_meta ($dbh, $table, 1);
    %{$meta} = () if $meta;

    return;
    } # clear_file_meta
+
# Enumerate the tables available in f_dir, one entry per file that
# get_table_meta can resolve, as [ catalog, schema, table, type,
# remarks ] rows.  The schema defaults to the owner of f_dir unless
# f_schema is set (undef/"" suppresses it).
# Fix: removed the unused %names local.
sub get_avail_tables
{
    my $dbh = shift;

    my @tables = $dbh->SUPER::get_avail_tables ();
    my $dir = $dbh->{f_dir};
    my $dirh = Symbol::gensym ();

    unless (opendir $dirh, $dir) {
	$dbh->set_err ($DBI::stderr, "Cannot open directory $dir: $!");
	return @tables;
	}

    my $class = $dbh->FETCH ("ImplementorClass");
    $class =~ s/::db$/::Table/;
    my $file;
    my $schema = exists $dbh->{f_schema}
	? defined $dbh->{f_schema} && $dbh->{f_schema} ne ""
	    ? $dbh->{f_schema} : undef
	: eval { getpwuid ((stat $dir)[4]) }; # XXX Win32::pwent
    my %seen;
    while (defined ($file = readdir ($dirh))) {
	my ($tbl, $meta) = $class->get_table_meta ($dbh, $file, 0, 0) or next; # XXX
	# $tbl && $meta && -f $meta->{f_fqfn} or next;
	# report each (schema, table) pair only once; "\0" stands in
	# for an undefined schema as hash key
	$seen{defined $schema ? $schema : "\0"}{$tbl}++ or
	    push @tables, [ undef, $schema, $tbl, "TABLE", "FILE" ];
	}
    closedir $dirh or
	$dbh->set_err ($DBI::stderr, "Cannot close directory $dir: $!");

    return @tables;
    } # get_avail_tables
+
+# ====== Tie-Meta ==============================================================
+
# Tie class exposing one table's meta structure as a hash.  All reads
# and writes are routed through the table class's
# get/set_table_meta_attr so compat-name mapping and invalidation
# hooks (table_meta_attr_changed) still apply.
package DBD::File::TieMeta;

use Carp qw(croak);
require Tie::Hash;
@DBD::File::TieMeta::ISA = qw(Tie::Hash);

# Bind to the table class and the meta hash it manages.
sub TIEHASH
{
    my ($class, $tblClass, $tblMeta) = @_;

    my $self = bless ({ tblClass => $tblClass, tblMeta => $tblMeta, }, $class);
    return $self;
    } # TIEHASH

# Writes go through the table class so dependent values get reset.
sub STORE
{
    my ($self, $meta_attr, $meta_val) = @_;

    $self->{tblClass}->set_table_meta_attr ($self->{tblMeta}, $meta_attr, $meta_val);

    return;
    } # STORE

# Reads go through the table class so compat attribute names work.
sub FETCH
{
    my ($self, $meta_attr) = @_;

    return $self->{tblClass}->get_table_meta_attr ($self->{tblMeta}, $meta_attr);
    } # FETCH

sub FIRSTKEY
{
    # calling keys() resets the hash iterator before each() starts
    my $a = scalar keys %{$_[0]->{tblMeta}};
    each %{$_[0]->{tblMeta}};
    } # FIRSTKEY

sub NEXTKEY
{
    each %{$_[0]->{tblMeta}};
    } # NEXTKEY

sub EXISTS
{
    exists $_[0]->{tblMeta}{$_[1]};
    } # EXISTS

# Deleting individual attributes would bypass the invalidation logic.
sub DELETE
{
    croak "Can't delete single attributes from table meta structure";
    } # DELETE

sub CLEAR
{
    %{$_[0]->{tblMeta}} = ()
    } # CLEAR

sub SCALAR
{
    scalar %{$_[0]->{tblMeta}}
    } # SCALAR
+
+# ====== Tie-Tables ============================================================
+
# Tie class exposing the dbh's table meta registry as a hash of
# hashes: each key is a table name, each value a DBD::File::TieMeta
# tied hash for that table's meta data.  This backs the driver's
# prefixed "meta" compatibility attribute (see
# init_default_attributes).
package DBD::File::TieTables;

use Carp qw(croak);
require Tie::Hash;
@DBD::File::TieTables::ISA = qw(Tie::Hash);

# Bind to the dbh and derive the driver's ::Table class from it.
sub TIEHASH
{
    my ($class, $dbh) = @_;

    (my $tbl_class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
    my $self = bless ({ dbh => $dbh, tblClass => $tbl_class, }, $class);
    return $self;
    } # TIEHASH

# Storing a hash ref merges its entries into the table's meta data,
# attribute by attribute, through the table class.
sub STORE
{
    my ($self, $table, $tbl_meta) = @_;

    "HASH" eq ref $tbl_meta or
	croak "Invalid data for storing as table meta data (must be hash)";

    (undef, my $meta) = $self->{tblClass}->get_table_meta ($self->{dbh}, $table, 1);
    $meta or croak "Invalid table name '$table'";

    while (my ($meta_attr, $meta_val) = each %$tbl_meta) {
	$self->{tblClass}->set_table_meta_attr ($meta, $meta_attr, $meta_val);
	}

    return;
    } # STORE

# Fetching a table returns a fresh TieMeta-tied hash over its meta.
sub FETCH
{
    my ($self, $table) = @_;

    (undef, my $meta) = $self->{tblClass}->get_table_meta ($self->{dbh}, $table, 1);
    $meta or croak "Invalid table name '$table'";

    my %h;
    tie %h, "DBD::File::TieMeta", $self->{tblClass}, $meta;

    return \%h;
    } # FETCH

sub FIRSTKEY
{
    # calling keys() resets the hash iterator before each() starts
    my $a = scalar keys %{$_[0]->{dbh}->{f_meta}};
    each %{$_[0]->{dbh}->{f_meta}};
    } # FIRSTKEY

sub NEXTKEY
{
    each %{$_[0]->{dbh}->{f_meta}};
    } # NEXTKEY

# A table "exists" under its canonical name or any mapped alias.
sub EXISTS
{
    exists $_[0]->{dbh}->{f_meta}->{$_[1]} or
	exists $_[0]->{dbh}->{f_meta_map}->{$_[1]};
    } # EXISTS

# Deletion resolves the name first, so aliases remove the canonical
# entry.
sub DELETE
{
    my ($self, $table) = @_;

    (undef, my $meta) = $self->{tblClass}->get_table_meta ($self->{dbh}, $table, 1);
    $meta or croak "Invalid table name '$table'";

    delete $_[0]->{dbh}->{f_meta}->{$meta->{table_name}};
    } # DELETE

sub CLEAR
{
    %{$_[0]->{dbh}->{f_meta}} = ();
    %{$_[0]->{dbh}->{f_meta_map}} = ();
    } # CLEAR

sub SCALAR
{
    scalar %{$_[0]->{dbh}->{f_meta}}
    } # SCALAR
+
+# ====== STATEMENT =============================================================
+
package DBD::File::st;

use strict;
use warnings;

use vars qw(@ISA $imp_data_size);

@DBD::File::st::ISA = qw(DBI::DBD::SqlEngine::st);
$DBD::File::st::imp_data_size = 0;

# Statement attributes we can answer from the collected table
# definitions instead of delegating to DBI.
my %supported_attrs = (
    TYPE      => 1,
    PRECISION => 1,
    NULLABLE  => 1,
    );

# Answer TYPE/PRECISION/NULLABLE from the CREATE TABLE definitions
# cached across all known tables (lazily collected into
# f_overall_defs via the dbh's get_file_meta); anything else falls
# through to the SqlEngine statement FETCH.
sub FETCH
{
    my ($sth, $attr) = @_;

    if ($supported_attrs{$attr}) {
	# NOTE(review): $stmt is assigned but never used below
	my $stmt = $sth->{sql_stmt};

	if (exists $sth->{ImplementorClass} &&
	    exists $sth->{sql_stmt} &&
	    $sth->{sql_stmt}->isa ("SQL::Statement")) {

	    # fill overall_defs unless we know
	    unless (exists $sth->{f_overall_defs} && ref $sth->{f_overall_defs}) {
		my $all_meta =
		    $sth->{Database}->func ("*", "table_defs", "get_file_meta");
		while (my ($tbl, $meta) = each %$all_meta) {
		    exists $meta->{table_defs} && ref $meta->{table_defs} or next;
		    foreach (keys %{$meta->{table_defs}{columns}}) {
			$sth->{f_overall_defs}{$_} = $meta->{table_defs}{columns}{$_};
			}
		    }
		}

	    my @colnames = $sth->sql_get_colnames ();

	    # columns without a recorded definition default to CHAR
	    $attr eq "TYPE" and
		return [ map { $sth->{f_overall_defs}{$_}{data_type} || "CHAR" }
			    @colnames ];

	    $attr eq "PRECISION" and
		return [ map { $sth->{f_overall_defs}{$_}{data_length} || 0 }
			    @colnames ];

	    # nullable unless a NOT NULL constraint was recorded
	    $attr eq "NULLABLE" and
		return [ map { ( grep m/^NOT NULL$/ =>
			    @{ $sth->{f_overall_defs}{$_}{constraints} || [] })
			? 0 : 1 }
			    @colnames ];
	    }
	}

    return $sth->SUPER::FETCH ($attr);
    } # FETCH
+
+# ====== SQL::STATEMENT ========================================================
+
+package DBD::File::Statement;
+
+use strict;
+use warnings;
+
+@DBD::File::Statement::ISA = qw( DBI::DBD::SqlEngine::Statement );
+
# SQL::Statement hook: open (or create) the table named in the current
# statement by instantiating the driver's ::Table class with the
# requested create/lock modes.
sub open_table ($$$$$)
{
    my ($self, $data, $table, $createMode, $lockMode) = @_;

    (my $tbl_class = ref $self) =~ s/::Statement/::Table/;

    my %flags = (
	createMode => $createMode,
	lockMode   => $lockMode,
	);
    # mark DROP statements so the table layer can act accordingly
    $flags{dropMode} = 1 if $self->{command} eq "DROP";

    return $tbl_class->new ($data, { table => $table }, \%flags);
    } # open_table
+
+# ====== SQL::TABLE ============================================================
+
package DBD::File::Table;

use strict;
use warnings;

use Carp;
require IO::File;
require File::Basename;
require File::Spec;
require Cwd;

# We may have a working flock () built-in but that doesn't mean that locking
# will work on NFS (flock () may hang hard)
my $locking = eval { flock STDOUT, 0; 1 };

@DBD::File::Table::ISA = qw( DBI::DBD::SqlEngine::Table );

# ====== FLYWEIGHT SUPPORT =====================================================

# matches any single trailing file name extension, e.g. ".csv"
my $fn_any_ext_regex = qr/\.[^.]*/;

# Flyweight support for table_info
# The functions file2table, init_table_meta, default_table_meta and
# get_table_meta are using $self arguments for polymorphism only.  They
# must not rely on an instantiated DBD::File::Table
# Resolve a table name or file name to the file that backs it, honoring
# f_ext (with its optional "r"equired flag), sql_identifier_case and an
# explicit f_file override.  On success the meta structure is filled
# with f_fqfn/f_fqbn (fully qualified file/base name), optionally
# f_fqln (lock file name) and table_name, and the table name is
# returned.  Returns nothing when no acceptable file can be derived.
sub file2table
{
    my ($self, $meta, $file, $file_is_table, $respect_case) = @_;

    $file eq "." || $file eq ".." and return; # XXX would break a possible DBD::Dir

    # split "ext/flags" - "r" makes the extension mandatory
    my ($ext, $req) = ("", 0);
    if ($meta->{f_ext}) {
	($ext, my $opt) = split m/\//, $meta->{f_ext};
	if ($ext && $opt) {
	    $opt =~ m/r/i and $req = 1;
	    }
	}

    # (my $tbl = $file) =~ s/$ext$//i;
    my ($tbl, $basename, $dir, $fn_ext, $user_spec_file);
    if ($file_is_table and defined $meta->{f_file}) {
	# user explicitly mapped this table to a file via f_file
	$tbl = $file;
	($basename, $dir, $fn_ext) = File::Basename::fileparse ($meta->{f_file}, $fn_any_ext_regex);
	$file = $basename . $fn_ext;
	$user_spec_file = 1;
	}
    else {
	($basename, $dir, undef) = File::Basename::fileparse ($file, $ext);
	$file = $tbl = $basename;
	$user_spec_file = 0;
	}

    # fold the identifier case unless quoting asked us not to
    if (!$respect_case and $meta->{sql_identifier_case} == 1) { # XXX SQL_IC_UPPER
	$basename = uc $basename;
	$tbl = uc $tbl;
	}
    if( !$respect_case and $meta->{sql_identifier_case} == 2) { # XXX SQL_IC_LOWER
	$basename = lc $basename;
	$tbl = lc $tbl;
	}

    my $searchdir = File::Spec->file_name_is_absolute ($dir)
	? ($dir =~ s|/$||, $dir)
	: Cwd::abs_path (File::Spec->catdir ($meta->{f_dir}, $dir));
    -d $searchdir or
	croak "-d $searchdir: $!";

    $searchdir eq $meta->{f_dir} and
	$dir = "";

    unless ($user_spec_file) {
	$file_is_table and $file = "$basename$ext";

	# Fully Qualified File Name
	# scan the directory for a file matching the base name, either
	# case-sensitively or not; without the "r" flag an extensionless
	# file is acceptable too
	my $cmpsub;
	if ($respect_case) {
	    $cmpsub = sub {
		my ($fn, undef, $sfx) = File::Basename::fileparse ($_, $fn_any_ext_regex);
		$sfx = '' if $^O eq 'VMS' and $sfx eq '.'; # no extension turns up as a dot
		$fn eq $basename and
		    return (lc $sfx eq lc $ext or !$req && !$sfx);
		return 0;
		}
	    }
	else {
	    $cmpsub = sub {
		my ($fn, undef, $sfx) = File::Basename::fileparse ($_, $fn_any_ext_regex);
		$sfx = '' if $^O eq 'VMS' and $sfx eq '.'; # no extension turns up as a dot
		lc $fn eq lc $basename and
		    return (lc $sfx eq lc $ext or !$req && !$sfx);
		return 0;
		}
	    }

	opendir my $dh, $searchdir or croak "Can't open '$searchdir': $!";
	# longest match first, so "table.csv" beats "table"
	my @f = sort { length $b <=> length $a } grep { &$cmpsub ($_) } readdir $dh;
	@f > 0 && @f <= 2 and $file = $f[0];
	!$respect_case && $meta->{sql_identifier_case} == 4 and # XXX SQL_IC_MIXED
	    ($tbl = $file) =~ s/$ext$//i;
	closedir $dh or croak "Can't close '$searchdir': $!";

	my $tmpfn = $file;
	if ($ext && $req) {
	    # File extension required
	    $tmpfn =~ s/$ext$//i or return;
	    }
	}

    my $fqfn = File::Spec->catfile ($searchdir, $file);
    my $fqbn = File::Spec->catfile ($searchdir, $basename);

    $meta->{f_fqfn} = $fqfn;
    $meta->{f_fqbn} = $fqbn;
    defined $meta->{f_lockfile} && $meta->{f_lockfile} and
	$meta->{f_fqln} = $meta->{f_fqbn} . $meta->{f_lockfile};

    # tables in sub-directories keep the directory part in their name
    $dir && !$user_spec_file and $tbl = File::Spec->catfile ($dir, $tbl);
    $meta->{table_name} = $tbl;

    return $tbl;
    } # file2table
+
# Seed a fresh table meta structure with the dbh-level defaults.  Note
# the deliberate exists-vs-defined split: attributes where an explicit
# undef is meaningful (f_dir, f_lock, f_lockfile) are only copied when
# the key is absent entirely, the others whenever they are undefined.
sub bootstrap_table_meta
{
    my ($self, $dbh, $meta, $table) = @_;

    exists $meta->{f_dir} or $meta->{f_dir} = $dbh->{f_dir};
    defined $meta->{f_ext} or $meta->{f_ext} = $dbh->{f_ext};
    defined $meta->{f_encoding} or $meta->{f_encoding} = $dbh->{f_encoding};
    exists $meta->{f_lock} or $meta->{f_lock} = $dbh->{f_lock};
    exists $meta->{f_lockfile} or $meta->{f_lockfile} = $dbh->{f_lockfile};
    defined $meta->{f_schema} or $meta->{f_schema} = $dbh->{f_schema};
    defined $meta->{sql_identifier_case} or
	$meta->{sql_identifier_case} = $dbh->{sql_identifier_case};
    } # bootstrap_table_meta
+
# Per-driver hook to complete a table's meta structure after
# file2table resolved the backing file; DBD::File itself has nothing
# to add, subclasses override this.
sub init_table_meta
{
    my ($self, $dbh, $meta, $table) = @_;

    return;
    } # init_table_meta
+
# Look up (and lazily initialize) the meta structure for a table.
# Quoted identifiers ("name") force case-respecting lookup; otherwise
# f_meta_map translates user spellings to the canonical table name.
# Returns the (possibly canonicalized) table name and its meta hash,
# or nothing when the table cannot be resolved to a file.
sub get_table_meta ($$$$;$)
{
    my ($self, $dbh, $table, $file_is_table, $respect_case) = @_;
    unless (defined $respect_case) {
	$respect_case = 0;
	$table =~ s/^\"// and $respect_case = 1; # handle quoted identifiers
	$table =~ s/\"$//;
	}

    unless ($respect_case) {
	defined $dbh->{f_meta_map}{$table} and $table = $dbh->{f_meta_map}{$table};
	}

    my $meta = {};
    defined $dbh->{f_meta}{$table} and $meta = $dbh->{f_meta}{$table};

    unless ($meta->{initialized}) {
	$self->bootstrap_table_meta ($dbh, $meta, $table);

	unless (defined $meta->{f_fqfn}) {
	    $self->file2table ($meta, $table, $file_is_table, $respect_case) or return;
	    }

	# remember alternate spellings so later lookups hit the cache
	if (defined $meta->{table_name} and $table ne $meta->{table_name}) {
	    $dbh->{f_meta_map}{$table} = $meta->{table_name};
	    $table = $meta->{table_name};
	    }

	# now we know a bit more - let's check if user can't use consequent spelling
	# XXX add know issue about reset sql_identifier_case here ...
	if (defined $dbh->{f_meta}{$table} && defined $dbh->{f_meta}{$table}{initialized}) {
	    $meta = $dbh->{f_meta}{$table};
	    # NOTE: the statement modifier binds to the whole statement,
	    # i.e. this parses as
	    #   ($self->file2table (...) or return) unless ...{initialized};
	    # so file2table is only re-run (and may bail out) for entries
	    # that are not yet initialized
	    $self->file2table ($meta, $table, $file_is_table, $respect_case) or
		return unless $dbh->{f_meta}{$table}{initialized};
	    }
	unless ($dbh->{f_meta}{$table}{initialized}) {
	    $self->init_table_meta ($dbh, $meta, $table);
	    $meta->{initialized} = 1;
	    $dbh->{f_meta}{$table} = $meta;
	    }
	}

    return ($table, $meta);
    } # get_table_meta
+
# Meta attributes whose derived companion value must be discarded when
# they change; deleting f_fqfn forces the next get_table_meta to run
# file2table again.
my %reset_on_modify = (
    f_file => "f_fqfn",
    f_dir => "f_fqfn",
    f_ext => "f_fqfn",
    f_lockfile => "f_fqfn", # forces new file2table call
    );

# Historical attribute names (without the f_ prefix) still accepted by
# the meta accessors.
my %compat_map = map { $_ => "f_$_" } qw( file ext lock lockfile );

# Let subclasses register additional attribute => derived-value pairs
# to be reset on modification.
sub register_reset_on_modify
{
    my ($proto, $extra_resets) = @_;
    %reset_on_modify = (%reset_on_modify, %$extra_resets);
    return;
    } # register_reset_on_modify

# Let subclasses register additional legacy attribute name mappings.
sub register_compat_map
{
    my ($proto, $extra_compat_map) = @_;
    %compat_map = (%compat_map, %$extra_compat_map);
    return;
    } # register_compat_map
+
# Read a single attribute from a table meta structure, translating
# legacy compat names (e.g. "ext" => "f_ext") first.  Returns nothing
# when the attribute is absent, so no key gets autovivified.
sub get_table_meta_attr
{
    my ($class, $meta, $attrib) = @_;
    $attrib = $compat_map{$attrib} if exists $compat_map{$attrib};
    return exists $meta->{$attrib} ? $meta->{$attrib} : ();
    } # get_table_meta_attr
+
# Store a single attribute into a table meta structure, translating
# legacy compat names first and letting table_meta_attr_changed
# invalidate any derived values before the new value lands.
sub set_table_meta_attr
{
    my ($class, $meta, $attrib, $value) = @_;
    $attrib = $compat_map{$attrib} if exists $compat_map{$attrib};
    $class->table_meta_attr_changed ($meta, $attrib, $value);
    $meta->{$attrib} = $value;
    } # set_table_meta_attr
+
# Invalidation hook called before a meta attribute is overwritten.
# NOTE the short-circuit and-chain: the "initialized" flag is only
# cleared when the dependent key was actually present AND delete
# returned a true value; changing an attribute whose derived value is
# absent (or false) leaves the meta state untouched.
sub table_meta_attr_changed
{
    my ($class, $meta, $attrib, $value) = @_;
    defined $reset_on_modify{$attrib} and
	delete $meta->{$reset_on_modify{$attrib}} and
	$meta->{initialized} = 0;
    } # table_meta_attr_changed
+
+# ====== FILE OPEN =============================================================
+
# Open the data file (and, when f_fqln is set, the lock file) named in
# $meta according to $flags: createMode refuses to overwrite and opens
# "a+", otherwise lockMode selects "r+" vs "r".  Applies the encoding
# or binary layer and finally flock()s whichever handle was opened
# last (the lock file when present, else the data file) per f_lock:
# 0 = none, 1 = shared, 2 = exclusive; default derives from lockMode.
# NOTE(review): the prototype says ($$$) but four arguments are
# unpacked - prototypes are ignored for method calls, so this is
# harmless, but confirm before ever calling it as a plain sub.
sub open_file ($$$)
{
    my ($self, $meta, $attrs, $flags) = @_;

    defined $meta->{f_fqfn} && $meta->{f_fqfn} ne "" or croak "No filename given";

    my ($fh, $fn);
    unless ($meta->{f_dontopen}) {
	$fn = $meta->{f_fqfn};
	if ($flags->{createMode}) {
	    -f $meta->{f_fqfn} and
		croak "Cannot create table $attrs->{table}: Already exists";
	    $fh = IO::File->new ($fn, "a+") or
		croak "Cannot open $fn for writing: $! (" . ($!+0) . ")";
	    }
	else {
	    unless ($fh = IO::File->new ($fn, ($flags->{lockMode} ? "r+" : "r"))) {
		croak "Cannot open $fn: $! (" . ($!+0) . ")";
		}
	    }

	if ($fh) {
	    # "a+" leaves the position at EOF - rewind before use
	    $fh->seek (0, 0) or
		croak "Error while seeking back: $!";
	    if (my $enc = $meta->{f_encoding}) {
		binmode $fh, ":encoding($enc)" or
		    croak "Failed to set encoding layer '$enc' on $fn: $!";
		}
	    else {
		binmode $fh or croak "Failed to set binary mode on $fn: $!";
		}
	    }

	$meta->{fh} = $fh;
	}
    if ($meta->{f_fqln}) {
	$fn = $meta->{f_fqln};
	if ($flags->{createMode}) {
	    -f $fn and
		croak "Cannot create table lock for $attrs->{table}: Already exists";
	    $fh = IO::File->new ($fn, "a+") or
		croak "Cannot open $fn for writing: $! (" . ($!+0) . ")";
	    }
	else {
	    unless ($fh = IO::File->new ($fn, ($flags->{lockMode} ? "r+" : "r"))) {
		croak "Cannot open $fn: $! (" . ($!+0) . ")";
		}
	    }

	$meta->{lockfh} = $fh;
	}

    # $locking is false when flock is unavailable on this platform
    if ($locking && $fh) {
	my $lm = defined $flags->{f_lock}
	      && $flags->{f_lock} =~ m/^[012]$/
	       ? $flags->{f_lock}
	       : $flags->{lockMode} ? 2 : 1;
	if ($lm == 2) {
	    flock $fh, 2 or croak "Cannot obtain exclusive lock on $fn: $!";
	    }
	elsif ($lm == 1) {
	    flock $fh, 1 or croak "Cannot obtain shared lock on $fn: $!";
	    }
	# $lm = 0 is forced no locking at all
	}
    } # open_file
+
+# ====== SQL::Eval API =========================================================
+
# SQL::Eval API: construct a table object for $attrs->{table}.  The
# table name is resolved to its backing file via get_table_meta, any
# CREATE TABLE column definitions are recorded in the meta data, and
# the data/lock files are opened according to $flags.
# Fix: removed the unused locals $columns and $array.
sub new
{
    my ($className, $data, $attrs, $flags) = @_;
    my $dbh = $data->{Database};

    my ($tblnm, $meta) = $className->get_table_meta ($dbh, $attrs->{table}, 1) or
	croak "Cannot find appropriate file for table '$attrs->{table}'";
    $attrs->{table} = $tblnm;

    # Being a bit dirty here, as SQL::Statement::Structure does not offer
    # me an interface to the data I want
    $flags->{createMode} && $data->{sql_stmt}{table_defs} and
	$meta->{table_defs} = $data->{sql_stmt}{table_defs};

    $className->open_file ($meta, $attrs, $flags);

    my $tbl = {
	%{$attrs},
	meta => $meta,
	col_names => $meta->{col_names} || [],
	};
    return $className->SUPER::new ($tbl);
    } # new
+
# Remove the table's backing file(s).  Handles are closed first - some
# OS'es refuse to unlink a file that is still open.  Always returns
# true; a failed unlink is not reported.
sub drop ($)
{
    my ($self, $data) = @_;
    my $meta = $self->{meta};
    foreach my $h (qw( fh lockfh )) {
	$meta->{$h} and $meta->{$h}->close ();
	undef $meta->{$h};
	}
    unlink $meta->{f_fqfn} if $meta->{f_fqfn};
    unlink $meta->{f_fqln} if $meta->{f_fqln};
    delete $data->{Database}{f_meta}{$self->{table}};
    return 1;
    } # drop
+
# SQL::Eval API: position the data file handle.  Only "rewind to the
# first data row" (pos 0 / whence 0, honoring first_row_pos) and "end
# of file" (pos 0 / whence 2) are supported; anything else croaks.
sub seek ($$$$)
{
    my ($self, $data, $pos, $whence) = @_;
    my $meta = $self->{meta};

    if ($whence == 0 and $pos == 0) {
	# the first data row may sit behind a header line
	$pos = defined $meta->{first_row_pos} ? $meta->{first_row_pos} : 0;
	}
    else {
	$whence == 2 and $pos == 0 or
	    croak "Illegal seek position: pos = $pos, whence = $whence";
	}

    $meta->{fh}->seek ($pos, $whence) or
	croak "Error while seeking in " . $meta->{f_fqfn} . ": $!";
    } # seek
+
# SQL::Eval API: chop the data file at the current write position,
# discarding whatever is left of a previous, longer version.
sub truncate ($$)
{
    my ($self, $data) = @_;
    my $meta = $self->{meta};
    my $fh = $meta->{fh};
    $fh->truncate ($fh->tell ()) or
	croak "Error while truncating " . $meta->{f_fqfn} . ": $!";
    return 1;
    } # truncate
+
# Close any open data/lock handles when the table object goes away.
sub DESTROY
{
    my $self = shift;
    my $meta = $self->{meta};
    foreach my $h (qw( fh lockfh )) {
	$meta->{$h} and $meta->{$h}->close ();
	undef $meta->{$h};
	}
    } # DESTROY
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::File - Base class for writing file based DBI drivers
+
+=head1 SYNOPSIS
+
+This module is a base class for writing other L<DBD|DBI::DBD>s.
+It is not intended to function as a DBD itself (though it is possible).
+If you want to access flat files, use L<DBD::AnyData|DBD::AnyData>, or
+L<DBD::CSV|DBD::CSV> (both of which are subclasses of DBD::File).
+
+=head1 DESCRIPTION
+
+The DBD::File module is not a true L<DBI|DBI> driver, but an abstract
+base class for deriving concrete DBI drivers from it. The implication
+is, that these drivers work with plain files, for example CSV files or
+INI files. The module is based on the L<SQL::Statement|SQL::Statement>
+module, a simple SQL engine.
+
+See L<DBI|DBI> for details on DBI, L<SQL::Statement|SQL::Statement> for
+details on SQL::Statement and L<DBD::CSV|DBD::CSV>, L<DBD::DBM|DBD::DBM>
+or L<DBD::AnyData|DBD::AnyData> for example drivers.
+
+=head2 Metadata
+
+The following attributes are handled by DBI itself and not by DBD::File,
+thus they all work as expected:
+
+ Active
+ ActiveKids
+ CachedKids
+ CompatMode (Not used)
+ InactiveDestroy
+ AutoInactiveDestroy
+ Kids
+ PrintError
+ RaiseError
+ Warn (Not used)
+
+=head3 The following DBI attributes are handled by DBD::File:
+
+=head4 AutoCommit
+
+Always on.
+
+=head4 ChopBlanks
+
+Works.
+
+=head4 NUM_OF_FIELDS
+
+Valid after C<< $sth->execute >>.
+
+=head4 NUM_OF_PARAMS
+
+Valid after C<< $sth->prepare >>.
+
+=head4 NAME
+
+Valid after C<< $sth->execute >>; undef for Non-Select statements.
+
+=head4 NULLABLE
+
Not really working, always returns an array ref of ones, unless the
affected table has been created in this session. Valid after
C<< $sth->execute >>; undef for non-select statements.
+
+=head3 The following DBI attributes and methods are not supported:
+
+=over 4
+
+=item bind_param_inout
+
+=item CursorName
+
+=item LongReadLen
+
+=item LongTruncOk
+
+=back
+
+=head3 DBD::File specific attributes
+
+In addition to the DBI attributes, you can use the following dbh
+attributes:
+
+=head4 f_dir
+
+This attribute is used for setting the directory where the files are
+opened and it defaults to the current directory (F<.>). Usually you set
+it on the dbh but it may be overridden per table (see L<f_meta>).
+
+When the value for C<f_dir> is a relative path, it is converted into
+the appropriate absolute path name (based on the current working
+directory) when the dbh attribute is set.
+
+See L<KNOWN BUGS AND LIMITATIONS>.
+
+=head4 f_ext
+
+This attribute is used for setting the file extension. The format is:
+
+ extension{/flag}
+
+where the /flag is optional and the extension is case-insensitive.
+C<f_ext> allows you to specify an extension which:
+
+=over
+
+=item *
+
+makes DBD::File prefer F<table.extension> over F<table>.
+
+=item *
+
+makes the table name the filename minus the extension.
+
+=back
+
+ DBI:CSV:f_dir=data;f_ext=.csv
+
+In the above example and when C<f_dir> contains both F<table.csv> and
+F<table>, DBD::File will open F<table.csv> and the table will be
+named "table". If F<table.csv> does not exist but F<table> does
+that file is opened and the table is also called "table".
+
+If C<f_ext> is not specified and F<table.csv> exists it will be opened
+and the table will be called "table.csv" which is probably not what
+you want.
+
+NOTE: even though extensions are case-insensitive, table names are
+not.
+
+ DBI:CSV:f_dir=data;f_ext=.csv/r
+
+The C<r> flag means the file extension is required and any filename
+that does not match the extension is ignored.
+
+Usually you set it on the dbh but it may be overridden per table
+(see L<f_meta>).
+
+=head4 f_schema
+
+This will set the schema name and defaults to the owner of the
+directory in which the table file resides. You can set C<f_schema> to
+C<undef>.
+
+ my $dbh = DBI->connect ("dbi:CSV:", "", "", {
+ f_schema => undef,
+ f_dir => "data",
+ f_ext => ".csv/r",
+ }) or die $DBI::errstr;
+
+By setting the schema you affect the results from the tables call:
+
+ my @tables = $dbh->tables ();
+
+ # no f_schema
+ "merijn".foo
+ "merijn".bar
+
+ # f_schema => "dbi"
+ "dbi".foo
+ "dbi".bar
+
+ # f_schema => undef
+ foo
+ bar
+
+Defining C<f_schema> to the empty string is equal to setting it to C<undef>
+so the DSN can be C<"dbi:CSV:f_schema=;f_dir=.">.
+
+=head4 f_lock
+
+The C<f_lock> attribute is used to set the locking mode on the opened
+table files. Note that not all platforms support locking. By default,
+tables are opened with a shared lock for reading, and with an
+exclusive lock for writing. The supported modes are:
+
+ 0: No locking at all.
+
+ 1: Shared locks will be used.
+
+ 2: Exclusive locks will be used.
+
+But see L<KNOWN BUGS|/"KNOWN BUGS AND LIMITATIONS"> below.
+
+=head4 f_lockfile
+
+If you wish to use a lockfile extension other than C<.lck>, simply specify
+the C<f_lockfile> attribute:
+
+ $dbh = DBI->connect ("dbi:DBM:f_lockfile=.foo");
+ $dbh->{f_lockfile} = ".foo";
+ $dbh->{f_meta}{qux}{f_lockfile} = ".foo";
+
+If you wish to disable locking, set the C<f_lockfile> to C<0>.
+
+ $dbh = DBI->connect ("dbi:DBM:f_lockfile=0");
+ $dbh->{f_lockfile} = 0;
+ $dbh->{f_meta}{qux}{f_lockfile} = 0;
+
+=head4 f_encoding
+
+With this attribute, you can set the encoding in which the file is opened.
+This is implemented using C<< binmode $fh, ":encoding(<f_encoding>)" >>.
+
+=head4 f_meta
+
+Private data area which contains information about the tables this
+module handles. Table meta data might not be available until the
+table has been accessed for the first time, e.g., by issuing a select
+on it; however, it is possible to pre-initialize attributes for each
+table you use.
+
+DBD::File recognizes the (public) attributes C<f_ext>, C<f_dir>,
+C<f_file>, C<f_encoding>, C<f_lock>, C<f_lockfile>, C<f_schema>,
+C<col_names>, C<table_name> and C<sql_identifier_case>. Be very careful
+when modifying attributes you do not know, the consequence might be a
+destroyed or corrupted table.
+
+C<f_file> is an attribute applicable to table meta data only and you
+will not find a corresponding attribute in the dbh. Whilst it may be
+reasonable to have several tables with the same column names, it is
+not for the same file name. If you need access to the same file using
+different table names, use C<SQL::Statement> as the SQL engine and the
+C<AS> keyword:
+
+ SELECT * FROM tbl AS t1, tbl AS t2 WHERE t1.id = t2.id
+
+C<f_file> can be an absolute path name or a relative path name but if
+it is relative, it is interpreted as being relative to the C<f_dir>
+attribute of the table meta data. When C<f_file> is set DBD::File will
+use C<f_file> as specified and will not attempt to work out an
+alternative for C<f_file> using the C<table name> and C<f_ext>
+attribute.
+
+While C<f_meta> is a private and readonly attribute (which means, you
+cannot modify its values), derived drivers might provide restricted
+write access through another attribute. Well known accessors are
+C<csv_tables> for L<DBD::CSV>, C<ad_tables> for L<DBD::AnyData> and
+C<dbm_tables> for L<DBD::DBM>.
+
+=head3 Internally private attributes to deal with SQL backends:
+
+Do not modify any of these private attributes unless you understand
+the implications of doing so. The behavior of DBD::File and derived
+DBDs might be unpredictable when one or more of those attributes are
+modified.
+
+=head4 sql_nano_version
+
+Contains the version of loaded DBI::SQL::Nano.
+
+=head4 sql_statement_version
+
+Contains the version of loaded SQL::Statement.
+
+=head4 sql_handler
+
+Contains either the text 'SQL::Statement' or 'DBI::SQL::Nano'.
+
+=head4 sql_ram_tables
+
+Contains optionally temporary tables.
+
+=head4 sql_flags
+
+Contains optional flags to instantiate the SQL::Parser parsing engine
+when SQL::Statement is used as SQL engine. See L<SQL::Parser> for valid
+flags.
+
+=head2 Driver private methods
+
+=head3 Default DBI methods
+
+=head4 data_sources
+
+The C<data_sources> method returns a list of subdirectories of the current
+directory in the form "dbi:CSV:f_dir=$dirname".
+
+If you want to read the subdirectories of another directory, use
+
+ my ($drh) = DBI->install_driver ("CSV");
+ my (@list) = $drh->data_sources (f_dir => "/usr/local/csv_data");
+
+=head4 list_tables
+
+This method returns a list of file names inside $dbh->{f_dir}.
+Example:
+
+ my ($dbh) = DBI->connect ("dbi:CSV:f_dir=/usr/local/csv_data");
+ my (@list) = $dbh->func ("list_tables");
+
+Note that the list includes all files contained in the directory, even
+those that have non-valid table names, from the view of SQL.
+
+=head3 Additional methods
+
+The following methods are only available via their documented name when
+DBD::File is used directly. Because this is only reasonable for testing
+purposes, the real names must be used instead. Those names can be computed
+by replacing the C<f_> in the method name with the driver prefix.
+
+=head4 f_versions
+
+Signature:
+
+ sub f_versions (;$)
+ {
+ my ($table_name) = @_;
+ $table_name ||= ".";
+ ...
+ }
+
+Returns the versions of the driver, including the DBI version, the Perl
+version, DBI::PurePerl version (if DBI::PurePerl is active) and the version
+of the SQL engine in use.
+
+ my $dbh = DBI->connect ("dbi:File:");
+ my $f_versions = $dbh->f_versions ();
+ print "$f_versions\n";
+ __END__
+ # DBD::File 0.39 using SQL::Statement 1.28
+ # DBI 1.612
+ # OS netbsd (5.99.24)
+ # Perl 5.010001 (x86_64-netbsd-thread-multi)
+
+Called in list context, f_versions will return an array containing each
+line as single entry.
+
+Some drivers might use the optional (table name) argument and modify
+version information related to the table (e.g. DBD::DBM provides storage
+backend information for the requested table, when it has a table name).
+
+=head4 f_get_meta
+
+Signature:
+
+ sub f_get_meta ($$)
+ {
+ my ($table_name, $attrib) = @_;
+ ...
+ }
+
+Returns the value of a meta attribute set for a specific table, if any.
+See L<f_meta> for the possible attributes.
+
+A table name of C<"."> (single dot) is interpreted as the default table.
+This will retrieve the appropriate attribute globally from the dbh.
+This has the same restrictions as C<< $dbh->{$attrib} >>.
+
+=head4 f_set_meta
+
+Signature:
+
+ sub f_set_meta ($$$)
+ {
+ my ($table_name, $attrib, $value) = @_;
+ ...
+ }
+
+Sets the value of a meta attribute set for a specific table.
+See L<f_meta> for the possible attributes.
+
+A table name of C<"."> (single dot) is interpreted as the default table
+which will set the specified attribute globally for the dbh.
+This has the same restrictions as C<< $dbh->{$attrib} = $value >>.
+
+=head4 f_clear_meta
+
+Signature:
+
+ sub f_clear_meta ($)
+ {
+ my ($table_name) = @_;
+ ...
+ }
+
+Clears the table specific meta information in the private storage of the
+dbh.
+
+=head1 SQL ENGINES
+
+DBD::File currently supports two SQL engines: L<SQL::Statement|SQL::Statement>
+and L<DBI::SQL::Nano::Statement|DBI::SQL::Nano>. DBI::SQL::Nano supports a
+I<very> limited subset of SQL statements, but it might be faster for some
+very simple tasks. SQL::Statement in contrast supports a much larger subset
+of ANSI SQL.
+
+To use SQL::Statement, you need at least version 1.28 of
+SQL::Statement and the environment variable C<DBI_SQL_NANO> must not
+be set to a true value.
+
+=head1 KNOWN BUGS AND LIMITATIONS
+
+=over 4
+
+=item *
+
+This module uses flock () internally but flock is not available on all
+platforms. On MacOS and Windows 95 there is no locking at all (perhaps
+not so important on MacOS and Windows 95, as there is only a single
+user).
+
+=item *
+
+The module stores details about the handled tables in a private area
+of the driver handle (C<$drh>). This data area is not shared between
+different driver instances, so several C<< DBI->connect () >> calls will
+cause different table instances and private data areas.
+
+This data area is filled for the first time when a table is accessed,
+either via an SQL statement or via C<table_info> and is not
+destroyed until the table is dropped or the driver handle is released.
+Manual destruction is possible via L<f_clear_meta>.
+
+The following attributes are preserved in the data area and will be
+evaluated instead of driver globals:
+
+=over 8
+
+=item f_ext
+
+=item f_dir
+
+=item f_lock
+
+=item f_lockfile
+
+=item f_encoding
+
+=item f_schema
+
+=item col_names
+
+=item sql_identifier_case
+
+=back
+
+The following attributes are preserved in the data area only and
+cannot be set globally.
+
+=over 8
+
+=item f_file
+
+=back
+
+The following attributes are preserved in the data area only and are
+computed when initializing the data area:
+
+=over 8
+
+=item f_fqfn
+
+=item f_fqbn
+
+=item f_fqln
+
+=item table_name
+
+=back
+
+For DBD::CSV tables this means, once opened "foo.csv" as table named "foo",
+another table named "foo" accessing the file "foo.txt" cannot be opened.
+Accessing "foo" will always access the file "foo.csv" in memorized
+C<f_dir>, locking C<f_lockfile> via memorized C<f_lock>.
+
+You can use L<f_clear_meta> or the C<f_file> attribute for a specific table
+to work around this.
+
+=item *
+
+When used with SQL::Statement and temporary tables e.g.,
+
+ CREATE TEMP TABLE ...
+
+the table data processing bypasses DBD::File::Table. No file system
+calls will be made and there are no clashes with existing (file based)
+tables with the same name. Temporary tables are chosen over file
+tables, but they will not be covered by C<table_info>.
+
+=back
+
+=head1 AUTHOR
+
+This module is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+The original author is Jochen Wiedmann.
+
+=head1 COPYRIGHT AND LICENSE
+
+ Copyright (C) 2009-2010 by H.Merijn Brand & Jens Rehsack
+ Copyright (C) 2004-2009 by Jeff Zucker
+ Copyright (C) 1998-2004 by Jochen Wiedmann
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=head1 SEE ALSO
+
+L<DBI|DBI>, L<DBD::DBM|DBD::DBM>, L<DBD::CSV|DBD::CSV>, L<Text::CSV|Text::CSV>,
+L<Text::CSV_XS|Text::CSV_XS>, L<SQL::Statement|SQL::Statement>, and
+L<DBI::SQL::Nano|DBI::SQL::Nano>
+
+=cut
diff --git a/lib/DBD/File/Developers.pod b/lib/DBD/File/Developers.pod
new file mode 100644
index 0000000..a9bef85
--- /dev/null
+++ b/lib/DBD/File/Developers.pod
@@ -0,0 +1,556 @@
+=head1 NAME
+
+DBD::File::Developers - Developers documentation for DBD::File
+
+=head1 SYNOPSIS
+
+ package DBD::myDriver;
+
+ use base qw(DBD::File);
+
+ sub driver
+ {
+ ...
+ my $drh = $proto->SUPER::driver($attr);
+ ...
+ return $drh->{class};
+ }
+
+ sub CLONE { ... }
+
+ package DBD::myDriver::dr;
+
+ @ISA = qw(DBD::File::dr);
+
+ sub data_sources { ... }
+ ...
+
+ package DBD::myDriver::db;
+
+ @ISA = qw(DBD::File::db);
+
+ sub init_valid_attributes { ... }
+ sub init_default_attributes { ... }
+ sub set_versions { ... }
+ sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
+ sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
+ sub get_myd_versions { ... }
+
+ package DBD::myDriver::st;
+
+ @ISA = qw(DBD::File::st);
+
+ sub FETCH { ... }
+ sub STORE { ... }
+
+ package DBD::myDriver::Statement;
+
+ @ISA = qw(DBD::File::Statement);
+
+ package DBD::myDriver::Table;
+
+ @ISA = qw(DBD::File::Table);
+
+ my %reset_on_modify = (
+ myd_abc => "myd_foo",
+ myd_mno => "myd_bar",
+ );
+ __PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+ my %compat_map = (
+ abc => 'foo_abc',
+ xyz => 'foo_xyz',
+ );
+ __PACKAGE__->register_compat_map( \%compat_map );
+
+ sub bootstrap_table_meta { ... }
+ sub init_table_meta { ... }
+ sub table_meta_attr_changed { ... }
+ sub open_file { ... }
+
+ sub fetch_row { ... }
+ sub push_row { ... }
+ sub push_names { ... }
+
+ # optimize the SQL engine by add one or more of
+ sub update_current_row { ... }
+ # or
+ sub update_specific_row { ... }
+ # or
+ sub update_one_row { ... }
+ # or
+ sub insert_new_row { ... }
+ # or
+ sub delete_current_row { ... }
+ # or
+ sub delete_one_row { ... }
+
+=head1 DESCRIPTION
+
+This document describes how DBD developers can write DBD::File based DBI
+drivers. It supplements L<DBI::DBD> and L<DBI::DBD::SqlEngine::Developers>,
+which you should read first.
+
+=head1 CLASSES
+
+Each DBI driver must provide a package global C<driver> method and three
+DBI related classes:
+
+=over 4
+
+=item DBD::File::dr
+
+Driver package, contains the methods DBI calls indirectly via DBI
+interface:
+
+ DBI->connect ('DBI:DBM:', undef, undef, {})
+
+ # invokes
+ package DBD::DBM::dr;
+ @DBD::DBM::dr::ISA = qw(DBD::File::dr);
+
+ sub connect ($$;$$$)
+ {
+ ...
+ }
+
+Similar for C<< data_sources () >> and C<< disconnect_all() >>.
+
+Pure Perl DBI drivers derived from DBD::File do not usually need to
+override any of the methods provided through the DBD::XXX::dr package
+however if you need additional initialization in the connect method
+you may need to.
+
+=item DBD::File::db
+
+Contains the methods which are called through DBI database handles
+(C<< $dbh >>). e.g.,
+
+ $sth = $dbh->prepare ("select * from foo");
+ # returns the f_encoding setting for table foo
+ $dbh->csv_get_meta ("foo", "f_encoding");
+
+DBD::File provides the typical methods required here. Developers who
+write DBI drivers based on DBD::File need to override the methods C<<
+set_versions >> and C<< init_valid_attributes >>.
+
+=item DBD::File::st
+
+Contains the methods to deal with prepared statement handles. e.g.,
+
+ $sth->execute () or die $sth->errstr;
+
+=back
+
+=head2 DBD::File
+
+This is the main package containing the routines to initialize
+DBD::File based DBI drivers. Primarily the C<< DBD::File::driver >>
+method is invoked, either directly from DBI when the driver is
+initialized or from the derived class.
+
+ package DBD::DBM;
+
+ use base qw( DBD::File );
+
+ sub driver
+ {
+ my ( $class, $attr ) = @_;
+ ...
+ my $drh = $class->SUPER::driver( $attr );
+ ...
+ return $drh;
+ }
+
+It is not necessary to implement your own driver method as long as
+additional initialization (e.g. installing more private driver
+methods) is not required. You do not need to call C<< setup_driver >>
+as DBD::File takes care of it.
+
+=head2 DBD::File::dr
+
+The driver package contains the methods DBI calls indirectly via the DBI
+interface (see L<DBI/DBI Class Methods>).
+
+DBD::File based DBI drivers usually do not need to implement anything here,
+it is enough to do the basic initialization:
+
+  package DBD::XXX::dr;
+
+ @DBD::XXX::dr::ISA = qw (DBD::File::dr);
+ $DBD::XXX::dr::imp_data_size = 0;
+ $DBD::XXX::dr::data_sources_attr = undef;
+ $DBD::XXX::ATTRIBUTION = "DBD::XXX $DBD::XXX::VERSION by Hans Mustermann";
+
+=head2 DBD::File::db
+
+This package defines the database methods, which are called via the DBI
+database handle C<< $dbh >>.
+
+Methods provided by DBD::File:
+
+=over 4
+
+=item ping
+
+Simply returns the content of the C<< Active >> attribute. Override
+when your driver needs more complicated actions here.
+
+=item prepare
+
+Prepares a new SQL statement to execute. Returns a statement handle,
+C<< $sth >> - instance of the DBD::XXX::st. It is neither required nor
+recommended to override this method.
+
+=item FETCH
+
+Fetches an attribute of a DBI database object. Private handle attributes
+must have a prefix (this is mandatory). If a requested attribute is
+detected as a private attribute without a valid prefix, the driver prefix
+(written as C<$drv_prefix>) is added.
+
+The driver prefix is extracted from the attribute name and verified against
+C<< $dbh->{ $drv_prefix . "valid_attrs" } >> (when it exists). If the
+requested attribute value is not listed as a valid attribute, this method
+croaks. If the attribute is valid and readonly (listed in C<< $dbh->{
+$drv_prefix . "readonly_attrs" } >> when it exists), a real copy of the
+attribute value is returned. So it's not possible to modify
+C<f_valid_attrs> from outside of DBD::File::db or a derived class.
+
+=item STORE
+
+Stores a database private attribute. Private handle attributes must have a
+prefix (this is mandatory). If a requested attribute is detected as a private
+attribute without a valid prefix, the driver prefix (written as
+C<$drv_prefix>) is added. If the database handle has an attribute
+C<${drv_prefix}_valid_attrs> - for attribute names which are not listed in
+that hash, this method croaks. If the database handle has an attribute
+C<${drv_prefix}_readonly_attrs>, only attributes which are not listed there
+can be stored (once they are initialized). Trying to overwrite such an
+immutable attribute forces this method to croak.
+
+An example of a valid attributes list can be found in
+C<< DBD::File::db::init_valid_attributes >>.
+
+=item set_versions
+
+This method sets the attribute C<f_version> with the version of DBD::File.
+
+This method is called at the beginning of the C<connect ()> phase.
+
+When overriding this method, do not forget to invoke the superior one.
+
+=item init_valid_attributes
+
+This method is called after the database handle is instantiated as the
+first attribute initialization.
+
+C<< DBD::File::db::init_valid_attributes >> initializes the attributes
+C<f_valid_attrs> and C<f_readonly_attrs>.
+
+When overriding this method, do not forget to invoke the superior one,
+preferably before doing anything else. Compatibility table attribute
+access must be initialized here to allow DBD::File to instantiate the
+map tie:
+
+ # for DBD::CSV
+ $dbh->{csv_meta} = "csv_tables";
+ # for DBD::DBM
+ $dbh->{dbm_meta} = "dbm_tables";
+ # for DBD::AnyData
+ $dbh->{ad_meta} = "ad_tables";
+
+=item init_default_attributes
+
+This method is called after the database handle is instantiated to
+initialize the default attributes.
+
+C<< DBD::File::db::init_default_attributes >> initializes the attributes
+C<f_dir>, C<f_meta>, C<f_meta_map>, C<f_version>.
+
+When the derived implementor class provides the attribute to validate
+attributes (e.g. C<< $dbh->{dbm_valid_attrs} = {...}; >>) or the attribute
+containing the immutable attributes (e.g. C<< $dbh->{dbm_readonly_attrs}
+= {...}; >>), the attributes C<drv_valid_attrs>, C<drv_readonly_attrs>,
+C<drv_version> and C<drv_meta> are added (when available) to the list of
+valid and immutable attributes (where C<drv_> is interpreted as the driver
+prefix).
+
+If C<drv_meta> is set, an attribute with the name in C<drv_meta> is
+initialized providing restricted read/write access to the meta data of the
+tables using C<DBD::File::TieTables> in the first (table) level and
+C<DBD::File::TieMeta> for the meta attribute level. C<DBD::File::TieTables>
+uses C<DBD::DRV::Table::get_table_meta> to initialize the second level
+tied hash on FETCH/STORE. The C<DBD::File::TieMeta> class uses
+C<DBD::DRV::Table::get_table_meta_attr> to FETCH attribute values and
+C<DBD::DRV::Table::set_table_meta_attr> to STORE attribute values. This
+allows it to map meta attributes for compatibility reasons.
+
+=item get_single_table_meta
+
+=item get_file_meta
+
+Retrieve an attribute from a table's meta information. The method
+signature is C<< get_file_meta ($dbh, $table, $attr) >>. This method
+is called by the injected db handle method C<< ${drv_prefix}get_meta
+>>.
+
+While get_file_meta allows C<$table> or C<$attr> to be a list of tables or
+attributes to retrieve, get_single_table_meta allows only one table name
+and only one attribute name. A table name of C<'.'> (single dot) is
+interpreted as the default table and this will retrieve the appropriate
+attribute globally from the dbh. This has the same restrictions as
+C<< $dbh->{$attrib} >>.
+
+get_file_meta allows C<'+'> and C<'*'> as wildcards for table names and
+C<$table> being a regular expression matching against the table names
+(evaluated without the default table). The table name C<'*'> is
+I<all currently known tables, including the default one>. The table
+name C<'+'> is I<all table names which conform to
+ANSI file name restrictions> (/^[_A-Za-z0-9]+$/).
+
+The table meta information is retrieved using the get_table_meta and
+get_table_meta_attr methods of the table class of the implementation.
+
+=item set_single_table_meta
+
+=item set_file_meta
+
+Sets an attribute in a table's meta information. The method signature is
+C<< set_file_meta ($dbh, $table, $attr, $value) >>. This method is called
+by the injected db handle method C<< ${drv_prefix}set_meta >>.
+
+While set_file_meta allows C<$table> to be a list of tables and C<$attr>
+to be a hash of several attributes to set, set_single_table_meta allows
+only one table name and only one attribute name/value pair.
+
+The wildcard characters for the table name are the same as for
+get_file_meta.
+
+The table meta information is updated using the get_table_meta and
+set_table_meta_attr methods of the table class of the implementation.
+
+=item clear_file_meta
+
+Clears all meta information cached about a table. The method signature is
+C<< clear_file_meta ($dbh, $table) >>. This method is called
+by the injected db handle method C<< ${drv_prefix}clear_meta >>.
+
+=back
+
+=head2 DBD::File::st
+
+Contains the methods to deal with prepared statement handles:
+
+=over 4
+
+=item FETCH
+
+Fetches statement handle attributes. Supported attributes (for full overview
+see L<DBI/Statement Handle Attributes>) are C<NAME>, C<TYPE>, C<PRECISION>
+and C<NULLABLE> in case that SQL::Statement is used as SQL execution engine
+and a statement is successful prepared. When SQL::Statement has additional
+information about a table, those information are returned. Otherwise, the
+same defaults as in L<DBI::DBD::SqlEngine> are used.
+
+This method usually requires extending in a derived implementation.
+See L<DBD::CSV> or L<DBD::DBM> for examples.
+
+=back
+
+=head2 DBD::File::Statement
+
+Derives from DBI::SQL::Nano::Statement to provide the following method:
+
+=over 4
+
+=item open_table
+
+Implements the open_table method required by L<SQL::Statement> and
+L<DBI::SQL::Nano>. All the work for opening the file(s) belonging to the
+table is handled and parameterized in DBD::File::Table. Unless you intend
+to add anything to the following implementation, an empty DBD::XXX::Statement
+package satisfies DBD::File.
+
+ sub open_table ($$$$$)
+ {
+ my ($self, $data, $table, $createMode, $lockMode) = @_;
+
+ my $class = ref $self;
+ $class =~ s/::Statement/::Table/;
+
+ my $flags = {
+ createMode => $createMode,
+ lockMode => $lockMode,
+ };
+ $self->{command} eq "DROP" and $flags->{dropMode} = 1;
+
+ return $class->new ($data, { table => $table }, $flags);
+ } # open_table
+
+=back
+
+=head2 DBD::File::Table
+
+Derives from DBI::SQL::Nano::Table and provides physical file access for
+the table data which are stored in the files.
+
+=over 4
+
+=item file2table
+
+This method tries to map a filename to the associated table
+name. It is called with a partially filled meta structure for the
+resulting table containing at least the following attributes:
+C<< f_ext >>, C<< f_dir >>, C<< f_lockfile >> and C<< sql_identifier_case >>.
+
+If a file/table map can be found then this method sets the C<< f_fqfn
+>>, C<< f_fqbn >>, C<< f_fqln >> and C<< table_name >> attributes in
+the meta structure. If a map cannot be found the table name will be
+undef.
+
+=item bootstrap_table_meta
+
+Initializes a table meta structure. Can be safely overridden in a
+derived class, as long as the C<< SUPER >> method is called at the end
+of the overridden method.
+
+It copies the following attributes from the database into the table meta data
+C<< f_dir >>, C<< f_ext >>, C<< f_encoding >>, C<< f_lock >>, C<< f_schema >>,
+C<< f_lockfile >> and C<< sql_identifier_case >> and makes them sticky to the
+table.
+
+This method should be called before you attempt to map between file
+name and table name to ensure the correct directory, extension etc. are
+used.
+
+=item init_table_meta
+
+Initializes more attributes of the table meta data - usually more
+expensive ones (e.g. those which require class instantiations) - when
+the file name and the table name could be mapped.
+
+=item get_table_meta
+
+Returns the table meta data. If there are none for the required
+table, a new one is initialized. When it fails, nothing is
+returned. On success, the name of the table and the meta data
+structure is returned.
+
+=item get_table_meta_attr
+
+Returns a single attribute from the table meta data. If the attribute
+name appears in C<%compat_map>, the attribute name is updated from
+there.
+
+=item set_table_meta_attr
+
+Sets a single attribute in the table meta data. If the attribute
+name appears in C<%compat_map>, the attribute name is updated from
+there.
+
+=item table_meta_attr_changed
+
+Called when an attribute of the meta data is modified.
+
+If the modified attribute requires to reset a calculated attribute, the
+calculated attribute is reset (deleted from meta data structure) and
+the I<initialized> flag is removed, too. The decision is made based on
+C<%register_reset_on_modify>.
+
+=item register_reset_on_modify
+
+Allows C<set_table_meta_attr> to reset meta attributes when special
+attributes are modified. For DBD::File, modifying one of C<f_file>, C<f_dir>,
+C<f_ext> or C<f_lockfile> will reset C<f_fqfn>. DBD::DBM extends the
+list for C<dbm_type> and C<dbm_mldbm> to reset the value of C<dbm_tietype>.
+
+If your DBD has calculated values in the meta data area, then call
+C<register_reset_on_modify>:
+
+ my %reset_on_modify = ( "xxx_foo" => "xxx_bar" );
+ __PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+
+=item register_compat_map
+
+Allows C<get_table_meta_attr> and C<set_table_meta_attr> to update the
+attribute name to the current favored one:
+
+ # from DBD::DBM
+ my %compat_map = ( "dbm_ext" => "f_ext" );
+ __PACKAGE__->register_compat_map( \%compat_map );
+
+=item open_file
+
+Called to open the table's data file.
+
+Depending on the attributes set in the table's meta data, the
+following steps are performed. Unless C<< f_dontopen >> is set to a
+true value, C<< f_fqfn >> must contain the full qualified file name
+for the table to work on (file2table ensures this). The encoding in
+C<< f_encoding >> is applied if set and the file is opened. If
+C<< f_fqln >> (full qualified lock name) is set, this file is opened,
+too. Depending on the value in C<< f_lock >>, the appropriate lock is
+set on the opened data file or lock file.
+
+After this is done, a derived class might add more steps in an overridden
+C<< open_file >> method.
+
+=item new
+
+Instantiates the table. This is done in 3 steps:
+
+ 1. get the table meta data
+ 2. open the data file
+ 3. bless the table data structure using inherited constructor new
+
+It is not recommended to override the constructor of the table class.
+Find a reasonable place to add your extensions in one of the above four
+methods.
+
+=item drop
+
+Implements the abstract table method for the C<< DROP >>
+command. Discards table meta data after all files belonging to the
+table are closed and unlinked.
+
+Overriding this method might be reasonable in very rare cases.
+
+=item seek
+
+Implements the abstract table method used when accessing the table from the
+engine. C<< seek >> is called every time the engine uses dumb algorithms
+for iterating over the table content.
+
+=item truncate
+
+Implements the abstract table method used when dumb table algorithms
+for C<< UPDATE >> or C<< DELETE >> need to truncate the table storage
+after the last written row.
+
+=back
+
+You should consult the documentation of C<< SQL::Eval::Table >> (see
+L<SQL::Eval>) to get more information about the abstract methods of the
+table's base class you have to override and a description of the table
+meta information expected by the SQL engines.
+
+=head1 AUTHOR
+
+The module DBD::File is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+The original author is Jochen Wiedmann.
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=cut
diff --git a/lib/DBD/File/HowTo.pod b/lib/DBD/File/HowTo.pod
new file mode 100644
index 0000000..3d45257
--- /dev/null
+++ b/lib/DBD/File/HowTo.pod
@@ -0,0 +1,270 @@
+=head1 NAME
+
+DBD::File::HowTo - Guide to create DBD::File based driver
+
+=head1 SYNOPSIS
+
+ perldoc DBD::File::HowTo
+ perldoc DBI
+ perldoc DBI::DBD
+ perldoc DBD::File::Developers
+ perldoc DBI::DBD::SqlEngine::Developers
+ perldoc DBI::DBD::SqlEngine
+ perldoc SQL::Eval
+ perldoc DBI::DBD::SqlEngine::HowTo
+ perldoc SQL::Statement::Embed
+ perldoc DBD::File
+ perldoc DBD::File::HowTo
+ perldoc DBD::File::Developers
+
+=head1 DESCRIPTION
+
+This document provides a step-by-step guide, how to create a new
+C<DBD::File> based DBD. It expects that you carefully read the L<DBI>
+documentation and that you're familiar with L<DBI::DBD> and had read and
+understood L<DBD::ExampleP>.
+
+This document addresses experienced developers who are really sure that
+they need to invest time when writing a new DBI Driver. Writing a DBI
+Driver is neither a weekend project nor an easy job for hobby coders
+after work. Expect one or two man-month of time for the first start.
+
+Those who are still reading, should be able to sing the rules of
+L<DBI::DBD/CREATING A NEW DRIVER>.
+
+Of course, DBD::File is a DBI::DBD::SqlEngine and you surely read
+L<DBI::DBD::SqlEngine::HowTo> before continuing here.
+
+=head1 CREATING DRIVER CLASSES
+
+Do you have an entry in DBI's DBD registry? For this guide, a prefix of
+C<foo_> is assumed.
+
+=head2 Sample Skeleton
+
+ package DBD::Foo;
+
+ use strict;
+ use warnings;
+ use vars qw(@ISA $VERSION);
+ use base qw(DBD::File);
+
+ use DBI ();
+
+ $VERSION = "0.001";
+
+ package DBD::Foo::dr;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBD::File::dr);
+ $imp_data_size = 0;
+
+ package DBD::Foo::db;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBD::File::db);
+ $imp_data_size = 0;
+
+ package DBD::Foo::st;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBD::File::st);
+ $imp_data_size = 0;
+
+ package DBD::Foo::Statement;
+
+ use vars qw(@ISA);
+
+ @ISA = qw(DBD::File::Statement);
+
+ package DBD::Foo::Table;
+
+ use vars qw(@ISA);
+
+ @ISA = qw(DBD::File::Table);
+
+ 1;
+
+Tiny, eh? And all you have now is a DBD named foo which is able to
+deal with temporary tables, as long as you use L<SQL::Statement>. In
+L<DBI::SQL::Nano> environments, this DBD can do nothing.
+
+=head2 Start over
+
+Based on L<DBI::DBD::SqlEngine::HowTo>, we're now having a driver which
+could do basic things. Of course, it should now derive from DBD::File
+instead of DBI::DBD::SqlEngine, shouldn't it?
+
+DBD::File extends DBI::DBD::SqlEngine to deal with any kind of files.
+In principle, the only extensions required are to the table class:
+
+ package DBD::Foo::Table;
+
+ sub bootstrap_table_meta
+ {
+ my ( $self, $dbh, $meta, $table ) = @_;
+
+ # initialize all $meta attributes which might be relevant for
+ # file2table
+
+ return $self->SUPER::bootstrap_table_meta($dbh, $meta, $table);
+ }
+
+ sub init_table_meta
+ {
+ my ( $self, $dbh, $meta, $table ) = @_;
+
+ # called after $meta contains the results from file2table
+ # initialize all missing $meta attributes
+
+ $self->SUPER::init_table_meta( $dbh, $meta, $table );
+ }
+
+In case C<DBD::File::Table::open_file> doesn't open the files as the driver
+needs that, override it!
+
+ sub open_file
+ {
+ my ( $self, $meta, $attrs, $flags ) = @_;
+ # ensure that $meta->{f_dontopen} is set
+ $self->SUPER::open_file( $meta, $attrs, $flags );
+ # now do what ever needs to be done
+ }
+
+Combined with the methods implemented using the L<SQL::Statement::Embed>
+guide, the table is fully working and you can try it out.
+
+=head2 User comfort
+
+C<DBD::File> since C<0.39> consolidates all persistent meta data of a table
+into a single structure stored in C<< $dbh->{f_meta} >>. While DBD::File
+provides only readonly access to this structure, modifications are still
+allowed.
+
+Primarily DBD::File provides access via setters C<get_file_meta>,
+C<set_file_meta> and C<clear_file_meta>. Those methods are easily
+accessible by the users via the C<< $dbh->func () >> interface provided
+by DBI. Well, many users don't feel comfortable when calling
+
+ # don't require extension for tables cars
+ $dbh->func ("cars", "f_ext", ".csv", "set_file_meta");
+
+DBD::File will inject a method into your driver to increase the user
+comfort to allow:
+
+ # don't require extension for tables cars
+ $dbh->foo_set_meta ("cars", "f_ext", ".csv");
+
+Better, but here and there users like to do:
+
+ # don't require extension for tables cars
+ $dbh->{foo_tables}->{cars}->{f_ext} = ".csv";
+
+This interface is provided when derived DBD's define following in
+C<init_valid_attributes> (please compare carefully with the example in
+DBI::DBD::SqlEngine::HowTo):
+
+ sub init_valid_attributes
+ {
+ my $dbh = $_[0];
+
+ $dbh->SUPER::init_valid_attributes ();
+
+ $dbh->{foo_valid_attrs} = {
+ foo_version => 1, # contains version of this driver
+ foo_valid_attrs => 1, # contains the valid attributes of foo drivers
+ foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
+ foo_bar => 1, # contains the bar attribute
+ foo_baz => 1, # contains the baz attribute
+ foo_manager => 1, # contains the manager of the driver instance
+ foo_manager_type => 1, # contains the manager class of the driver instance
+ foo_meta => 1, # contains the public interface to modify table meta attributes
+ };
+ $dbh->{foo_readonly_attrs} = {
+ foo_version => 1, # ensure no-one modifies the driver version
+ foo_valid_attrs => 1, # do not permit to add more valid attributes ...
+ foo_readonly_attrs => 1, # ... or make the immutable mutable
+ foo_manager => 1, # manager is set internally only
+ foo_meta => 1, # ensure public interface to modify table meta attributes are immutable
+ };
+
+ $dbh->{foo_meta} = "foo_tables";
+
+ return $dbh;
+ }
+
+This provides a tied hash in C<< $dbh->{foo_tables} >> and a tied hash for
+each table's meta data in C<< $dbh->{foo_tables}->{$table_name} >>.
+Modifications on the table meta attributes are done using the table
+methods:
+
+ sub get_table_meta_attr { ... }
+ sub set_table_meta_attr { ... }
+
+Both methods can adjust the attribute name for compatibility reasons, e.g.
+when former versions of the DBD allowed different names to be used for the
+same flag:
+
+ my %compat_map = (
+ abc => 'foo_abc',
+ xyz => 'foo_xyz',
+ );
+ __PACKAGE__->register_compat_map( \%compat_map );
+
+If any user modification on a meta attribute needs reinitialization of
+the meta structure (in case of C<DBD::File> these are the attributes
+C<f_file>, C<f_dir>, C<f_ext> and C<f_lockfile>), inform DBD::File by
+doing
+
+ my %reset_on_modify = (
+ foo_xyz => "foo_bar",
+ foo_abc => "foo_bar",
+ );
+ __PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+
+The next access to the table meta data will force DBD::File to re-do the
+entire meta initialization process.
+
+Any further action which needs to be taken can be handled in
+C<table_meta_attr_changed>:
+
+ sub table_meta_attr_changed
+ {
+ my ($class, $meta, $attrib, $value) = @_;
+ ...
+ $class->SUPER::table_meta_attr_changed ($meta, $attrib, $value);
+ }
+
+This is done before the new value is set in C<$meta>, so the attribute
+changed handler can act depending on the old value.
+
+=head2 Testing
+
+Now you should have your own DBD::File based driver. Was easy, wasn't it?
+But does it work well? Prove it by writing tests and remember to use
+dbd_edit_mm_attribs from L<DBI::DBD> to ensure testing even rare cases.
+
+=head1 AUTHOR
+
+This guide is written by Jens Rehsack. DBD::File is written by Jochen
+Wiedmann and Jeff Zucker.
+
+The module DBD::File is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=cut
diff --git a/lib/DBD/File/Roadmap.pod b/lib/DBD/File/Roadmap.pod
new file mode 100644
index 0000000..804d759
--- /dev/null
+++ b/lib/DBD/File/Roadmap.pod
@@ -0,0 +1,176 @@
+=head1 NAME
+
+DBD::File::Roadmap - Planned Enhancements for DBD::File and pure Perl DBD's
+
+Jens Rehsack - May 2010
+
+=head1 SYNOPSIS
+
+This document gives a high level overview of the future of the DBD::File DBI
+driver and groundwork for pure Perl DBI drivers.
+
+The planned enhancements cover features, testing, performance, reliability,
+extensibility and more.
+
+=head1 CHANGES AND ENHANCEMENTS
+
+=head2 Features
+
+There are some features missing we would like to add, but there is
+no time plan:
+
+=over 4
+
+=item LOCK TABLE
+
+The newly implemented internal common table meta storage area would allow
+us to implement LOCK TABLE support based on file system C<flock ()>
+support.
+
+=item Transaction support
+
+While DBD::AnyData recommends explicitly committing by importing and
+exporting tables, DBD::File might be enhanced in a future version to allow
+transparent transactions using the temporary tables of SQL::Statement as
+shadow (dirty) tables.
+
+Transaction support will heavily rely on lock table support.
+
+=item Data Dictionary Persistence
+
+SQL::Statement provides dictionary information when a "CREATE TABLE ..."
+statement is executed. This dictionary is preserved for some statement
+handle attribute fetches (as C<NULLABLE> or C<PRECISION>).
+
+It is planned to extend DBD::File to support data dictionaries to work
+on the tables in it. It is not planned to support one table in different
+dictionaries, but you can have several dictionaries in one directory.
+
+=item SQL Engine selecting on connect
+
+Currently the SQL engine selected is chosen during the loading of the module
+L<DBI::SQL::Nano>. Ideally end users should be able to select the engine
+used in C<< DBI->connect () >> with a special DBD::File attribute.
+
+=back
+
+Other points of view to the planned features (and more features for the
+SQL::Statement engine) are shown in L<SQL::Statement::Roadmap>.
+
+=head2 Testing
+
+DBD::File and the dependent DBD::DBM requires a lot more automated tests
+covering API stability and compatibility with optional modules
+like SQL::Statement.
+
+=head2 Performance
+
+Several arguments for support of features like indexes on columns
+and cursors are made for DBD::CSV (which is a DBD::File based driver,
+too). Similar arguments could be made for DBD::DBM, DBD::AnyData,
+DBD::RAM or DBD::PO etc.
+
+To improve the performance of the underlying SQL engines, a clean
+reimplementation seems to be required. Currently both engines are
+prematurely optimized and therefore it is not trivial to provide
+further optimization without the risk of breaking existing features.
+
+Join the DBI developers IRC channel at L<irc://irc.perl.org/dbi> to
+participate or post to the DBI Developers Mailing List.
+
+=head2 Reliability
+
+DBD::File currently lacks the following points:
+
+=over 4
+
+=item duplicate table names
+
+It is currently possible to access a table quoted with a relative path
+(a) and additionally using an absolute path (b). If (a) and (b) are
+the same file that is not recognized (except for
+flock protection handled by the Operating System) and two independent
+tables are handled.
+
+=item invalid table names
+
+The current implementation does not prevent someone choosing a
+directory name as a physical file name for the table to open.
+
+=back
+
+=head2 Extensibility
+
+I (Jens Rehsack) have some (partially for example only) DBD's in mind:
+
+=over 4
+
+=item DBD::Sys
+
+Derive DBD::Sys from a common code base shared with DBD::File which handles
+all the emulation DBI needs (as getinfo, SQL engine handling, ...)
+
+=item DBD::Dir
+
+Provide a DBD::File derived to work with fixed table definitions through the
+file system to demonstrate how DBI / Pure Perl DBDs could handle databases
+with hierarchical structures.
+
+=item DBD::Join
+
+Provide a DBI driver which is able to manage multiple connections to other
+Databases (as DBD::Multiplex), but allow them to point to different data
+sources and allow joins between the tables of them:
+
+ # Example
+ # Let table 'lsof' being a table in DBD::Sys giving a list of open files using lsof utility
+ # Let table 'dir' be a table from DBD::Dir
+ $sth = $dbh->prepare( "select * from dir,lsof where path='/documents' and dir.entry = lsof.filename" );
+ $sth->execute(); # gives all open files in '/documents'
+ ...
+
+ # Let table 'filesys' a DBD::Sys table of known file systems on current host
+ # Let table 'applications' a table of your Configuration Management Database
+ # where current applications (relocatable, with mountpoints for filesystems)
+ # are stored
+ $sth = $dbh->prepare( "select * from applications,filesys where " .
+ "application.mountpoint = filesys.mountpoint and ".
+ "filesys.mounted is true" );
+ $sth->execute(); # gives all currently mounted applications on this host
+
+=back
+
+=head1 PRIORITIES
+
+Our priorities are focussed on current issues. Initially many new test
+cases for DBD::File and DBD::DBM should be added to the DBI test
+suite. After that some additional documentation on how to use the
+DBD::File API will be provided.
+
+Any additional priorities will come later and can be modified by (paying)
+users.
+
+=head1 RESOURCES AND CONTRIBUTIONS
+
+See L<http://dbi.perl.org/contributing> for I<how you can help>.
+
+If your company has benefited from DBI, please consider if
+it could make a donation to The Perl Foundation "DBI Development"
+fund at L<http://dbi.perl.org/donate> to secure future development.
+
+Alternatively, if your company would benefit from a specific new
+DBI feature, please consider sponsoring its development through
+the options listed in the section "Commercial Support from the Author"
+on L<http://dbi.perl.org/support/>.
+
+Using such targeted financing allows you to contribute to DBI
+development and rapidly get something specific and directly valuable
+to you in return.
+
+My company also offers annual support contracts for the DBI, which
+provide another way to support the DBI and get something specific
+in return. Contact me for details.
+
+Thank you.
+
+=cut
diff --git a/lib/DBD/Gofer.pm b/lib/DBD/Gofer.pm
new file mode 100644
index 0000000..afd8201
--- /dev/null
+++ b/lib/DBD/Gofer.pm
@@ -0,0 +1,1292 @@
+{
+    package DBD::Gofer;
+
+    use strict;
+
+    require DBI;
+    require DBI::Gofer::Request;
+    require DBI::Gofer::Response;
+    require Carp;
+
+    our $VERSION = sprintf("0.%06d", q$Revision: 15326 $ =~ /(\d+)/o);
+
+# $Id: Gofer.pm 15326 2012-06-06 16:32:38Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+
+
+    # attributes we'll allow local STORE
+    our %xxh_local_store_attrib = map { $_=>1 } qw(
+        Active
+        CachedKids
+        Callbacks
+        DbTypeSubclass
+        ErrCount Executed
+        FetchHashKeyName
+        HandleError HandleSetErr
+        InactiveDestroy
+        AutoInactiveDestroy
+        PrintError PrintWarn
+        Profile
+        RaiseError
+        RootClass
+        ShowErrorStatement
+        Taint TaintIn TaintOut
+        TraceLevel
+        Warn
+        dbi_quote_identifier_cache
+        dbi_connect_closure
+        dbi_go_execute_unique
+    );
+    # attributes we'll also allow a local STORE for, but only when the
+    # new value is the same as the value the handle already holds
+    our %xxh_local_store_attrib_if_same_value = map { $_=>1 } qw(
+        Username
+        dbi_connect_method
+    );
+
+    our $drh = undef;    # holds driver handle once initialized
+    our $methods_already_installed;
+
+    # Standard DBD driver() entry point: creates (on first call) and
+    # returns the singleton driver handle. Also installs the private
+    # go_* methods into the DBI dispatcher, guarded by
+    # $methods_already_installed so it only happens once per process.
+    sub driver{
+        return $drh if $drh;
+
+        DBI->setup_driver('DBD::Gofer');
+
+        unless ($methods_already_installed++) {
+            my $opts = { O=> 0x0004 }; # IMA_KEEP_ERR
+            DBD::Gofer::db->install_method('go_dbh_method', $opts);
+            DBD::Gofer::st->install_method('go_sth_method', $opts);
+            DBD::Gofer::st->install_method('go_clone_sth', $opts);
+            DBD::Gofer::db->install_method('go_cache', $opts);
+            DBD::Gofer::st->install_method('go_cache', $opts);
+        }
+
+        my($class, $attr) = @_;
+        $class .= "::dr";
+        ($drh) = DBI::_new_drh($class, {
+            'Name' => 'Gofer',
+            'Version' => $VERSION,
+            'Attribution' => 'DBD Gofer by Tim Bunce',
+        });
+
+        $drh;
+    }
+
+
+    # thread support: a driver handle can't be shared across threads,
+    # so discard it and let driver() recreate it in the new thread
+    sub CLONE {
+        undef $drh;
+    }
+
+
+    # Get/set the go_cache object for a dbh or sth. With an argument it
+    # sets the handle's own cache; reads fall back to the transport's
+    # default cache when the handle has no override of its own.
+    sub go_cache {
+        my $h = shift;
+        $h->{go_cache} = shift if @_;
+        # return handle's override go_cache, if it has one
+        return $h->{go_cache} if defined $h->{go_cache};
+        # or else the transports default go_cache
+        return $h->{go_transport}->{go_cache};
+    }
+
+
+    sub set_err_from_response { # set error/warn/info and propagate warnings
+        my $h = shift;
+        my $response = shift;
+        if (my $warnings = $response->warnings) {
+            warn $_ for @$warnings;
+        }
+        my ($err, $errstr, $state) = $response->err_errstr_state;
+        # Only set_err() if there's an error else leave the current values
+        # (The current values will normally be set undef by the DBI dispatcher
+        # except for methods marked KEEPERR such as ping.)
+        $h->set_err($err, $errstr, $state) if defined $err;
+        return undef;
+    }
+
+
+    # Install client-side proxy stubs for methods that the remote server
+    # side reports as installed (via install_method on the real driver).
+    # db methods are forwarded through go_dbh_method; methods on other
+    # handle types can't be proxied and so become error stubs.
+    sub install_methods_proxy {
+        my ($installed_methods) = @_;
+        while ( my ($full_method, $attr) = each %$installed_methods ) {
+            # need to install both a DBI dispatch stub and a proxy stub
+            # (the dispatch stub may be already here due to local driver use)
+
+            DBI->_install_method($full_method, "", $attr||{})
+                unless defined &{$full_method};
+
+            # now install proxy stubs on the driver side
+            $full_method =~ m/^DBI::(\w\w)::(\w+)$/
+                or die "Invalid method name '$full_method' for install_method";
+            my ($type, $method) = ($1, $2);
+            my $driver_method = "DBD::Gofer::${type}::${method}";
+            next if defined &{$driver_method};
+            my $sub;
+            if ($type eq 'db') {
+                $sub = sub { return shift->go_dbh_method(undef, $method, @_) };
+            }
+            else {
+                $sub = sub { shift->set_err($DBI::stderr, "Can't call \$${type}h->$method when using DBD::Gofer"); return; };
+            }
+            no strict 'refs';
+            *$driver_method = $sub;
+        }
+    }
+}
+
+
+{ package DBD::Gofer::dr; # ====== DRIVER ======
+
+    $imp_data_size = 0;
+    use strict;
+
+    # Like connect() but defaults go_connect_method to 'connect_cached'
+    # so the remote side will also reuse its database handles.
+    sub connect_cached {
+        my ($drh, $dsn, $user, $auth, $attr)= @_;
+        $attr ||= {};
+        return $drh->SUPER::connect_cached($dsn, $user, $auth, {
+            (%$attr),
+            go_connect_method => $attr->{go_connect_method} || 'connect_cached',
+        });
+    }
+
+
+    # Parse the gofer dsn (everything after "dsn=" is the remote dsn),
+    # resolve the policy, cache and transport objects, build the remote
+    # connect request, and create the client-side dbh. Unless the policy
+    # says otherwise, a ping is forwarded to verify the connection.
+    sub connect {
+        my($drh, $dsn, $user, $auth, $attr)= @_;
+        my $orig_dsn = $dsn;
+
+        # first remove dsn= and everything after it
+        my $remote_dsn = ($dsn =~ s/;?\bdsn=(.*)$// && $1)
+            or return $drh->set_err($DBI::stderr, "No dsn= argument in '$orig_dsn'");
+
+        if ($attr->{go_bypass}) { # don't use DBD::Gofer for this connection
+            # useful for testing with DBI_AUTOPROXY, e.g., t/03handle.t
+            return DBI->connect($remote_dsn, $user, $auth, $attr);
+        }
+
+        my %go_attr;
+        # extract any go_ attributes from the connect() attr arg
+        for my $k (grep { /^go_/ } keys %$attr) {
+            $go_attr{$k} = delete $attr->{$k};
+        }
+        # then override those with any attributes embedded in our dsn (not remote_dsn)
+        for my $kv (grep /=/, split /;/, $dsn, -1) {
+            my ($k, $v) = split /=/, $kv, 2;
+            $go_attr{ "go_$k" } = $v;
+        }
+
+        if (not ref $go_attr{go_policy}) { # if not a policy object already
+            my $policy_class = $go_attr{go_policy} || 'classic';
+            $policy_class = "DBD::Gofer::Policy::$policy_class"
+                unless $policy_class =~ /::/;
+            _load_class($policy_class)
+                or return $drh->set_err($DBI::stderr, "Can't load $policy_class: $@");
+            # replace policy name in %go_attr with policy object
+            $go_attr{go_policy} = eval { $policy_class->new(\%go_attr) }
+                or return $drh->set_err($DBI::stderr, "Can't instanciate $policy_class: $@");
+        }
+        # policy object is left in $go_attr{go_policy} so transport can see it
+        my $go_policy = $go_attr{go_policy};
+
+        if ($go_attr{go_cache} and not ref $go_attr{go_cache}) { # if not a cache object already
+            my $cache_class = $go_attr{go_cache};
+            $cache_class = "DBI::Util::CacheMemory" if $cache_class eq '1';
+            _load_class($cache_class)
+                or return $drh->set_err($DBI::stderr, "Can't load $cache_class $@");
+            $go_attr{go_cache} = eval { $cache_class->new() }
+                or $drh->set_err(0, "Can't instanciate $cache_class: $@"); # warning
+        }
+
+        # delete any other attributes that don't apply to transport
+        my $go_connect_method = delete $go_attr{go_connect_method};
+
+        my $transport_class = delete $go_attr{go_transport}
+            or return $drh->set_err($DBI::stderr, "No transport= argument in '$orig_dsn'");
+        $transport_class = "DBD::Gofer::Transport::$transport_class"
+            unless $transport_class =~ /::/;
+        _load_class($transport_class)
+            or return $drh->set_err($DBI::stderr, "Can't load $transport_class: $@");
+        my $go_transport = eval { $transport_class->new(\%go_attr) }
+            or return $drh->set_err($DBI::stderr, "Can't instanciate $transport_class: $@");
+
+        my $request_class = "DBI::Gofer::Request";
+        my $go_request = eval {
+            my $go_attr = { %$attr };
+            # XXX user/pass of fwd server vs db server ? also impact of autoproxy
+            if ($user) {
+                $go_attr->{Username} = $user;
+                $go_attr->{Password} = $auth;
+            }
+            # delete any attributes we can't serialize (or don't want to)
+            delete @{$go_attr}{qw(Profile HandleError HandleSetErr Callbacks)};
+            # delete any attributes that should only apply to the client-side
+            delete @{$go_attr}{qw(RootClass DbTypeSubclass)};
+
+            $go_connect_method ||= $go_policy->connect_method($remote_dsn, $go_attr) || 'connect';
+            $request_class->new({
+                dbh_connect_call => [ $go_connect_method, $remote_dsn, $user, $auth, $go_attr ],
+            })
+        } or return $drh->set_err($DBI::stderr, "Can't instanciate $request_class: $@");
+
+        my ($dbh, $dbh_inner) = DBI::_new_dbh($drh, {
+            'Name' => $dsn,
+            'USER' => $user,
+            go_transport => $go_transport,
+            go_request => $go_request,
+            go_policy => $go_policy,
+        });
+
+        # mark as inactive temporarily for STORE. Active not set until connected() called.
+        $dbh->STORE(Active => 0);
+
+        # should we ping to check the connection
+        # and fetch dbh attributes
+        my $skip_connect_check = $go_policy->skip_connect_check($attr, $dbh);
+        if (not $skip_connect_check) {
+            if (not $dbh->go_dbh_method(undef, 'ping')) {
+                return undef if $dbh->err; # error already recorded, typically
+                return $dbh->set_err($DBI::stderr, "ping failed");
+            }
+        }
+
+        return $dbh;
+    }
+
+    sub _load_class { # return true or false+$@
+        my $class = shift;
+        (my $pm = $class) =~ s{::}{/}g;
+        $pm .= ".pm";
+        return 1 if eval { require $pm };
+        delete $INC{$pm}; # shouldn't be needed (perl bug?) and assigning undef isn't enough
+        undef; # error in $@
+    }
+
+}
+
+
+{ package DBD::Gofer::db; # ====== DATABASE ======
+ $imp_data_size = 0;
+ use strict;
+ use Carp qw(carp croak);
+
+ my %dbh_local_store_attrib = %DBD::Gofer::xxh_local_store_attrib;
+
+ sub connected {
+ shift->STORE(Active => 1);
+ }
+
+ sub go_dbh_method {
+ my $dbh = shift;
+ my $meta = shift;
+ # @_ now contains ($method_name, @args)
+
+ my $request = $dbh->{go_request};
+ $request->init_request([ wantarray, @_ ], $dbh);
+ ++$dbh->{go_request_count};
+
+ my $go_policy = $dbh->{go_policy};
+ my $dbh_attribute_update = $go_policy->dbh_attribute_update();
+ $request->dbh_attributes( $go_policy->dbh_attribute_list() )
+ if $dbh_attribute_update eq 'every'
+ or $dbh->{go_request_count}==1;
+
+ $request->dbh_last_insert_id_args($meta->{go_last_insert_id_args})
+ if $meta->{go_last_insert_id_args};
+
+ my $transport = $dbh->{go_transport}
+ or return $dbh->set_err($DBI::stderr, "Not connected (no transport)");
+
+ local $transport->{go_cache} = $dbh->{go_cache}
+ if defined $dbh->{go_cache};
+
+ my ($response, $retransmit_sub) = $transport->transmit_request($request);
+ $response ||= $transport->receive_response($request, $retransmit_sub);
+ $dbh->{go_response} = $response
+ or die "No response object returned by $transport";
+
+ die "response '$response' returned by $transport is not a response object"
+ unless UNIVERSAL::isa($response,"DBI::Gofer::Response");
+
+ if (my $dbh_attributes = $response->dbh_attributes) {
+
+ # XXX installed_methods piggybacks on dbh_attributes for now
+ if (my $installed_methods = delete $dbh_attributes->{dbi_installed_methods}) {
+ DBD::Gofer::install_methods_proxy($installed_methods)
+ if $dbh->{go_request_count}==1;
+ }
+
+ # XXX we don't STORE here, we just stuff the value into the attribute cache
+ $dbh->{$_} = $dbh_attributes->{$_}
+ for keys %$dbh_attributes;
+ }
+
+ my $rv = $response->rv;
+ if (my $resultset_list = $response->sth_resultsets) {
+ # dbh method call returned one or more resultsets
+ # (was probably a metadata method like table_info)
+ #
+ # setup an sth but don't execute/forward it
+ my $sth = $dbh->prepare(undef, { go_skip_prepare_check => 1 });
+ # set the sth response to our dbh response
+ (tied %$sth)->{go_response} = $response;
+ # setup the sth with the results in our response
+ $sth->more_results;
+ # and return that new sth as if it came from original request
+ $rv = [ $sth ];
+ }
+ elsif (!$rv) { # should only occur for major transport-level error
+ #carp("no rv in response { @{[ %$response ]} }");
+ $rv = [ ];
+ }
+
+ DBD::Gofer::set_err_from_response($dbh, $response);
+
+ return (wantarray) ? @$rv : $rv->[0];
+ }
+
+
+ # Methods that should be forwarded but can be cached
+ for my $method (qw(
+ tables table_info column_info primary_key_info foreign_key_info statistics_info
+ data_sources type_info_all get_info
+ parse_trace_flags parse_trace_flag
+ func
+ )) {
+ my $policy_name = "cache_$method";
+ my $super_name = "SUPER::$method";
+ my $sub = sub {
+ my $dbh = shift;
+ my $rv;
+
+ # if we know the remote side doesn't override the DBI's default method
+ # then we might as well just call the DBI's default method on the client
+ # (which may, in turn, call other methods that are forwarded, like get_info)
+ if ($dbh->{dbi_default_methods}{$method} && $dbh->{go_policy}->skip_default_methods()) {
+ $dbh->trace_msg(" !! $method: using local default as remote method is also default\n");
+ return $dbh->$super_name(@_);
+ }
+
+ my $cache;
+ my $cache_key;
+ if (my $cache_it = $dbh->{go_policy}->$policy_name(undef, $dbh, @_)) {
+ $cache = $dbh->{go_meta_cache} ||= {}; # keep separate from go_cache
+ $cache_key = sprintf "%s_wa%d(%s)", $policy_name, wantarray||0,
+ join(",\t", map { # XXX basic but sufficient for now
+ !ref($_) ? DBI::neat($_,1e6)
+ : ref($_) eq 'ARRAY' ? DBI::neat_list($_,1e6,",\001")
+ : ref($_) eq 'HASH' ? do { my @k = sort keys %$_; DBI::neat_list([@k,@{$_}{@k}],1e6,",\002") }
+ : do { warn "unhandled argument type ($_)"; $_ }
+ } @_);
+ if ($rv = $cache->{$cache_key}) {
+ $dbh->trace_msg("$method(@_) returning previously cached value ($cache_key)\n",4);
+ my @cache_rv = @$rv;
+ # if it's an sth we have to clone it
+ $cache_rv[0] = $cache_rv[0]->go_clone_sth if UNIVERSAL::isa($cache_rv[0],'DBI::st');
+ return (wantarray) ? @cache_rv : $cache_rv[0];
+ }
+ }
+
+ $rv = [ (wantarray)
+ ? ($dbh->go_dbh_method(undef, $method, @_))
+ : scalar $dbh->go_dbh_method(undef, $method, @_)
+ ];
+
+ if ($cache) {
+ $dbh->trace_msg("$method(@_) caching return value ($cache_key)\n",4);
+ my @cache_rv = @$rv;
+ # if it's an sth we have to clone it
+ #$cache_rv[0] = $cache_rv[0]->go_clone_sth
+ # if UNIVERSAL::isa($cache_rv[0],'DBI::st');
+ $cache->{$cache_key} = \@cache_rv
+ unless UNIVERSAL::isa($cache_rv[0],'DBI::st'); # XXX cloning sth not yet done
+ }
+
+ return (wantarray) ? @$rv : $rv->[0];
+ };
+ no strict 'refs';
+ *$method = $sub;
+ }
+
+
+ # Methods that can use the DBI defaults for some situations/drivers
+ for my $method (qw(
+ quote quote_identifier
+ )) { # XXX keep DBD::Gofer::Policy::Base in sync
+ my $policy_name = "locally_$method";
+ my $super_name = "SUPER::$method";
+ my $sub = sub {
+ my $dbh = shift;
+
+ # if we know the remote side doesn't override the DBI's default method
+ # then we might as well just call the DBI's default method on the client
+ # (which may, in turn, call other methods that are forwarded, like get_info)
+ if ($dbh->{dbi_default_methods}{$method} && $dbh->{go_policy}->skip_default_methods()) {
+ $dbh->trace_msg(" !! $method: using local default as remote method is also default\n");
+ return $dbh->$super_name(@_);
+ }
+
+ # false: use remote gofer
+ # 1: use local DBI default method
+ # code ref: use the code ref
+ my $locally = $dbh->{go_policy}->$policy_name($dbh, @_);
+ if ($locally) {
+ return $locally->($dbh, @_) if ref $locally eq 'CODE';
+ return $dbh->$super_name(@_);
+ }
+ return $dbh->go_dbh_method(undef, $method, @_); # propagate context
+ };
+ no strict 'refs';
+ *$method = $sub;
+ }
+
+
+ # Methods that should always fail
+ for my $method (qw(
+ begin_work commit rollback
+ )) {
+ no strict 'refs';
+ *$method = sub { return shift->set_err($DBI::stderr, "$method not available with DBD::Gofer") }
+ }
+
+
+ sub do {
+ my ($dbh, $sql, $attr, @args) = @_;
+ delete $dbh->{Statement}; # avoid "Modification of non-creatable hash value attempted"
+ $dbh->{Statement} = $sql; # for profiling and ShowErrorStatement
+ my $meta = { go_last_insert_id_args => $attr->{go_last_insert_id_args} };
+ return $dbh->go_dbh_method($meta, 'do', $sql, $attr, @args);
+ }
+
+ sub ping {
+ my $dbh = shift;
+ return $dbh->set_err(0, "can't ping while not connected") # warning
+ unless $dbh->SUPER::FETCH('Active');
+ my $skip_ping = $dbh->{go_policy}->skip_ping();
+ return ($skip_ping) ? 1 : $dbh->go_dbh_method(undef, 'ping', @_);
+ }
+
+ sub last_insert_id {
+ my $dbh = shift;
+ my $response = $dbh->{go_response} or return undef;
+ return $response->last_insert_id;
+ }
+
+ sub FETCH {
+ my ($dbh, $attrib) = @_;
+
+ # FETCH is effectively already cached because the DBI checks the
+ # attribute cache in the handle before calling FETCH
+ # and this FETCH copies the value into the attribute cache
+
+ # forward driver-private attributes (except ours)
+ if ($attrib =~ m/^[a-z]/ && $attrib !~ /^go_/) {
+ my $value = $dbh->go_dbh_method(undef, 'FETCH', $attrib);
+ $dbh->{$attrib} = $value; # XXX forces caching by DBI
+ return $dbh->{$attrib} = $value;
+ }
+
+ # else pass up to DBI to handle
+ return $dbh->SUPER::FETCH($attrib);
+ }
+
+ sub STORE {
+ my ($dbh, $attrib, $value) = @_;
+ if ($attrib eq 'AutoCommit') {
+ croak "Can't enable transactions when using DBD::Gofer" if !$value;
+ return $dbh->SUPER::STORE($attrib => ($value) ? -901 : -900);
+ }
+ return $dbh->SUPER::STORE($attrib => $value)
+ # we handle this attribute locally
+ if $dbh_local_store_attrib{$attrib}
+ # or it's a private_ (application) attribute
+ or $attrib =~ /^private_/
+ # or not yet connected (ie being called by DBI->connect)
+ or not $dbh->FETCH('Active');
+
+ return $dbh->SUPER::STORE($attrib => $value)
+ if $DBD::Gofer::xxh_local_store_attrib_if_same_value{$attrib}
+ && do { # values are the same
+ my $crnt = $dbh->FETCH($attrib);
+ local $^W;
+ (defined($value) ^ defined($crnt))
+ ? 0 # definedness differs
+ : $value eq $crnt;
+ };
+
+ # dbh attributes are set at connect-time - see connect()
+ carp("Can't alter \$dbh->{$attrib} after handle created with DBD::Gofer") if $dbh->FETCH('Warn');
+ return $dbh->set_err($DBI::stderr, "Can't alter \$dbh->{$attrib} after handle created with DBD::Gofer");
+ }
+
+ sub disconnect {
+ my $dbh = shift;
+ $dbh->{go_transport} = undef;
+ $dbh->STORE(Active => 0);
+ }
+
+ # prepare -- build a client-side sth that records the prepare call (and,
+ # later, binds and execute) for replay on the server. No network
+ # round-trip happens here unless the policy requires a prepare check.
+ sub prepare {
+ my ($dbh, $statement, $attr)= @_;
+
+ return $dbh->set_err($DBI::stderr, "Can't prepare when disconnected")
+ unless $dbh->FETCH('Active');
+
+ $attr = { %$attr } if $attr; # copy so we can edit
+
+ my $policy = delete($attr->{go_policy}) || $dbh->{go_policy};
+ my $lii_args = delete $attr->{go_last_insert_id_args};
+ # method to call on the server: explicit attribute, dbh-level default,
+ # policy decision, or plain 'prepare' as the last resort
+ my $go_prepare = delete($attr->{go_prepare_method})
+ || $dbh->{go_prepare_method}
+ || $policy->prepare_method($dbh, $statement, $attr)
+ || 'prepare'; # e.g. for code not using placeholders
+ my $go_cache = delete $attr->{go_cache};
+ # set to undef if there are no attributes left for the actual prepare call
+ $attr = undef if $attr and not %$attr;
+
+ my ($sth, $sth_inner) = DBI::_new_sth($dbh, {
+ Statement => $statement,
+ go_prepare_call => [ 0, $go_prepare, $statement, $attr ],
+ # go_method_calls => [], # autovivs if needed
+ go_request => $dbh->{go_request},
+ go_transport => $dbh->{go_transport},
+ go_policy => $policy,
+ go_last_insert_id_args => $lii_args,
+ go_cache => $go_cache,
+ });
+ $sth->STORE(Active => 0);
+
+ # skip_prepare_check avoids a round-trip just to validate the statement
+ # NOTE(review): $attr is passed twice here -- looks intentional but confirm
+ my $skip_prepare_check = $policy->skip_prepare_check($attr, $dbh, $statement, $attr, $sth);
+ if (not $skip_prepare_check) {
+ $sth->go_sth_method() or return undef;
+ }
+
+ return $sth;
+ }
+
+ # prepare_cached -- as DBI's prepare_cached, but also defaults the
+ # remote-side prepare method to prepare_cached so the server benefits
+ # from statement caching too (unless the caller chose otherwise).
+ sub prepare_cached {
+ my ($dbh, $sql, $attr, $if_active) = @_;
+ my %merged = %{ $attr || {} };
+ $merged{go_prepare_method} ||= 'prepare_cached';
+ return $dbh->SUPER::prepare_cached($sql, \%merged, $if_active);
+ }
+
+ *go_cache = \&DBD::Gofer::go_cache;
+}
+
+
+{ package DBD::Gofer::st; # ====== STATEMENT ======
+ $imp_data_size = 0;
+ use strict;
+
+ my %sth_local_store_attrib = (%DBD::Gofer::xxh_local_store_attrib, NUM_OF_FIELDS => 1);
+
+ # go_sth_method -- the heart of the statement side of the driver:
+ # package the queued method calls (prepare, binds, execute, ...) into a
+ # single request, transmit it via the transport, and unpack the response
+ # into this sth. Returns the remote rv (may be undef on error).
+ sub go_sth_method {
+ my ($sth, $meta) = @_;
+
+ # replay any recorded parameter binds ahead of the execute call
+ if (my $ParamValues = $sth->{ParamValues}) {
+ my $ParamAttr = $sth->{ParamAttr};
+ # XXX the sort here is a hack to work around a DBD::Sybase bug
+ # but only works properly for params 1..9
+ # (reverse because of the unshift)
+ my @params = reverse sort keys %$ParamValues;
+ if (@params > 9 && ($sth->{Database}{go_dsn}||'') =~ /dbi:Sybase/) {
+ # if more than 9 then we need to do a proper numeric sort
+ # also warn to alert user of this issue
+ warn "Sybase param binding order hack in use";
+ @params = sort { $b <=> $a } @params;
+ }
+ for my $p (@params) {
+ # unshift to put binds before execute call
+ unshift @{ $sth->{go_method_calls} },
+ [ 'bind_param', $p, $ParamValues->{$p}, $ParamAttr->{$p} ];
+ }
+ }
+
+ my $dbh = $sth->{Database} or die "panic";
+ ++$dbh->{go_request_count};
+
+ # reuse the request object created at connect time
+ my $request = $sth->{go_request};
+ $request->init_request($sth->{go_prepare_call}, $sth);
+ $request->sth_method_calls(delete $sth->{go_method_calls})
+ if $sth->{go_method_calls};
+ $request->sth_result_attr({}); # (currently) also indicates this is an sth request
+
+ $request->dbh_last_insert_id_args($meta->{go_last_insert_id_args})
+ if $meta->{go_last_insert_id_args};
+
+ # ask the server to return dbh attributes on every request or only the
+ # first, depending on the dbh_attribute_update policy
+ my $go_policy = $sth->{go_policy};
+ my $dbh_attribute_update = $go_policy->dbh_attribute_update();
+ $request->dbh_attributes( $go_policy->dbh_attribute_list() )
+ if $dbh_attribute_update eq 'every'
+ or $dbh->{go_request_count}==1;
+
+ my $transport = $sth->{go_transport}
+ or return $sth->set_err($DBI::stderr, "Not connected (no transport)");
+
+ # honour a per-sth cache override, if one was given to prepare()
+ local $transport->{go_cache} = $sth->{go_cache}
+ if defined $sth->{go_cache};
+
+ my ($response, $retransmit_sub) = $transport->transmit_request($request);
+ $response ||= $transport->receive_response($request, $retransmit_sub);
+ $sth->{go_response} = $response
+ or die "No response object returned by $transport";
+ $dbh->{go_response} = $response; # mainly for last_insert_id
+
+ if (my $dbh_attributes = $response->dbh_attributes) {
+ # XXX we don't STORE here, we just stuff the value into the attribute cache
+ $dbh->{$_} = $dbh_attributes->{$_}
+ for keys %$dbh_attributes;
+ # record which values were returned, so we know that we have
+ # already fetched them (see the dbh FETCH method)
+ $dbh->{go_dbh_attributes_fetched} = $dbh_attributes;
+ }
+
+ my $rv = $response->rv; # may be undef on error
+ if ($response->sth_resultsets) {
+ # setup first resultset - including sth attributes
+ $sth->more_results;
+ }
+ else {
+ $sth->STORE(Active => 0);
+ $sth->{go_rows} = $rv;
+ }
+ # set error/warn/info (after more_results as that'll clear err)
+ DBD::Gofer::set_err_from_response($sth, $response);
+
+ return $rv;
+ }
+
+
+ # bind_param -- record the value (and optional attr) locally; the actual
+ # bind calls are queued and replayed on the server by go_sth_method().
+ sub bind_param {
+ my ($sth, $p_num, $p_value, $p_attr) = @_;
+ $sth->{ParamValues}{$p_num} = $p_value;
+ if (defined $p_attr) { # attr is sticky if not explicitly set
+ $sth->{ParamAttr}{$p_num} = $p_attr;
+ }
+ return 1;
+ }
+
+
+ # execute -- queue any passed bind values plus the execute call, then
+ # ship the whole batch to the server in one round-trip.
+ sub execute {
+ my $sth = shift;
+ # after the shift, @_ holds the bind values for params 1..N
+ $sth->bind_param($_, $_[$_-1]) for (1..@_);
+ push @{ $sth->{go_method_calls} }, [ 'execute' ];
+ my $meta = { go_last_insert_id_args => $sth->{go_last_insert_id_args} };
+ return $sth->go_sth_method($meta);
+ }
+
+
+ # more_results -- switch this sth to the next resultset contained in the
+ # Gofer response, loading its rows and attributes into the handle.
+ # Returns the sth, or undef when no more resultsets remain.
+ sub more_results {
+ my $sth = shift;
+
+ $sth->finish;
+
+ my $response = $sth->{go_response} or do {
+ # e.g., we haven't sent a request yet (ie prepare then more_results)
+ $sth->trace_msg(" No response object present", 3);
+ return;
+ };
+
+ my $resultset_list = $response->sth_resultsets
+ or return $sth->set_err($DBI::stderr, "No sth_resultsets");
+
+ my $meta = shift @$resultset_list
+ or return undef; # no more result sets
+ #warn "more_results: ".Data::Dumper::Dumper($meta);
+
+ # pull out the special non-attributes first
+ my ($rowset, $err, $errstr, $state)
+ = delete @{$meta}{qw(rowset err errstr state)};
+
+ # copy meta attributes into attribute cache
+ my $NUM_OF_FIELDS = delete $meta->{NUM_OF_FIELDS};
+ $sth->STORE('NUM_OF_FIELDS', $NUM_OF_FIELDS);
+ # XXX need to use STORE for some?
+ $sth->{$_} = $meta->{$_} for keys %$meta;
+
+ # a select-like resultset: stash the rows (and any deferred error)
+ # for fetchrow_arrayref / fetchall_arrayref to consume
+ if (($NUM_OF_FIELDS||0) > 0) {
+ $sth->{go_rows} = ($rowset) ? @$rowset : -1;
+ $sth->{go_current_rowset} = $rowset;
+ $sth->{go_current_rowset_err} = [ $err, $errstr, $state ]
+ if defined $err;
+ $sth->STORE(Active => 1) if $rowset;
+ }
+
+ return $sth;
+ }
+
+
+ # go_clone_sth -- clone an (un-fetched-from) sth, effectively undoing
+ # the initial more_results; intended for caching returned handles such
+ # as table_info results. NOTE: deliberately unfinished -- the
+ # unconditional die below makes the final return unreachable.
+ sub go_clone_sth {
+ my ($sth1) = @_;
+ # clone an (un-fetched-from) sth - effectively undoes the initial more_results
+ # not 100% so just for use in caching returned sth e.g. table_info
+ my $sth2 = $sth1->{Database}->prepare($sth1->{Statement}, { go_skip_prepare_check => 1 });
+ $sth2->STORE($_, $sth1->{$_}) for qw(NUM_OF_FIELDS Active);
+ my $sth2_inner = tied %$sth2;
+ $sth2_inner->{$_} = $sth1->{$_} for qw(NUM_OF_PARAMS FetchHashKeyName);
+ die "not fully implemented yet";
+ return $sth2;
+ }
+
+
+ # fetchrow_arrayref -- hand out the next row from the locally cached
+ # rowset. All rows arrived in the single Gofer response, so no network
+ # traffic happens here. Returns undef (after finish) when exhausted.
+ sub fetchrow_arrayref {
+ my ($sth) = @_;
+ my $resultset = $sth->{go_current_rowset} || do {
+ # should only happen if fetch called after execute failed
+ my $rowset_err = $sth->{go_current_rowset_err}
+ || [ 1, 'no result set (did execute fail)' ];
+ return $sth->set_err( @$rowset_err );
+ };
+ return $sth->_set_fbav(shift @$resultset) if @$resultset;
+ $sth->finish; # no more data so finish
+ return undef;
+ }
+ *fetch = \&fetchrow_arrayref; # alias
+
+
+ # fetchall_arrayref -- fast path for the common case: when no slice and
+ # no row limit are requested we can return the entire cached rowset
+ # directly, since all rows arrived in the single Gofer response.
+ # Otherwise defer to the DBI default implementation, which fetches
+ # row-by-row via our fetchrow_arrayref above.
+ # (Fix: removed the unused local $mode variable.)
+ sub fetchall_arrayref {
+ my ($sth, $slice, $max_rows) = @_;
+ my $resultset = $sth->{go_current_rowset} || do {
+ # should only happen if fetch called after execute failed
+ my $rowset_err = $sth->{go_current_rowset_err}
+ || [ 1, 'no result set (did execute fail)' ];
+ return $sth->set_err( @$rowset_err );
+ };
+ return $sth->SUPER::fetchall_arrayref($slice, $max_rows)
+ if ref($slice) or defined $max_rows;
+ $sth->finish; # no more data after this so finish
+ return $resultset;
+ }
+
+
+ # rows -- the row count captured from the last executed request.
+ sub rows {
+ my ($sth) = @_;
+ return $sth->{go_rows};
+ }
+
+
+ # STORE -- intercept sth attribute assignment. Only a small set of
+ # attributes (plus private_* application attributes) can meaningfully be
+ # stored locally; anything else cannot affect the remote handle, so we
+ # warn, store locally anyway, and record an error on the handle.
+ sub STORE {
+ my ($sth, $attrib, $value) = @_;
+
+ return $sth->SUPER::STORE($attrib => $value)
+ if $sth_local_store_attrib{$attrib} # handle locally
+ # or it's a private_ (application) attribute
+ or $attrib =~ /^private_/;
+
+ # otherwise warn but do it anyway
+ # this will probably need refining later
+ my $msg = "Altering \$sth->{$attrib} won't affect proxied handle";
+ Carp::carp($msg) if $sth->FETCH('Warn');
+
+ # XXX could perhaps do
+ # push @{ $sth->{go_method_calls} }, [ 'STORE', $attrib, $value ]
+ # if not $sth->FETCH('Executed');
+ # but how to handle repeat executions? How to we know when an
+ # attribute is being set to affect the current resultset or the
+ # next execution?
+ # Could just always use go_method_calls I guess.
+
+ # do the store locally anyway, just in case
+ $sth->SUPER::STORE($attrib => $value);
+
+ return $sth->set_err($DBI::stderr, $msg);
+ }
+
+ # sub bind_param_array
+ # we use DBI's default, which sets $sth->{ParamArrays}{$param} = $value
+ # and calls bind_param($param, undef, $attr) if $attr.
+
+ # execute_array -- queue the per-parameter array binds plus a single
+ # execute_array call; the whole batch is serialized and executed
+ # server-side in one round-trip. Note $attr is also forwarded to
+ # go_sth_method() as its $meta argument.
+ sub execute_array {
+ my $sth = shift;
+ my $attr = shift;
+ # remaining args are per-parameter value arrays for params 1..N
+ $sth->bind_param_array($_, $_[$_-1]) for (1..@_);
+ push @{ $sth->{go_method_calls} }, [ 'execute_array', $attr ];
+ return $sth->go_sth_method($attr);
+ }
+
+ *go_cache = \&DBD::Gofer::go_cache;
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer - A stateless-proxy driver for communicating with a remote DBI
+
+=head1 SYNOPSIS
+
+ use DBI;
+
+ $original_dsn = "dbi:..."; # your original DBI Data Source Name
+
+ $dbh = DBI->connect("dbi:Gofer:transport=$transport;...;dsn=$original_dsn",
+ $user, $passwd, \%attributes);
+
+ ... use $dbh as if it was connected to $original_dsn ...
+
+
+The C<transport=$transport> part specifies the name of the module to use to
+transport the requests to the remote DBI. If $transport doesn't contain any
+double colons then it's prefixed with C<DBD::Gofer::Transport::>.
+
+The C<dsn=$original_dsn> part I<must be the last element> of the DSN because
+everything after C<dsn=> is assumed to be the DSN that the remote DBI should
+use.
+
+The C<...> represents attributes that influence the operation of the Gofer
+driver or transport. These are described below or in the documentation of the
+transport module being used.
+
+=encoding ISO8859-1
+
+=head1 DESCRIPTION
+
+DBD::Gofer is a DBI database driver that forwards requests to another DBI
+driver, usually in a separate process, often on a separate machine. It tries to
+be as transparent as possible so it appears that you are using the remote
+driver directly.
+
+DBD::Gofer is very similar to DBD::Proxy. The major difference is that with
+DBD::Gofer no state is maintained on the remote end. That means every
+request contains all the information needed to create the required state. (So,
+for example, every request includes the DSN to connect to.) Each request can be
+sent to any available server. The server executes the request and returns a
+single response that includes all the data.
+
+This is very similar to the way http works as a stateless protocol for the web.
+Each request from your web browser can be handled by a different web server process.
+
+=head2 Use Cases
+
+This may seem like pointless overhead but there are situations where this is a
+very good thing. Let's consider a specific case.
+
+Imagine using DBD::Gofer with an http transport. Your application calls
+connect(), prepare("select * from table where foo=?"), bind_param(), and execute().
+At this point DBD::Gofer builds a request containing all the information
+about the method calls. It then uses the httpd transport to send that request
+to an apache web server.
+
+This 'dbi execute' web server executes the request (using DBI::Gofer::Execute
+and related modules) and builds a response that contains all the rows of data,
+if the statement returned any, along with all the attributes that describe the
+results, such as $sth->{NAME}. This response is sent back to DBD::Gofer which
+unpacks it and presents it to the application as if it had executed the
+statement itself.
+
+=head2 Advantages
+
+Okay, but you still don't see the point? Well let's consider what we've gained:
+
+=head3 Connection Pooling and Throttling
+
+The 'dbi execute' web server leverages all the functionality of web
+infrastructure in terms of load balancing, high-availability, firewalls, access
+management, proxying, caching.
+
+At its most basic level you get a configurable pool of persistent database connections.
+
+=head3 Simple Scaling
+
+Got thousands of processes all trying to connect to the database? You can use
+DBD::Gofer to connect them to your smaller pool of 'dbi execute' web servers instead.
+
+=head3 Caching
+
+Client-side caching is as simple as adding "C<cache=1>" to the DSN.
+This feature alone can be worth using DBD::Gofer for.
+
+=head3 Fewer Network Round-trips
+
+DBD::Gofer sends as few requests as possible (dependent on the policy being used).
+
+=head3 Thin Clients / Unsupported Platforms
+
+You no longer need drivers for your database on every system. DBD::Gofer is pure perl.
+
+=head1 CONSTRAINTS
+
+There are some natural constraints imposed by the DBD::Gofer 'stateless' approach.
+But not many:
+
+=head2 You can't change database handle attributes after connect()
+
+You can't change database handle attributes after you've connected.
+Use the connect() call to specify all the attribute settings you want.
+
+This is because it's critical that when a request is complete the database
+handle is left in the same state it was when first connected.
+
+An exception is made for attributes with names starting "C<private_>":
+They can be set after connect() but the change is only applied locally.
+
+=head2 You can't change statement handle attributes after prepare()
+
+You can't change statement handle attributes after prepare.
+
+An exception is made for attributes with names starting "C<private_>":
+They can be set after prepare() but the change is only applied locally.
+
+=head2 You can't use transactions
+
+AutoCommit only. Transactions aren't supported.
+
+(In theory transactions could be supported when using a transport that
+maintains a connection, like C<stream> does. If you're interested in this
+please get in touch via dbi-dev@perl.org)
+
+=head2 You can't call driver-private sth methods
+
+But that's rarely needed anyway.
+
+=head1 GENERAL CAVEATS
+
+A few important things to keep in mind when using DBD::Gofer:
+
+=head2 Temporary tables, locks, and other per-connection persistent state
+
+You shouldn't expect any per-session state to persist between requests.
+This includes locks and temporary tables.
+
+Because the server-side may execute your requests via different
+database connections, you can't rely on any per-connection persistent state,
+such as temporary tables, being available from one request to the next.
+
+This is an easy trap to fall into. A good way to check for this is to test your
+code with a Gofer policy package that sets the C<connect_method> policy to
+'connect' to force a new connection for each request. The C<pedantic> policy does this.
+
+=head2 Driver-private Database Handle Attributes
+
+Some driver-private dbh attributes may not be available if the driver has not
+implemented the private_attribute_info() method (added in DBI 1.54).
+
+=head2 Driver-private Statement Handle Attributes
+
+Driver-private sth attributes can be set in the prepare() call. TODO
+
+Some driver-private sth attributes may not be available if the driver has not
+implemented the private_attribute_info() method (added in DBI 1.54).
+
+=head2 Multiple Resultsets
+
+Multiple resultsets are supported only if the driver supports the more_results() method
+(an exception is made for DBD::Sybase).
+
+=head2 Statement activity that also updates dbh attributes
+
+Some drivers may update one or more dbh attributes after performing activity on
+a child sth. For example, DBD::mysql provides $dbh->{mysql_insertid} in addition to
+$sth->{mysql_insertid}. Currently mysql_insertid is supported via a hack but a
+more general mechanism is needed for other drivers to use.
+
+=head2 Methods that report an error always return undef
+
+With DBD::Gofer, a method that sets an error always returns an undef or empty list.
+That shouldn't be a problem in practice because the DBI doesn't define any
+methods that return meaningful values while also reporting an error.
+
+=head2 Subclassing only applies to client-side
+
+The RootClass and DbTypeSubclass attributes are not passed to the Gofer server.
+
+=head1 CAVEATS FOR SPECIFIC METHODS
+
+=head2 last_insert_id
+
+To enable use of last_insert_id you need to indicate to DBD::Gofer that you'd
+like to use it. You do that by adding a C<go_last_insert_id_args> attribute to
+the do() or prepare() method calls. For example:
+
+ $dbh->do($sql, { go_last_insert_id_args => [...] });
+
+or
+
+ $sth = $dbh->prepare($sql, { go_last_insert_id_args => [...] });
+
+The array reference should contain the args that you want passed to the
+last_insert_id() method.
+
+=head2 execute_for_fetch
+
+The array methods bind_param_array() and execute_array() are supported.
+When execute_array() is called the data is serialized and executed in a single
+round-trip to the Gofer server. This makes it very fast, but requires enough
+memory to store all the serialized data.
+
+The execute_for_fetch() method currently isn't optimised, it uses the DBI
+fallback behaviour of executing each tuple individually.
+(It could be implemented as a wrapper for execute_array() - patches welcome.)
+
+=head1 TRANSPORTS
+
+DBD::Gofer doesn't concern itself with transporting requests and responses to and fro.
+For that it uses special Gofer transport modules.
+
+Gofer transport modules usually come in pairs: one for the 'client' DBD::Gofer
+driver to use and one for the remote 'server' end. They have very similar names:
+
+ DBD::Gofer::Transport::<foo>
+ DBI::Gofer::Transport::<foo>
+
+Sometimes the transports on the DBD and DBI sides may have different names. For
+example DBD::Gofer::Transport::http is typically used with DBI::Gofer::Transport::mod_perl
+(DBD::Gofer::Transport::http and DBI::Gofer::Transport::mod_perl modules are
+part of the GoferTransport-http distribution).
+
+=head2 Bundled Transports
+
+Several transport modules are provided with DBD::Gofer:
+
+=head3 null
+
+The null transport is the simplest of them all. It doesn't actually transport the request anywhere.
+It just serializes (freezes) the request into a string, then thaws it back into
+a data structure before passing it to DBI::Gofer::Execute to execute. The same
+freeze and thaw is applied to the results.
+
+The null transport is the best way to test if your application will work with Gofer.
+Just set the DBI_AUTOPROXY environment variable to "C<dbi:Gofer:transport=null;policy=pedantic>"
+(see L</Using DBI_AUTOPROXY> below) and run your application, or ideally its test suite, as usual.
+
+It doesn't take any parameters.
+
+=head3 pipeone
+
+The pipeone transport launches a subprocess for each request. It passes in the
+request and reads the response.
+
+The fact that a new subprocess is started for each request ensures that the
+server side is truly stateless. While this does make the transport I<very> slow,
+it is useful as a way to test that your application doesn't depend on
+per-connection state, such as temporary tables, persisting between requests.
+
+It's also useful both as a proof of concept and as a base class for the stream
+driver.
+
+=head3 stream
+
+The stream driver also launches a subprocess and writes requests and reads
+responses, like the pipeone transport. In this case, however, the subprocess
+is expected to handle more than one request. (Though it will be automatically
+restarted if it exits.)
+
+This is the first transport that is truly useful because it can launch the
+subprocess on a remote machine using C<ssh>. This means you can now use DBD::Gofer
+to easily access any database that's accessible from any system you can log in to.
+You also get all the benefits of ssh, including encryption and optional compression.
+
+See L</Using DBI_AUTOPROXY> below for an example.
+
+=head2 Other Transports
+
+Implementing a Gofer transport is I<very> simple, and more transports are very welcome.
+Just take a look at any existing transports that are similar to your needs.
+
+=head3 http
+
+See the GoferTransport-http distribution on CPAN: http://search.cpan.org/dist/GoferTransport-http/
+
+=head3 Gearman
+
+I know Ask Bjørn Hansen has implemented a transport for the C<gearman> distributed
+job system, though it's not on CPAN at the time of writing this.
+
+=head1 CONNECTING
+
+Simply prefix your existing DSN with "C<dbi:Gofer:transport=$transport;dsn=>"
+where $transport is the name of the Gofer transport you want to use (see L</TRANSPORTS>).
+The C<transport> and C<dsn> attributes must be specified and the C<dsn> attributes must be last.
+
+Other attributes can be specified in the DSN to configure DBD::Gofer and/or the
+Gofer transport module being used. The main attributes after C<transport>, are
+C<url> and C<policy>. These and other attributes are described below.
+
+=head2 Using DBI_AUTOPROXY
+
+The simplest way to try out DBD::Gofer is to set the DBI_AUTOPROXY environment variable.
+In this case you don't include the C<dsn=> part. For example:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=null"
+
+or, for a more useful example, try:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=stream;url=ssh:user@example.com"
+
+=head2 Connection Attributes
+
+These attributes can be specified in the DSN. They can also be passed in the
+\%attr parameter of the DBI connect method by adding a "C<go_>" prefix to the name.
+
+=head3 transport
+
+Specifies the Gofer transport class to use. Required. See L</TRANSPORTS> above.
+
+If the value does not include C<::> then "C<DBD::Gofer::Transport::>" is prefixed.
+
+The transport object can be accessed via $h->{go_transport}.
+
+=head3 dsn
+
+Specifies the DSN for the remote side to connect to. Required, and must be last.
+
+=head3 url
+
+Used to tell the transport where to connect to. The exact form of the value depends on the transport used.
+
+=head3 policy
+
+Specifies the policy to use. See L</CONFIGURING BEHAVIOUR POLICY>.
+
+If the value does not include C<::> then "C<DBD::Gofer::Policy>" is prefixed.
+
+The policy object can be accessed via $h->{go_policy}.
+
+=head3 timeout
+
+Specifies a timeout, in seconds, to use when waiting for responses from the server side.
+
+=head3 retry_limit
+
+Specifies the number of times a failed request will be retried. Default is 0.
+
+=head3 retry_hook
+
+Specifies a code reference to be called to decide if a failed request should be retried.
+The code reference is called like this:
+
+ $transport = $h->{go_transport};
+ $retry = $transport->go_retry_hook->($request, $response, $transport);
+
+If it returns true then the request will be retried, up to the C<retry_limit>.
+If it returns a false but defined value then the request will not be retried.
+If it returns undef then the default behaviour will be used, as if C<retry_hook>
+had not been specified.
+
+The default behaviour is to retry requests where $request->is_idempotent is true,
+or the error message matches C</induced by DBI_GOFER_RANDOM/>.
+
+=head3 cache
+
+Specifies that client-side caching should be performed. The value is the name
+of a cache class to use.
+
+Any class implementing get($key) and set($key, $value) methods can be used.
+That includes a great many powerful caching classes on CPAN, including the
+Cache and Cache::Cache distributions.
+
+You can use "C<cache=1>" as a shortcut for "C<cache=DBI::Util::CacheMemory>".
+See L<DBI::Util::CacheMemory> for a description of this simple fast default cache.
+
+The cache object can be accessed via $h->go_cache. For example:
+
+ $dbh->go_cache->clear; # free up memory being used by the cache
+
+The cache keys are the frozen (serialized) requests, and the values are the
+frozen responses.
+
+The default behaviour is to only use the cache for requests where
+$request->is_idempotent is true (i.e., the dbh has the ReadOnly attribute set
+or the SQL statement is obviously a SELECT without a FOR UPDATE clause.)
+
+For even more control you can use the C<go_cache> attribute to pass in an
+instantiated cache object. Individual methods, including prepare(), can also
+specify alternative caches via the C<go_cache> attribute. For example, to
+specify no caching for a particular query, you could use
+
+ $sth = $dbh->prepare( $sql, { go_cache => 0 } );
+
+This can be used to implement different caching policies for different statements.
+
+It's interesting to note that DBD::Gofer can be used to add client-side caching
+to any (gofer compatible) application, with no code changes and no need for a
+gofer server. Just set the DBI_AUTOPROXY environment variable like this:
+
+ DBI_AUTOPROXY='dbi:Gofer:transport=null;cache=1'
+
+=head1 CONFIGURING BEHAVIOUR POLICY
+
+DBD::Gofer supports a 'policy' mechanism that allows you to fine-tune the number of round-trips to the Gofer server.
+The policies are grouped into classes (which may be subclassed) and referenced by the name of the class.
+
+The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
+packages and describes all the available policies.
+
+Three policy packages are supplied with DBD::Gofer:
+
+L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
+makes more round-trips to the Gofer server.
+
+L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.
+
+L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.
+
+Generally the default C<classic> policy is fine. When first testing an existing
+application with Gofer it is a good idea to start with the C<pedantic> policy
+first and then switch to C<classic> or a custom policy, for final testing.
+
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 ACKNOWLEDGEMENTS
+
+The development of DBD::Gofer and related modules was sponsored by
+Shopzilla.com (L<http://Shopzilla.com>), where I currently work.
+
+=head1 SEE ALSO
+
+L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.
+
+L<DBI::Gofer::Transport::Base>, L<DBD::Gofer::Policy::Base>.
+
+L<DBI>
+
+=head1 Caveats for specific drivers
+
+This section aims to record issues to be aware of when using Gofer with specific drivers.
+It usually only documents issues that are not natural consequences of the limitations
+of the Gofer approach - as documented above.
+
+=head1 TODO
+
+This is just a random brain dump... (There's more in the source of the Changes file, not the pod)
+
+Document policy mechanism
+
+Add mechanism for transports to list config params and for Gofer to apply any that match (and warn if any left over?)
+
+Driver-private sth attributes - set via prepare() - change DBI spec
+
+add hooks into transport base class for checking & updating a result set cache
+ ie via a standard cache interface such as:
+ http://search.cpan.org/~robm/Cache-FastMmap/FastMmap.pm
+ http://search.cpan.org/~bradfitz/Cache-Memcached/lib/Cache/Memcached.pm
+ http://search.cpan.org/~dclinton/Cache-Cache/
+ http://search.cpan.org/~cleishman/Cache/
+Also caching instructions could be passed through the httpd transport layer
+in such a way that appropriate http cache headers are added to the results
+so that web caches (squid etc) could be used to implement the caching.
+(MUST require the use of GET rather than POST requests.)
+
+Rework handling of installed_methods to not piggyback on dbh_attributes?
+
+Perhaps support transactions for transports where it's possible (ie null and stream)?
+Would make stream transport (ie ssh) more useful to more people.
+
+Make sth_result_attr more like dbh_attributes (using '*' etc)
+
+Add @val = FETCH_many(@names) to DBI in C and use in Gofer/Execute?
+
+Implement _new_sth in C.
+
+=cut
diff --git a/lib/DBD/Gofer/Policy/Base.pm b/lib/DBD/Gofer/Policy/Base.pm
new file mode 100644
index 0000000..1725b03
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/Base.pm
@@ -0,0 +1,162 @@
+package DBD::Gofer::Policy::Base;
+
+# $Id: Base.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+use Carp;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+our $AUTOLOAD;
+
+my %policy_defaults = (
+ # force connect method (unless overridden by go_connect_method=>'...' attribute)
+ # if false: call same method on client as on server
+ connect_method => 'connect',
+ # force prepare method (unless overridden by go_prepare_method=>'...' attribute)
+ # if false: call same method on client as on server
+ prepare_method => 'prepare',
+ skip_connect_check => 0,
+ skip_default_methods => 0,
+ skip_prepare_check => 0,
+ skip_ping => 0,
+ dbh_attribute_update => 'every',
+ dbh_attribute_list => ['*'],
+ locally_quote => 0,
+ locally_quote_identifier => 0,
+ cache_parse_trace_flags => 1,
+ cache_parse_trace_flag => 1,
+ cache_data_sources => 1,
+ cache_type_info_all => 1,
+ cache_tables => 0,
+ cache_table_info => 0,
+ cache_column_info => 0,
+ cache_primary_key_info => 0,
+ cache_foreign_key_info => 0,
+ cache_statistics_info => 0,
+ cache_get_info => 0,
+ cache_func => 0,
+);
+
+my $base_policy_file = $INC{"DBD/Gofer/Policy/Base.pm"};
+
+__PACKAGE__->create_policy_subs(\%policy_defaults);
+
+ # create_policy_subs -- install one accessor method per policy item into
+ # $class. Each generated method returns the per-call override taken from
+ # a go_<name> entry in an attribute hash, if one is present, otherwise
+ # the supplied default value.
+ sub create_policy_subs {
+ my ($class, $policy_defaults) = @_;
+
+ for my $policy_name (keys %$policy_defaults) {
+ my $default = $policy_defaults->{$policy_name};
+ my $attr_name = "go_$policy_name";
+ my $method = sub {
+ # called as $policy->foo($attr, ...)
+ # an attribute hash entry overrides the policy default
+ return (ref $_[1] && exists $_[1]->{$attr_name})
+ ? $_[1]->{$attr_name}
+ : $default;
+ };
+ no strict 'refs';
+ *{"${class}::${policy_name}"} = $method;
+ }
+ }
+
+ # AUTOLOAD -- catch calls to unknown policy names: warn, then install a
+ # stub sub returning undef under that name so repeated calls are silent
+ # and cheap.
+ sub AUTOLOAD {
+ carp "Unknown policy name $AUTOLOAD used";
+ # only warn once
+ no strict 'refs';
+ *$AUTOLOAD = sub { undef };
+ return undef;
+ }
+
+ # new -- construct an (empty) policy object; policy behaviour lives in
+ # the generated accessor methods, not in instance state.
+ # NOTE(review): the $args parameter is currently accepted but ignored.
+ sub new {
+ my ($class, $args) = @_;
+ return bless {}, $class;
+ }
+
+ # Explicit no-op DESTROY so object destruction isn't routed through
+ # AUTOLOAD above (which would warn about an "unknown policy name").
+ sub DESTROY { };
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::Base - Base class for DBD::Gofer policies
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=...", ...)
+
+=head1 DESCRIPTION
+
+DBD::Gofer can be configured via a 'policy' mechanism that allows you to
+fine-tune the number of round-trips to the Gofer server. The policies are
+grouped into classes (which may be subclassed) and referenced by the name of
+the class.
+
+The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
+classes and describes all the individual policy items.
+
+The Base policy is not used directly. You should use a policy class derived from it.
+
+=head1 POLICY CLASSES
+
+Three policy classes are supplied with DBD::Gofer:
+
+L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
+makes more round-trips to the Gofer server.
+
+L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.
+
+L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.
+
+Generally the default C<classic> policy is fine. When first testing an existing
+application with Gofer it is a good idea to start with the C<pedantic> policy
+first and then switch to C<classic> or a custom policy, for final testing.
+
+=head1 POLICY ITEMS
+
+These are temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+See the source code to this module for more details.
+
+=head1 POLICY CUSTOMIZATION
+
+XXX This area of DBD::Gofer is subject to change.
+
+There are three ways to customize policies:
+
+Policy classes are designed to influence the overall behaviour of DBD::Gofer
+with existing, unaltered programs, so they work in a reasonably optimal way
+without requiring code changes. You can implement new policy classes as
+subclasses of existing policies.
+
+In many cases individual policy items can be overridden on a case-by-case basis
+within your application code. You do this by passing a corresponding
+C<< go_<policy_name> >> attribute into DBI methods by your application code.
+This lets you fine-tune the behaviour for special cases.
+
+The policy items are implemented as methods. In many cases the methods are
+passed parameters relating to the DBD::Gofer code being executed. This means
+the policy can implement dynamic behaviour that varies depending on the
+particular circumstances, such as the particular statement being executed.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Policy/classic.pm b/lib/DBD/Gofer/Policy/classic.pm
new file mode 100644
index 0000000..8f828f0
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/classic.pm
@@ -0,0 +1,79 @@
+package DBD::Gofer::Policy::classic;
+
+# $Id: classic.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+use base qw(DBD::Gofer::Policy::Base);
+
+__PACKAGE__->create_policy_subs({
+
+ # always use connect_cached on server
+ connect_method => 'connect_cached',
+
+ # use same methods on server as is called on client
+ prepare_method => '',
+
+ # don't skip the connect check since that also sets dbh attributes
+ # although this makes connect more expensive, that's partly offset
+ # by skip_ping=>1 below, which makes connect_cached very fast.
+ skip_connect_check => 0,
+
+ # most code doesn't rely on sth attributes being set after prepare
+ skip_prepare_check => 1,
+
+ # we're happy to use local method if that's the same as the remote
+ skip_default_methods => 1,
+
+ # ping is not important for DBD::Gofer and most transports
+ skip_ping => 1,
+
+ # only update dbh attributes on first contact with server
+ dbh_attribute_update => 'first',
+
+ # we'd like to set locally_* but can't because drivers differ
+
+ # get_info results usually don't change
+ cache_get_info => 1,
+});
+
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::classic - The 'classic' policy for DBD::Gofer
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=classic", ...)
+
+The C<classic> policy is the default DBD::Gofer policy, so need not be included in the DSN.
+
+=head1 DESCRIPTION
+
+Temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Policy/pedantic.pm b/lib/DBD/Gofer/Policy/pedantic.pm
new file mode 100644
index 0000000..6829bea
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/pedantic.pm
@@ -0,0 +1,53 @@
+package DBD::Gofer::Policy::pedantic;
+
+# $Id: pedantic.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+use base qw(DBD::Gofer::Policy::Base);
+
+# the 'pedantic' policy is the same as the Base policy
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::pedantic - The 'pedantic' policy for DBD::Gofer
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=pedantic", ...)
+
+=head1 DESCRIPTION
+
+The C<pedantic> policy tries to be as transparent as possible. To do this it
+makes round-trips to the server for almost every DBI method call.
+
+This is the best policy to use when first testing existing code with Gofer.
+Once it's working well you should consider moving to the C<classic> policy or defining your own policy class.
+
+Temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Policy/rush.pm b/lib/DBD/Gofer/Policy/rush.pm
new file mode 100644
index 0000000..9cfd582
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/rush.pm
@@ -0,0 +1,90 @@
+package DBD::Gofer::Policy::rush;
+
+# $Id: rush.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+use base qw(DBD::Gofer::Policy::Base);
+
+__PACKAGE__->create_policy_subs({
+
+ # always use connect_cached on server
+ connect_method => 'connect_cached',
+
+ # use same methods on server as is called on client
+ # (because code not using placeholders would bloat the sth cache)
+ prepare_method => '',
+
+ # Skipping the connect check is fast, but it also skips
+ # fetching the remote dbh attributes!
+ # Make sure that your application doesn't need access to dbh attributes.
+ skip_connect_check => 1,
+
+ # most code doesn't rely on sth attributes being set after prepare
+ skip_prepare_check => 1,
+
+ # we're happy to use local method if that's the same as the remote
+ skip_default_methods => 1,
+
+ # ping is almost meaningless for DBD::Gofer and most transports anyway
+ skip_ping => 1,
+
+ # don't update dbh attributes at all
+ # XXX actually we currently need dbh_attribute_update for skip_default_methods to work
+ # and skip_default_methods is more valuable to us than the cost of dbh_attribute_update
+ dbh_attribute_update => 'none', # actually means 'first' currently
+ #dbh_attribute_list => undef,
+
+ # we'd like to set locally_* but can't because drivers differ
+
+ # in a rush assume metadata doesn't change
+ cache_tables => 1,
+ cache_table_info => 1,
+ cache_column_info => 1,
+ cache_primary_key_info => 1,
+ cache_foreign_key_info => 1,
+ cache_statistics_info => 1,
+ cache_get_info => 1,
+});
+
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::rush - The 'rush' policy for DBD::Gofer
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=rush", ...)
+
+=head1 DESCRIPTION
+
+The C<rush> policy tries to make as few round-trips as possible.
+It's the opposite end of the policy spectrum to the C<pedantic> policy.
+
+Temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Transport/Base.pm b/lib/DBD/Gofer/Transport/Base.pm
new file mode 100644
index 0000000..fe0d078
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/Base.pm
@@ -0,0 +1,410 @@
+package DBD::Gofer::Transport::Base;
+
+# $Id: Base.pm 14120 2010-06-07 19:52:19Z hmbrand $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use base qw(DBI::Gofer::Transport::Base);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 14120 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ trace
+ go_dsn
+ go_url
+ go_policy
+ go_timeout
+ go_retry_hook
+ go_retry_limit
+ go_cache
+ cache_hit
+ cache_miss
+ cache_store
+));
+__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
+ meta
+));
+
+
+sub new {
+ my ($class, $args) = @_;
+ $args->{$_} = 0 for (qw(cache_hit cache_miss cache_store));
+ $args->{keep_meta_frozen} ||= 1 if $args->{go_cache};
+ #warn "args @{[ %$args ]}\n";
+ return $class->SUPER::new($args);
+}
+
+
+sub _init_trace { $ENV{DBD_GOFER_TRACE} || 0 }
+
+
+sub new_response {
+ my $self = shift;
+ return DBI::Gofer::Response->new(@_);
+}
+
+
+sub transmit_request {
+ my ($self, $request) = @_;
+ my $trace = $self->trace;
+ my $response;
+
+ my ($go_cache, $request_cache_key);
+ if ($go_cache = $self->{go_cache}) {
+ $request_cache_key
+ = $request->{meta}{request_cache_key}
+ = $self->get_cache_key_for_request($request);
+ if ($request_cache_key) {
+ my $frozen_response = eval { $go_cache->get($request_cache_key) };
+ if ($frozen_response) {
+ $self->_dump("cached response found for ".ref($request), $request)
+ if $trace;
+ $response = $self->thaw_response($frozen_response);
+ $self->trace_msg("transmit_request is returning a response from cache $go_cache\n")
+ if $trace;
+ ++$self->{cache_hit};
+ return $response;
+ }
+ warn $@ if $@;
+ ++$self->{cache_miss};
+ $self->trace_msg("transmit_request cache miss\n")
+ if $trace;
+ }
+ }
+
+ my $to = $self->go_timeout;
+ my $transmit_sub = sub {
+ $self->trace_msg("transmit_request\n") if $trace;
+ local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to;
+
+ my $response = eval {
+ local $SIG{PIPE} = sub {
+ my $extra = ($! eq "Broken pipe") ? "" : " ($!)";
+ die "Unable to send request: Broken pipe$extra\n";
+ };
+ alarm($to) if $to;
+ $self->transmit_request_by_transport($request);
+ };
+ alarm(0) if $to;
+
+ if ($@) {
+ return $self->transport_timedout("transmit_request", $to)
+ if $@ eq "TIMEOUT\n";
+ return $self->new_response({ err => 1, errstr => $@ });
+ }
+
+ return $response;
+ };
+
+ $response = $self->_transmit_request_with_retries($request, $transmit_sub);
+
+ if ($response) {
+ my $frozen_response = delete $response->{meta}{frozen};
+ $self->_store_response_in_cache($frozen_response, $request_cache_key)
+ if $request_cache_key;
+ }
+
+ $self->trace_msg("transmit_request is returning a response itself\n")
+ if $trace && $response;
+
+ return $response unless wantarray;
+ return ($response, $transmit_sub);
+}
+
+
+sub _transmit_request_with_retries {
+ my ($self, $request, $transmit_sub) = @_;
+ my $response;
+ do {
+ $response = $transmit_sub->();
+ } while ( $response && $self->response_needs_retransmit($request, $response) );
+ return $response;
+}
+
+
+sub receive_response {
+ my ($self, $request, $retransmit_sub) = @_;
+ my $to = $self->go_timeout;
+
+ my $receive_sub = sub {
+ $self->trace_msg("receive_response\n");
+ local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to;
+
+ my $response = eval {
+ alarm($to) if $to;
+ $self->receive_response_by_transport($request);
+ };
+ alarm(0) if $to;
+
+ if ($@) {
+ return $self->transport_timedout("receive_response", $to)
+ if $@ eq "TIMEOUT\n";
+ return $self->new_response({ err => 1, errstr => $@ });
+ }
+ return $response;
+ };
+
+ my $response;
+ do {
+ $response = $receive_sub->();
+ if ($self->response_needs_retransmit($request, $response)) {
+ $response = $self->_transmit_request_with_retries($request, $retransmit_sub);
+ $response ||= $receive_sub->();
+ }
+ } while ( $self->response_needs_retransmit($request, $response) );
+
+ if ($response) {
+ my $frozen_response = delete $response->{meta}{frozen};
+ my $request_cache_key = $request->{meta}{request_cache_key};
+ $self->_store_response_in_cache($frozen_response, $request_cache_key)
+ if $request_cache_key && $self->{go_cache};
+ }
+
+ return $response;
+}
+
+
+sub response_retry_preference {
+ my ($self, $request, $response) = @_;
+
+ # give the user a chance to express a preference (or undef for default)
+ if (my $go_retry_hook = $self->go_retry_hook) {
+ my $retry = $go_retry_hook->($request, $response, $self);
+ $self->trace_msg(sprintf "go_retry_hook returned %s\n",
+ (defined $retry) ? $retry : 'undef');
+ return $retry if defined $retry;
+ }
+
+ # This is the main decision point. We don't retry requests that got
+ # as far as executing because the error is probably from the database
+ # (not transport) so retrying is unlikely to help. But note that any
+ # severe transport error occuring after execute is likely to return
+ # a new response object that doesn't have the execute flag set. Beware!
+ return 0 if $response->executed_flag_set;
+
+ return 1 if ($response->errstr || '') =~ m/induced by DBI_GOFER_RANDOM/;
+
+ return 1 if $request->is_idempotent; # i.e. is SELECT or ReadOnly was set
+
+ return undef; # we couldn't make up our mind
+}
+
+
+sub response_needs_retransmit {
+ my ($self, $request, $response) = @_;
+
+ my $err = $response->err
+ or return 0; # nothing went wrong
+
+ my $retry = $self->response_retry_preference($request, $response);
+
+ if (!$retry) { # false or undef
+ $self->trace_msg("response_needs_retransmit: response not suitable for retry\n");
+ return 0;
+ }
+
+ # we'd like to retry but have we retried too much already?
+
+ my $retry_limit = $self->go_retry_limit;
+ if (!$retry_limit) {
+ $self->trace_msg("response_needs_retransmit: retries disabled (retry_limit not set)\n");
+ return 0;
+ }
+
+ my $request_meta = $request->meta;
+ my $retry_count = $request_meta->{retry_count} || 0;
+ if ($retry_count >= $retry_limit) {
+ $self->trace_msg("response_needs_retransmit: $retry_count is too many retries\n");
+ # XXX should be possible to disable altering the err
+ $response->errstr(sprintf "%s (after %d retries by gofer)", $response->errstr, $retry_count);
+ return 0;
+ }
+
+ # will retry now, do the admin
+ ++$retry_count;
+ $self->trace_msg("response_needs_retransmit: retry $retry_count\n");
+
+ # hook so response_retry_preference can defer some code execution
+ # until we've checked retry_count and retry_limit.
+ if (ref $retry eq 'CODE') {
+ $retry->($retry_count, $retry_limit)
+ and warn "should return false"; # protect future use
+ }
+
+ ++$request_meta->{retry_count}; # update count for this request object
+ ++$self->meta->{request_retry_count}; # update cumulative transport stats
+
+ return 1;
+}
+
+
+sub transport_timedout {
+ my ($self, $method, $timeout) = @_;
+ $timeout ||= $self->go_timeout;
+ return $self->new_response({ err => 1, errstr => "DBD::Gofer $method timed-out after $timeout seconds" });
+}
+
+
+# return undef if we don't want to cache this request
+# subclasses may use more specialized rules
+sub get_cache_key_for_request {
+ my ($self, $request) = @_;
+
+ # we only want to cache idempotent requests
+ # is_idempotent() is true if GOf_REQUEST_IDEMPOTENT or GOf_REQUEST_READONLY set
+ return undef if not $request->is_idempotent;
+
+ # XXX would be nice to avoid the extra freeze here
+ my $key = $self->freeze_request($request, undef, 1);
+
+ #use Digest::MD5; warn "get_cache_key_for_request: ".Digest::MD5::md5_base64($key)."\n";
+
+ return $key;
+}
+
+
+sub _store_response_in_cache {
+ my ($self, $frozen_response, $request_cache_key) = @_;
+ my $go_cache = $self->{go_cache}
+ or return;
+
+ # new() ensures that enabling go_cache also enables keep_meta_frozen
+ warn "No meta frozen in response" if !$frozen_response;
+ warn "No request_cache_key" if !$request_cache_key;
+
+ if ($frozen_response && $request_cache_key) {
+ $self->trace_msg("receive_response added response to cache $go_cache\n");
+ eval { $go_cache->set($request_cache_key, $frozen_response) };
+ warn $@ if $@;
+ ++$self->{cache_store};
+ }
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::Base - base class for DBD::Gofer client transports
+
+=head1 SYNOPSIS
+
+ my $remote_dsn = "..."
+ DBI->connect("dbi:Gofer:transport=...;url=...;timeout=...;retry_limit=...;dsn=$remote_dsn",...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY='dbi:Gofer:transport=...;url=...'
+
+which will force I<all> DBI connections to be made via that Gofer server.
+
+=head1 DESCRIPTION
+
+This is the base class for all DBD::Gofer client transports.
+
+=head1 ATTRIBUTES
+
+Gofer transport attributes can be specified either in the attributes parameter
+of the connect() method call, or in the DSN string. When used in the DSN
+string, attribute names don't have the C<go_> prefix.
+
+=head2 go_dsn
+
+The full DBI DSN that the Gofer server should connect to on your behalf.
+
+When used in the DSN it must be the last element in the DSN string.
+
+=head2 go_timeout
+
+A time limit for sending a request and receiving a response. Some drivers may
+implement sending and receiving as separate steps, in which case (currently)
+the timeout applies to each separately.
+
+If a request needs to be resent then the timeout is restarted for each sending
+of a request and receiving of a response.
+
+=head2 go_retry_limit
+
+The maximum number of times a request may be retried. The default is 2.
+
+=head2 go_retry_hook
+
+This subroutine reference is called, if defined, for each response received where $response->err is true.
+
+The subroutine is passed three parameters: the request object, the response object, and the transport object.
+
+If it returns an undefined value then the default retry behaviour is used. See L</RETRY ON ERROR> below.
+
+If it returns a defined but false value then the request is not resent.
+
+If it returns a true value then the request is resent, so long as the number of retries does not exceed C<go_retry_limit>.
+
+=head1 RETRY ON ERROR
+
+The default retry on error behaviour is:
+
+ - Retry if the error was due to DBI_GOFER_RANDOM. See L<DBI::Gofer::Execute>.
+
+ - Retry if $request->is_idempotent returns true. See L<DBI::Gofer::Request>.
+
+A retry won't be allowed if the number of previous retries has reached C<go_retry_limit>.
+
+=head1 TRACING
+
+Tracing of gofer requests and responses can be enabled by setting the
+C<DBD_GOFER_TRACE> environment variable. A value of 1 gives a reasonably
+compact summary of each request and response. A value of 2 or more gives a
+detailed, and voluminous, dump.
+
+The trace is written using DBI->trace_msg() and so is written to the default
+DBI trace output, which is usually STDERR.
+
+=head1 METHODS
+
+I<This section is currently far from complete.>
+
+=head2 response_retry_preference
+
+ $retry = $transport->response_retry_preference($request, $response);
+
+The response_retry_preference is called by DBD::Gofer when considering if a
+request should be retried after an error.
+
+Returns true (would like to retry), false (must not retry), undef (no preference).
+
+If a true value is returned in the form of a CODE ref then, if DBD::Gofer does
+decide to retry the request, it calls the code ref passing $retry_count, $retry_limit.
+Can be used for logging and/or to implement exponential backoff behaviour.
+Currently the called code must return using C<return;> to allow for future extensions.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007-2008, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer>, L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.
+
+and some example transports:
+
+L<DBD::Gofer::Transport::stream>
+
+L<DBD::Gofer::Transport::http>
+
+L<DBI::Gofer::Transport::mod_perl>
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/corostream.pm b/lib/DBD/Gofer/Transport/corostream.pm
new file mode 100644
index 0000000..6e79278
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/corostream.pm
@@ -0,0 +1,144 @@
+package DBD::Gofer::Transport::corostream;
+
+use strict;
+use warnings;
+
+use Carp;
+
+use Coro::Select; # a slow but coro-aware replacement for CORE::select (global effect!)
+
+use Coro;
+use Coro::Handle;
+
+use base qw(DBD::Gofer::Transport::stream);
+
+# XXX ensure DBI_PUREPERL for parent doesn't pass to child
+sub start_pipe_command {
+ local $ENV{DBI_PUREPERL} = $ENV{DBI_PUREPERL_COROCHILD}; # typically undef
+ my $connection = shift->SUPER::start_pipe_command(@_);
+ return $connection;
+}
+
+
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::corostream - Async DBD::Gofer stream transport using Coro and AnyEvent
+
+=head1 SYNOPSIS
+
+ DBI_AUTOPROXY="dbi:Gofer:transport=corostream" perl some-perl-script-using-dbi.pl
+
+or
+
+ $dsn = ...; # the DSN for the driver and database you want to use
+ $dbh = DBI->connect("dbi:Gofer:transport=corostream;dsn=$dsn", ...);
+
+=head1 DESCRIPTION
+
+The I<BIG WIN> from using L<Coro> is that it enables the use of existing
+DBI frameworks like L<DBIx::Class>.
+
+=head1 KNOWN ISSUES AND LIMITATIONS
+
+ - Uses Coro::Select so alters CORE::select globally
+ Parent class probably needs refactoring to enable a more encapsulated approach.
+
+ - Doesn't prevent multiple concurrent requests
+ Probably just needs a per-connection semaphore
+
+ - Coro has many caveats. Caveat emptor.
+
+=head1 STATUS
+
+THIS IS CURRENTLY JUST A PROOF-OF-CONCEPT IMPLEMENTATION FOR EXPERIMENTATION.
+
+Please note that I have no plans to develop this code further myself.
+I'd very much welcome contributions. Interested? Let me know!
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2010, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::stream>
+
+L<DBD::Gofer>
+
+=head1 APPENDIX
+
+Example code:
+
+ #!perl
+
+ use strict;
+ use warnings;
+ use Time::HiRes qw(time);
+
+ BEGIN { $ENV{PERL_ANYEVENT_STRICT} = 1; $ENV{PERL_ANYEVENT_VERBOSE} = 1; }
+
+ use AnyEvent;
+
+ BEGIN { $ENV{DBI_TRACE} = 0; $ENV{DBI_GOFER_TRACE} = 0; $ENV{DBD_GOFER_TRACE} = 0; };
+
+ use DBI;
+
+ $ENV{DBI_AUTOPROXY} = 'dbi:Gofer:transport=corostream';
+
+ my $ticker = AnyEvent->timer( after => 0, interval => 0.1, cb => sub {
+ warn sprintf "-tick- %.2f\n", time
+ } );
+
+ warn "connecting...\n";
+ my $dbh = DBI->connect("dbi:NullP:");
+ warn "...connected\n";
+
+ for (1..3) {
+ warn "entering DBI...\n";
+ $dbh->do("sleep 0.3"); # pseudo-sql understood by the DBD::NullP driver
+ warn "...returned\n";
+ }
+
+ warn "done.";
+
+Example output:
+
+ $ perl corogofer.pl
+ connecting...
+ -tick- 1293631437.14
+ -tick- 1293631437.14
+ ...connected
+ entering DBI...
+ -tick- 1293631437.25
+ -tick- 1293631437.35
+ -tick- 1293631437.45
+ -tick- 1293631437.55
+ ...returned
+ entering DBI...
+ -tick- 1293631437.66
+ -tick- 1293631437.76
+ -tick- 1293631437.86
+ ...returned
+ entering DBI...
+ -tick- 1293631437.96
+ -tick- 1293631438.06
+ -tick- 1293631438.16
+ ...returned
+ done. at corogofer.pl line 39.
+
+You can see that the timer callback is firing while the code 'waits' inside the
+do() method for the response from the database. Normally that would block.
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/null.pm b/lib/DBD/Gofer/Transport/null.pm
new file mode 100644
index 0000000..4b8d86c
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/null.pm
@@ -0,0 +1,111 @@
+package DBD::Gofer::Transport::null;
+
+# $Id: null.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use base qw(DBD::Gofer::Transport::Base);
+
+use DBI::Gofer::Execute;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ pending_response
+ transmit_count
+));
+
+my $executor = DBI::Gofer::Execute->new();
+
+
+sub transmit_request_by_transport {
+ my ($self, $request) = @_;
+ $self->transmit_count( ($self->transmit_count()||0) + 1 ); # just for tests
+
+ my $frozen_request = $self->freeze_request($request);
+
+ # ...
+ # the request is magically transported over to ... ourselves
+ # ...
+
+ my $response = $executor->execute_request( $self->thaw_request($frozen_request, undef, 1) );
+
+ # put response 'on the shelf' ready for receive_response()
+ $self->pending_response( $response );
+
+ return undef;
+}
+
+
+sub receive_response_by_transport {
+ my $self = shift;
+
+ my $response = $self->pending_response;
+
+ my $frozen_response = $self->freeze_response($response, undef, 1);
+
+ # ...
+ # the response is magically transported back to ... ourselves
+ # ...
+
+ return $self->thaw_response($frozen_response);
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::null - DBD::Gofer client transport for testing
+
+=head1 SYNOPSIS
+
+ my $original_dsn = "..."
+ DBI->connect("dbi:Gofer:transport=null;dsn=$original_dsn",...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=null"
+
+=head1 DESCRIPTION
+
+Connect via DBD::Gofer but execute the requests within the same process.
+
+This is a quick and simple way to test applications for compatibility with the
+(few) restrictions that DBD::Gofer imposes.
+
+It also provides a simple, portable way for the DBI test suite to be used to
+test DBD::Gofer on all platforms with no setup.
+
+Also, by measuring the difference in performance between normal connections and
+connections via C<dbi:Gofer:transport=null> the basic cost of using DBD::Gofer
+can be measured. Furthermore, the additional cost of more advanced transports can be
+isolated by comparing their performance with the null transport.
+
+The C<t/85gofer.t> script in the DBI distribution includes a comparative benchmark.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::Base>
+
+L<DBD::Gofer>
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/pipeone.pm b/lib/DBD/Gofer/Transport/pipeone.pm
new file mode 100644
index 0000000..3df2bf3
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/pipeone.pm
@@ -0,0 +1,253 @@
+package DBD::Gofer::Transport::pipeone;
+
+# $Id: pipeone.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use Carp;
+use Fcntl;
+use IO::Select;
+use IPC::Open3 qw(open3);
+use Symbol qw(gensym);
+
+use base qw(DBD::Gofer::Transport::Base);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ connection_info
+ go_perl
+));
+
+
+sub new {
+ my ($self, $args) = @_;
+ $args->{go_perl} ||= do {
+ ($INC{"blib.pm"}) ? [ $^X, '-Mblib' ] : [ $^X ];
+ };
+ if (not ref $args->{go_perl}) {
+ # user can override the perl to be used, either with an array ref
+ # containing the command name and args to use, or with a string
+ # (ie via the DSN) in which case, to enable args to be passed,
+ # we split on two or more consecutive spaces (otherwise the path
+ # to perl couldn't contain a space itself).
+ $args->{go_perl} = [ split /\s{2,}/, $args->{go_perl} ];
+ }
+ return $self->SUPER::new($args);
+}
+
+
+# nonblock($fh) puts filehandle into nonblocking mode
+sub nonblock {
+ my $fh = shift;
+ my $flags = fcntl($fh, F_GETFL, 0)
+ or croak "Can't get flags for filehandle $fh: $!";
+ fcntl($fh, F_SETFL, $flags | O_NONBLOCK)
+ or croak "Can't make filehandle $fh nonblocking: $!";
+}
+
+
+sub start_pipe_command {
+ my ($self, $cmd) = @_;
+ $cmd = [ $cmd ] unless ref $cmd eq 'ARRAY';
+
+ # if it's important that the subprocess uses the same
+ # (versions of) modules as us then the caller should
+ # set PERL5LIB itself.
+
+ # limit various forms of insanity, for now
+ local $ENV{DBI_TRACE}; # use DBI_GOFER_TRACE instead
+ local $ENV{DBI_AUTOPROXY};
+ local $ENV{DBI_PROFILE};
+
+ my ($wfh, $rfh, $efh) = (gensym, gensym, gensym);
+ my $pid = open3($wfh, $rfh, $efh, @$cmd)
+ or die "error starting @$cmd: $!\n";
+ if ($self->trace) {
+ $self->trace_msg(sprintf("Started pid $pid: @$cmd {fd: w%d r%d e%d, ppid=$$}\n", fileno $wfh, fileno $rfh, fileno $efh),0);
+ }
+ nonblock($rfh);
+ nonblock($efh);
+ my $ios = IO::Select->new($rfh, $efh);
+
+ return {
+ cmd=>$cmd,
+ pid=>$pid,
+ wfh=>$wfh, rfh=>$rfh, efh=>$efh,
+ ios=>$ios,
+ };
+}
+
+
+sub cmd_as_string {
+ my $self = shift;
+ # XXX meant to return a properly shell-escaped string suitable for system
+ # but its only for debugging so that can wait
+ my $connection_info = $self->connection_info;
+ return join " ", map { (m/^[-:\w]*$/) ? $_ : "'$_'" } @{$connection_info->{cmd}};
+}
+
+
+sub transmit_request_by_transport {
+ my ($self, $request) = @_;
+
+ my $frozen_request = $self->freeze_request($request);
+
+ my $cmd = [ @{$self->go_perl}, qw(-MDBI::Gofer::Transport::pipeone -e run_one_stdio)];
+ my $info = $self->start_pipe_command($cmd);
+
+ my $wfh = delete $info->{wfh};
+ # send frozen request
+ local $\;
+ print $wfh $frozen_request
+ or warn "error writing to @$cmd: $!\n";
+ # indicate that there's no more
+ close $wfh
+ or die "error closing pipe to @$cmd: $!\n";
+
+ $self->connection_info( $info );
+ return;
+}
+
+
+sub read_response_from_fh {
+ my ($self, $fh_actions) = @_;
+ my $trace = $self->trace;
+
+ my $info = $self->connection_info || die;
+ my ($ios) = @{$info}{qw(ios)};
+ my $errors = 0;
+ my $complete;
+
+ die "No handles to read response from" unless $ios->count;
+
+ while ($ios->count) {
+ my @readable = $ios->can_read();
+ for my $fh (@readable) {
+ local $_;
+ my $actions = $fh_actions->{$fh} || die "panic: no action for $fh";
+ my $rv = sysread($fh, $_='', 1024*31); # to fit in 32KB slab
+ unless ($rv) { # error (undef) or end of file (0)
+ my $action;
+ unless (defined $rv) { # was an error
+ $self->trace_msg("error on handle $fh: $!\n") if $trace >= 4;
+ $action = $actions->{error} || $actions->{eof};
+ ++$errors;
+ # XXX an error may be a permenent condition of the handle
+ # if so we'll loop here - not good
+ }
+ else {
+ $action = $actions->{eof};
+ $self->trace_msg("eof on handle $fh\n") if $trace >= 4;
+ }
+ if ($action->($fh)) {
+ $self->trace_msg("removing $fh from handle set\n") if $trace >= 4;
+ $ios->remove($fh);
+ }
+ next;
+ }
+ # action returns true if the response is now complete
+ # (we finish all handles
+ $actions->{read}->($fh) && ++$complete;
+ }
+ last if $complete;
+ }
+ return $errors;
+}
+
+
+sub receive_response_by_transport {
+ my $self = shift;
+
+ my $info = $self->connection_info || die;
+ my ($pid, $rfh, $efh, $ios, $cmd) = @{$info}{qw(pid rfh efh ios cmd)};
+
+ my $frozen_response;
+ my $stderr_msg;
+
+ $self->read_response_from_fh( {
+ $efh => {
+ error => sub { warn "error reading response stderr: $!"; 1 },
+ eof => sub { warn "eof on stderr" if 0; 1 },
+ read => sub { $stderr_msg .= $_; 0 },
+ },
+ $rfh => {
+ error => sub { warn "error reading response: $!"; 1 },
+ eof => sub { warn "eof on stdout" if 0; 1 },
+ read => sub { $frozen_response .= $_; 0 },
+ },
+ });
+
+ waitpid $info->{pid}, 0
+ or warn "waitpid: $!"; # XXX do something more useful?
+
+ die ref($self)." command (@$cmd) failed: $stderr_msg"
+ if not $frozen_response; # no output on stdout at all
+
+ # XXX need to be able to detect and deal with corruption
+ my $response = $self->thaw_response($frozen_response);
+
+ if ($stderr_msg) {
+ # add stderr messages as warnings (for PrintWarn)
+ $response->add_err(0, $stderr_msg, undef, $self->trace)
+ # but ignore warning from old version of blib
+ unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/;
+ }
+
+ return $response;
+}
+
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::pipeone - DBD::Gofer client transport for testing
+
+=head1 SYNOPSIS
+
+ $original_dsn = "...";
+ DBI->connect("dbi:Gofer:transport=pipeone;dsn=$original_dsn",...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=pipeone"
+
+=head1 DESCRIPTION
+
+Connect via DBD::Gofer and execute each request by starting a new subprocess.
+
+This is, as you might imagine, spectacularly inefficient!
+
+It's only intended for testing. Specifically it demonstrates that the server
+side is completely stateless.
+
+It also provides a base class for the much more useful L<DBD::Gofer::Transport::stream>
+transport.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::Base>
+
+L<DBD::Gofer>
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/stream.pm b/lib/DBD/Gofer/Transport/stream.pm
new file mode 100644
index 0000000..61e211c
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/stream.pm
@@ -0,0 +1,292 @@
+package DBD::Gofer::Transport::stream;
+
+# $Id: stream.pm 14598 2010-12-21 22:53:25Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use Carp;
+
+use base qw(DBD::Gofer::Transport::pipeone);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 14598 $ =~ /(\d+)/o);
+
+# go_persist: user-settable attribute controlling connection caching
+# (0 disables caching; undef means use the default limit below).
+__PACKAGE__->mk_accessors(qw(
+    go_persist
+));
+
+# Default cap on cached subprocess/ssh connections, and the cache itself,
+# keyed by _connection_key() (url plus perl command line).
+my $persist_all = 5;
+my %persist;
+
+
+# Build the cache key identifying an equivalent connection: the target
+# url plus every element of the perl command line used to launch it.
+sub _connection_key {
+    my $self = shift;
+    my $url  = $self->go_url  || "";
+    my $perl = $self->go_perl || [];
+    return join "~", $url, @$perl;
+}
+
+
+# Return a live connection for this transport, reusing a cached one when
+# persistence is enabled and the cached subprocess is still alive.
+sub _connection_get {
+    my $self = shift;
+
+    # go_persist == 0 forces non-caching; undef means "use the default"
+    my $persist = $self->go_persist;
+    $persist = $persist_all unless defined $persist;
+    my $key = $persist ? $self->_connection_key : '';
+
+    if ($persist{$key} && $self->_connection_check($persist{$key})) {
+        $self->trace_msg("reusing persistent connection $key\n", 0)
+            if $self->trace >= 1;
+        return $persist{$key};
+    }
+
+    my $fresh = $self->_make_connection;
+    if ($key) {
+        # XXX quick hack to limit subprocesses
+        %persist = () if keys %persist > $persist_all;
+        $persist{$key} = $fresh;
+    }
+    return $fresh;
+}
+
+
+# Return true if the given (or current) connection's subprocess still
+# exists, probed with signal 0.
+sub _connection_check {
+    my ($self, $connection) = @_;
+    $connection ||= $self->connection_info;
+    my $pid = $connection->{pid};
+    # kill with signal 0 sends no signal - it only tests pid existence
+    my $ok = (kill 0, $pid);
+    # NOTE(review): the trace reports $$ (our own pid) rather than the
+    # checked $pid - presumably for log correlation, but confirm.
+    $self->trace_msg("_connection_check: $ok (pid $$)\n",0) if $self->trace;
+    return $ok;
+}
+
+
+# Shut down the current connection: close our end of the pipe, drop the
+# entry from the persistence cache, and forget the connection details.
+sub _connection_kill {
+    my $self = shift;
+    $self->trace_msg("_connection_kill: closing write handle\n", 0)
+        if $self->trace;
+    my $connection = $self->connection_info;
+    # closing the write file handle should be enough, generally;
+    # in future we may want to be more aggressive, e.g. also closing
+    # the read/stderr handles and sending SIGTERM to the pid
+    close $connection->{wfh};
+    # drop from the persist cache ...
+    delete $persist{ $self->_connection_key };
+    # ... and removing the connection_info should suffice
+    $self->connection_info(undef);
+    return;
+}
+
+
+# Spawn the server-side process that will service our requests: either a
+# local "perl -MDBI::Gofer::Transport::stream -e run_stdio_hex", or the
+# same command run on a remote host via ssh when an ssh: url was given.
+sub _make_connection {
+    my ($self) = @_;
+
+    my $go_perl = $self->go_perl;
+    my $cmd = [ @$go_perl, qw(-MDBI::Gofer::Transport::stream -e run_stdio_hex)];
+
+    #push @$cmd, "DBI_TRACE=2=/tmp/goferstream.log", "sh", "-c";
+    if (my $url = $self->go_url) {
+        die "Only 'ssh:user\@host' style url supported by this transport"
+            unless $url =~ s/^ssh://;
+        my $ssh = $url;
+        # try to reproduce a login shell's environment (PATH, PERL5LIB, ...)
+        my $setup_env = join "||", map { "source $_ 2>/dev/null" } qw(.bash_profile .bash_login .profile);
+        # bash -c runs $setup with the perl command as its "$@" arguments
+        my $setup = $setup_env.q{; exec "$@"};
+        # don't use $^X on remote system by default as it's possibly wrong
+        $cmd->[0] = 'perl' if "@$go_perl" eq $^X;
+        # -x not only 'Disables X11 forwarding' but also makes connections *much* faster
+        unshift @$cmd, qw(ssh -xq), split(' ', $ssh), qw(bash -c), $setup;
+    }
+
+    $self->trace_msg("new connection: @$cmd\n",0) if $self->trace;
+
+    # XXX add a handshake - some message from DBI::Gofer::Transport::stream that's
+    # sent as soon as it starts that we can wait for to report success - and soak up
+    # and report useful warnings etc from ssh before we get it? Increases latency though.
+    my $connection = $self->start_pipe_command($cmd);
+    return $connection;
+}
+
+
+# Send a frozen request to the subprocess as a single hex-encoded,
+# CRLF-terminated line. Returns undef so the caller knows to collect
+# the reply via receive_response_by_transport().
+sub transmit_request_by_transport {
+    my ($self, $request) = @_;
+    my $trace = $self->trace;
+
+    # lazily create (or reuse) the subprocess connection
+    my $connection = $self->connection_info || do {
+        my $con = $self->_connection_get;
+        $self->connection_info( $con );
+        $con;
+    };
+
+    my $encoded_request = unpack("H*", $self->freeze_request($request));
+    $encoded_request .= "\015\012";
+
+    my $wfh = $connection->{wfh};
+    $self->trace_msg(sprintf("transmit_request_by_transport: to fh %s fd%d\n", $wfh, fileno($wfh)),0)
+        if $trace >= 4;
+
+    # send frozen request
+    local $\; # ensure no output record separator corrupts the line protocol
+    $wfh->print($encoded_request) # autoflush enabled
+        or do {
+            my $err = $!;
+            # XXX could/should make new connection and retry
+            $self->_connection_kill;
+            die "Error sending request: $err";
+        };
+    $self->trace_msg("Request sent: $encoded_request\n",0) if $trace >= 4;
+
+    return undef; # indicate no response yet (so caller calls receive_response_by_transport)
+}
+
+
+# Read the hex-encoded response line from the subprocess's stdout (and
+# collect any stderr output), then thaw it back into a response object.
+# Dies with a descriptive message if no response arrives at all.
+sub receive_response_by_transport {
+    my $self = shift;
+    my $trace = $self->trace;
+
+    $self->trace_msg("receive_response_by_transport: awaiting response\n",0) if $trace >= 4;
+    my $connection = $self->connection_info || die;
+    my ($pid, $rfh, $efh, $cmd) = @{$connection}{qw(pid rfh efh cmd)};
+
+    my $errno = 0;
+    my $encoded_response;
+    my $stderr_msg;
+
+    # callbacks return true to stop reading that handle, false to continue;
+    # stdout reading stops once the CRLF terminator has been consumed
+    $self->read_response_from_fh( {
+        $efh => {
+            error => sub { warn "error reading response stderr: $!"; $errno||=$!; 1 },
+            eof => sub { warn "eof reading efh" if $trace >= 4; 1 },
+            read => sub { $stderr_msg .= $_; 0 },
+        },
+        $rfh => {
+            error => sub { warn "error reading response: $!"; $errno||=$!; 1 },
+            eof => sub { warn "eof reading rfh" if $trace >= 4; 1 },
+            read => sub { $encoded_response .= $_; ($encoded_response=~s/\015\012$//) ? 1 : 0 },
+        },
+    });
+
+    # if we got no output on stdout at all then the command has
+    # probably exited, possibly with an error to stderr.
+    # Turn this situation into a reasonably useful DBI error.
+    if (not $encoded_response) {
+        my @msg;
+        push @msg, "error while reading response: $errno" if $errno;
+        if ($stderr_msg) {
+            chomp $stderr_msg;
+            push @msg, sprintf "error reported by \"%s\" (pid %d%s): %s",
+                $self->cmd_as_string,
+                $pid, ((kill 0, $pid) ? "" : ", exited"),
+                $stderr_msg;
+        }
+        die join(", ", "No response received", @msg)."\n";
+    }
+
+    $self->trace_msg("Response received: $encoded_response\n",0)
+        if $trace >= 4;
+
+    $self->trace_msg("Gofer stream stderr message: $stderr_msg\n",0)
+        if $stderr_msg && $trace;
+
+    my $frozen_response = pack("H*", $encoded_response);
+
+    # XXX need to be able to detect and deal with corruption
+    my $response = $self->thaw_response($frozen_response);
+
+    if ($stderr_msg) {
+        # add stderr messages as warnings (for PrintWarn)
+        $response->add_err(0, $stderr_msg, undef, $trace)
+            # but ignore warning from old version of blib
+            unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/;
+    }
+
+    return $response;
+}
+
+# After a timeout the subprocess state is unknown, so drop the
+# connection before delegating timeout handling to the base class.
+sub transport_timedout {
+    my $self = shift;
+    $self->_connection_kill;
+    return $self->SUPER::transport_timedout(@_);
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::stream - DBD::Gofer transport for stdio streaming
+
+=head1 SYNOPSIS
+
+ DBI->connect('dbi:Gofer:transport=stream;url=ssh:username@host.example.com;dsn=dbi:...',...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY='dbi:Gofer:transport=stream;url=ssh:username@host.example.com'
+
+=head1 DESCRIPTION
+
+Without the C<url=> parameter it launches a subprocess as
+
+ perl -MDBI::Gofer::Transport::stream -e run_stdio_hex
+
+and feeds requests into it and reads responses from it. But that's not very useful.
+
+With a C<url=ssh:username@host.example.com> parameter it uses ssh to launch the subprocess
+on a remote system. That's much more useful!
+
+It gives you secure remote access to DBI databases on any system you can login to.
+Using ssh also gives you optional compression and many other features (see the
+ssh manual for how to configure that and many other options via ~/.ssh/config file).
+
+The actual command invoked is something like:
+
+  ssh -xq username@host.example.com bash -c $setup $run
+
+where $run is the command shown above, and $setup is
+
+ . .bash_profile 2>/dev/null || . .bash_login 2>/dev/null || . .profile 2>/dev/null; exec "$@"
+
+which is trying (in a limited and fairly unportable way) to setup the environment
+(PATH, PERL5LIB etc) as it would be if you had logged in to that system.
+
+The "C<perl>" used in the command will default to the value of $^X when not using ssh.
+On most systems that's the full path to the perl that's currently executing.
+
+
+=head1 PERSISTENCE
+
+Currently gofer stream connections persist (remain connected) after all
+database handles have been disconnected. This makes later connections in the
+same process very fast.
+
+Currently up to 5 different gofer stream connections (based on url) can
+persist. If more than 5 are in the cache when a new connection is made then
+the cache is cleared before adding the new connection. Simple but effective.
+
+=head1 TO DO
+
+Document go_perl attribute
+
+Automatically reconnect (within reason) if there's a transport error.
+
+Decide on default for persistent connection - on or off? limits? ttl?
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::Base>
+
+L<DBD::Gofer>
+
+=cut
diff --git a/lib/DBD/NullP.pm b/lib/DBD/NullP.pm
new file mode 100644
index 0000000..b1f8a71
--- /dev/null
+++ b/lib/DBD/NullP.pm
@@ -0,0 +1,166 @@
+{
+    package DBD::NullP;
+
+    require DBI;
+    require Carp;
+
+    @EXPORT = qw(); # Do NOT @EXPORT anything.
+    $VERSION = sprintf("12.%06d", q$Revision: 14714 $ =~ /(\d+)/o);
+
+# $Id: NullP.pm 14714 2011-02-22 17:27:07Z timbo $
+#
+# Copyright (c) 1994-2007 Tim Bunce
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+    $drh = undef;       # holds driver handle once initialised
+
+    # Return the singleton driver handle, creating it on first call.
+    sub driver{
+        return $drh if $drh;
+        my($class, $attr) = @_;
+        $class .= "::dr";
+        ($drh) = DBI::_new_drh($class, {
+            'Name' => 'NullP',
+            'Version' => $VERSION,
+            'Attribution' => 'DBD Example Null Perl stub by Tim Bunce',
+        }, [ qw'example implementors private data']);
+        $drh;
+    }
+
+    # Discard the cached driver handle when a new interpreter thread
+    # is created, so each thread initialises its own.
+    sub CLONE {
+        undef $drh;
+    }
+}
+
+
+{ package DBD::NullP::dr; # ====== DRIVER ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Default connect: delegate to the DBI's base connect and simply mark
+    # the new database handle Active. Real drivers normally override this.
+    sub connect {
+        my ($drh, @args) = @_;
+        my $dbh = $drh->SUPER::connect(@args)
+            or return;
+        $dbh->STORE(Active => 1);
+        return $dbh;
+    }
+
+    # Nothing to clean up for the driver handle.
+    sub DESTROY { undef }
+}
+
+
+{ package DBD::NullP::db; # ====== DATABASE ======
+    $imp_data_size = 0;
+    use strict;
+    use Carp qw(croak);
+
+    # Create a statement handle that just records the statement text.
+    sub prepare {
+        my ($dbh, $statement) = @_;
+        my ($outer, $inner) = DBI::_new_sth($dbh, {
+            'Statement' => $statement,
+        });
+        return $outer;
+    }
+
+    # Pass attribute fetches straight up to the DBI. A real driver would
+    # compute dynamic values here, or fetch and cache expensive ones.
+    sub FETCH {
+        my ($dbh, $attrib) = @_;
+        return $dbh->SUPER::FETCH($attrib);
+    }
+
+    # Store attributes, giving AutoCommit the special treatment the DBI
+    # requires of drivers; everything else is passed up to the DBI.
+    sub STORE {
+        my ($dbh, $attrib, $value) = @_;
+        if ($attrib eq 'AutoCommit') {
+            Carp::croak("Can't disable AutoCommit") unless $value;
+            # magic values tell the DBI that the driver has 'handled'
+            # the AutoCommit attribute itself
+            $value = $value ? -901 : -900;
+        }
+        return $dbh->SUPER::STORE($attrib, $value);
+    }
+
+    # This "database" is always reachable.
+    sub ping { 1 }
+
+    # Disconnecting just clears the Active flag.
+    sub disconnect {
+        shift->STORE(Active => 0);
+    }
+
+}
+
+
+{ package DBD::NullP::st; # ====== STATEMENT ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Record a placeholder value (and optionally its attributes) in the
+    # standard ParamValues / ParamAttr statement attributes.
+    sub bind_param {
+        my ($sth, $param, $value, $attr) = @_;
+        $sth->{ParamValues}{$param} = $value;
+        $sth->{ParamAttr}{$param} = $attr
+            if defined $attr; # attr is sticky if not explicitly set
+        return 1;
+    }
+
+    # Fake an execute: SELECT returns the bound params as one-column
+    # rows, SLEEP pauses (handy for testing timeouts), ERROR raises the
+    # given error, and anything else is a silent no-op.
+    sub execute {
+        my $sth = shift;
+        # bind any values passed to execute() as if via bind_param()
+        $sth->bind_param($_, $_[$_-1]) for (1..@_);
+        if ($sth->{Statement} =~ m/^ \s* SELECT \s+/xmsi) {
+            $sth->STORE(NUM_OF_FIELDS => 1);
+            $sth->{NAME} = [ "fieldname" ];
+            # just for the sake of returning something, we return the params
+            my $params = $sth->{ParamValues} || {};
+            $sth->{dbd_nullp_data} = [ @{$params}{ sort keys %$params } ];
+            $sth->STORE(Active => 1);
+        }
+        # force a sleep - handy for testing
+        elsif ($sth->{Statement} =~ m/^ \s* SLEEP \s+ (\S+) /xmsi) {
+            my $secs = $1;
+            if (eval { require Time::HiRes; defined &Time::HiRes::sleep }) {
+                Time::HiRes::sleep($secs);
+            }
+            else {
+                sleep $secs;
+            }
+        }
+        # force an error - handy for testing
+        elsif ($sth->{Statement} =~ m/^ \s* ERROR \s+ (\d+) \s* (.*) /xmsi) {
+            return $sth->set_err($1, $2);
+        }
+        # anything else is silently ignored, successfully
+        1;
+    }
+
+    # Return the next row of fake data, finishing when exhausted.
+    sub fetchrow_arrayref {
+        my $sth = shift;
+        my $data = $sth->{dbd_nullp_data};
+        if (!$data || !@$data) {
+            $sth->finish; # no more data so finish
+            return undef;
+        }
+        return $sth->_set_fbav(shift @$data);
+    }
+    *fetch = \&fetchrow_arrayref; # alias
+
+    sub FETCH {
+        my ($sth, $attrib) = @_;
+        # would normally validate and only fetch known attributes
+        # else pass up to DBI to handle
+        return $sth->SUPER::FETCH($attrib);
+    }
+
+    sub STORE {
+        my ($sth, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        return $sth->SUPER::STORE($attrib, $value);
+    }
+
+}
+
+1;
diff --git a/lib/DBD/Proxy.pm b/lib/DBD/Proxy.pm
new file mode 100644
index 0000000..6c9e14d
--- /dev/null
+++ b/lib/DBD/Proxy.pm
@@ -0,0 +1,997 @@
+# -*- perl -*-
+#
+#
+# DBD::Proxy - DBI Proxy driver
+#
+#
+# Copyright (c) 1997,1998 Jochen Wiedmann
+#
+# The DBD::Proxy module is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself. In particular permission
+# is granted to Tim Bunce for distributing this as a part of the DBI.
+#
+#
+# Author: Jochen Wiedmann
+# Am Eisteich 9
+# 72555 Metzingen
+# Germany
+#
+# Email: joe@ispsoft.de
+# Phone: +49 7123 14881
+#
+
+use strict;
+use Carp;
+
+require DBI;
+DBI->require_version(1.0201);
+
+use RPC::PlClient 0.2000; # XXX change to 0.2017 once it's released
+
+{ package DBD::Proxy::RPC::PlClient;
+  @DBD::Proxy::RPC::PlClient::ISA = qw(RPC::PlClient);
+
+  # Thin subclass of RPC::PlClient whose only job is to log each remote
+  # call when debugging is enabled, then delegate to the real Call().
+  sub Call {
+    my $self = shift;
+    if ($self->{debug}) {
+        my ($rpc_method, $remote_obj, $method_name, @method_args) = @_;
+        local $^W; # silence undefs
+        Carp::carp("Server $rpc_method $method_name(@method_args)");
+    }
+    return $self->SUPER::Call(@_);
+  }
+}
+
+
+package DBD::Proxy;
+
+use vars qw($VERSION $drh %ATTR);
+
+$VERSION = "0.2004";
+
+$drh = undef;           # holds driver handle once initialised
+
+# Attribute dispositions shared by the db and st classes:
+#   'local'  - handled entirely by the client-side handle;
+#   'cached' - stored on both remote and local handles, fetched locally.
+%ATTR = ( # common to db & st, see also %ATTR in DBD::Proxy::db & ::st
+    'Warn' => 'local',
+    'Active' => 'local',
+    'Kids' => 'local',
+    'CachedKids' => 'local',
+    'PrintError' => 'local',
+    'RaiseError' => 'local',
+    'HandleError' => 'local',
+    'TraceLevel' => 'cached',
+    'CompatMode' => 'local',
+);
+
+# Return the singleton driver handle, creating it on first use.
+# CompatMode is enabled so the DBI dispatcher doesn't cache attribute
+# values, ensuring FETCH always reaches this driver.
+sub driver ($$) {
+    return $drh if $drh;
+    my ($class, $attr) = @_;
+    $drh = DBI::_new_drh($class . "::dr", {
+        'Name' => 'Proxy',
+        'Version' => $VERSION,
+        'Attribution' => 'DBD::Proxy by Jochen Wiedmann',
+    });
+    # disable DBI dispatcher attribute cache (for FETCH)
+    $drh->STORE(CompatMode => 1);
+    return $drh;
+}
+
+# Forget the cached driver handle on thread clone so each interpreter
+# thread initialises its own.
+sub CLONE {
+    undef $drh;
+}
+
+# Record an error on handle $h. If the message carries an embedded
+# " [err=...,state=...]" marker (added by the server side), strip it
+# and use those values; otherwise report code 1 with a blank state.
+sub proxy_set_err {
+    my ($h, $errmsg) = @_;
+    my ($err, $state);
+    if ($errmsg =~ s/ \[err=(.*?),state=(.*?)\]//) {
+        ($err, $state) = ($1, $2);
+    }
+    else {
+        ($err, $state) = (1, ' ' x 5);
+    }
+    return $h->set_err($err, $errmsg, $state);
+}
+
+package DBD::Proxy::dr; # ====== DRIVER ======
+
+$DBD::Proxy::dr::imp_data_size = 0;
+
+# Connect to a remote dsn via a DBI::ProxyServer. The client-side dsn
+# carries connection parameters as 'name=value' pairs separated by ';'
+# (hostname, port, dsn, cipher/key, usercipher/userkey, debug, timeout,
+# logfile, proxy_rpc_* options forwarded to the RPC layer, ...).
+sub connect ($$;$$) {
+    my($drh, $dsn, $user, $auth, $attr)= @_;
+    my($dsnOrig) = $dsn;
+
+    # parse 'var=val;...' pairs; everything after 'dsn=' is the remote dsn
+    my %attr = %$attr;
+    my ($var, $val);
+    while (length($dsn)) {
+        if ($dsn =~ /^dsn=(.*)/) {
+            $attr{'dsn'} = $1;
+            last;
+        }
+        if ($dsn =~ /^(.*?);(.*)/) {
+            $var = $1;
+            $dsn = $2;
+        } else {
+            $var = $dsn;
+            $dsn = '';
+        }
+        if ($var =~ /^(.*?)=(.*)/) {
+            $var = $1;
+            $val = $2;
+            $attr{$var} = $val;
+        }
+    }
+
+    # accumulate all missing-parameter problems into one error message
+    my $err = '';
+    if (!defined($attr{'hostname'})) { $err .= " Missing hostname."; }
+    if (!defined($attr{'port'})) { $err .= " Missing port."; }
+    if (!defined($attr{'dsn'})) { $err .= " Missing remote dsn."; }
+
+    # Create a cipher object, if requested
+    my $cipherRef = undef;
+    if ($attr{'cipher'}) {
+        $cipherRef = eval { $attr{'cipher'}->new(pack('H*',
+                                                        $attr{'key'})) };
+        if ($@) { $err .= " Cannot create cipher object: $@."; }
+    }
+    my $userCipherRef = undef;
+    if ($attr{'userkey'}) {
+        my $cipher = $attr{'usercipher'} || $attr{'cipher'};
+        $userCipherRef = eval { $cipher->new(pack('H*', $attr{'userkey'})) };
+        if ($@) { $err .= " Cannot create usercipher object: $@."; }
+    }
+
+    return DBD::Proxy::proxy_set_err($drh, $err) if $err; # Returns undef
+
+    my %client_opts = (
+        'peeraddr' => $attr{'hostname'},
+        'peerport' => $attr{'port'},
+        'socket_proto' => 'tcp',
+        'application' => $attr{dsn},
+        'user' => $user || '',
+        'password' => $auth || '',
+        'version' => $DBD::Proxy::VERSION,
+        'cipher' => $cipherRef,
+        'debug' => $attr{debug} || 0,
+        'timeout' => $attr{timeout} || undef,
+        'logfile' => $attr{logfile} || undef
+    );
+    # Options starting with 'proxy_rpc_' are forwarded to the RPC layer after
+    # stripping the prefix.
+    while (my($var,$val) = each %attr) {
+        if ($var =~ s/^proxy_rpc_//) {
+            $client_opts{$var} = $val;
+        }
+    }
+    # Create an RPC::PlClient object.
+    my($client, $msg) = eval { DBD::Proxy::RPC::PlClient->new(%client_opts) };
+
+    return DBD::Proxy::proxy_set_err($drh, "Cannot log in to DBI::ProxyServer: $@")
+        if $@; # Returns undef
+    return DBD::Proxy::proxy_set_err($drh, "Constructor didn't return a handle: $msg")
+        unless ($msg =~ /^((?:\w+|\:\:)+)=(\w+)/); # Returns undef
+
+    $msg = RPC::PlClient::Object->new($1, $client, $msg);
+
+    # Determine the highest protocol version the server supports;
+    # servers older than 0.3 (or refusing the Version call) are legacy (v1).
+    my $max_proto_ver;
+    my ($server_ver_str) = eval { $client->Call('Version') };
+    if ( $@ ) {
+        # Server denies call, assume legacy protocol.
+        $max_proto_ver = 1;
+    } else {
+        # Parse proxy server version.
+        my ($server_ver_num) = $server_ver_str =~ /^DBI::ProxyServer\s+([\d\.]+)/;
+        $max_proto_ver = $server_ver_num >= 0.3 ? 2 : 1;
+    }
+    my $req_proto_ver;
+    if ( exists $attr{proxy_lazy_prepare} ) {
+        # proxy_lazy_prepare=0 requires protocol v2 (synchronous prepare)
+        $req_proto_ver = ($attr{proxy_lazy_prepare} == 0) ? 2 : 1;
+        return DBD::Proxy::proxy_set_err($drh,
+                "DBI::ProxyServer does not support synchronous statement preparation.")
+            if $max_proto_ver < $req_proto_ver;
+    }
+
+    # Switch to user specific encryption mode, if desired
+    if ($userCipherRef) {
+        $client->{'cipher'} = $userCipherRef;
+    }
+
+    # create a 'blank' dbh
+    my $this = DBI::_new_dbh($drh, {
+            'Name' => $dsnOrig,
+            'proxy_dbh' => $msg,
+            'proxy_client' => $client,
+            'RowCacheSize' => $attr{'RowCacheSize'} || 20,
+            'proxy_proto_ver' => $req_proto_ver || 1
+    });
+
+    # copy all proxy_* attributes onto the new handle
+    foreach $var (keys %attr) {
+        if ($var =~ /proxy_/) {
+            $this->{$var} = $attr{$var};
+        }
+    }
+    $this->SUPER::STORE('Active' => 1);
+
+    $this;
+}
+
+
+# Nothing to clean up for the driver handle itself.
+sub DESTROY { undef }
+
+
+package DBD::Proxy::db; # ====== DATABASE ======
+
+$DBD::Proxy::db::imp_data_size = 0;
+
+# XXX probably many more methods need to be added here
+# in order to trigger our AUTOLOAD to redirect them to the server.
+# (Unless the sub is declared it's bypassed by perl method lookup.)
+# See notes in ToDo about method metadata
+# The question is whether to add all the methods in %DBI::DBI_methods
+# to the corresponding classes (::db, ::st etc)
+# Also need to consider methods that, if proxied, would change the server state
+# in a way that might not be visible on the client, ie begin_work -> AutoCommit.
+
+# These bodyless declarations make perl's method resolution find the subs
+# here, so calls get routed through AUTOLOAD to the remote server.
+sub commit;
+sub connected;
+sub rollback;
+sub ping;
+
+
+use vars qw(%ATTR $AUTOLOAD);
+
+# inherited: STORE / FETCH against this class.
+# local: STORE / FETCH against parent class.
+# cached: STORE to remote and local objects, FETCH from local.
+# remote: STORE / FETCH against remote object only (default).
+#
+# Note: Attribute names starting with 'proxy_' always treated as 'inherited'.
+#
+%ATTR = ( # see also %ATTR in DBD::Proxy::st
+    %DBD::Proxy::ATTR,
+    RowCacheSize => 'inherited',
+    #AutoCommit => 'cached',
+    'FetchHashKeyName' => 'cached',
+    Statement => 'local',
+    Driver => 'local',
+    dbi_connect_closure => 'local',
+    Username => 'local',
+);
+
+# On first call of an undefined method, generate and install a forwarding
+# method of the same name, then jump to it via goto. The generated method
+# calls the same-named method on the remote handle (preserving list/scalar
+# context) and converts remote death into a local DBI error.
+sub AUTOLOAD {
+    my $method = $AUTOLOAD;
+    $method =~ s/(.*::(.*)):://;
+    my $class = $1;
+    my $type = $2;
+    #warn "AUTOLOAD of $method (class=$class, type=$type)";
+    my %expand = (
+        'method' => $method,
+        'class' => $class,
+        'type' => $type,
+        'call' => "$method(\@_)",
+        # XXX was trying to be smart but was tripping up over the DBI's own
+        # smartness. Disabled, but left here in case there are issues.
+        # 'call' => (UNIVERSAL::can("DBI::_::$type", $method)) ? "$method(\@_)" : "func(\@_, '$method')",
+    );
+
+    # template for the generated method; ~name~ markers are substituted below
+    my $method_code = q{
+        package ~class~;
+        sub ~method~ {
+            my $h = shift;
+            local $@;
+            my @result = wantarray
+                ? eval { $h->{'proxy_~type~h'}->~call~ }
+                : eval { scalar $h->{'proxy_~type~h'}->~call~ };
+            return DBD::Proxy::proxy_set_err($h, $@) if $@;
+            return wantarray ? @result : $result[0];
+        }
+    };
+    $method_code =~ s/\~(\w+)\~/$expand{$1}/eg;
+    local $SIG{__DIE__} = 'DEFAULT';
+    # the appended ".2" makes a successful eval return a true value
+    my $err = do { local $@; eval $method_code.2; $@ };
+    die $err if $err;
+    goto &$AUTOLOAD;
+}
+
+# Disconnect cleanly (notifying the server) when an Active handle is
+# garbage collected.
+sub DESTROY {
+    my $dbh = shift;
+    local $@ if $@; # protect $@
+    $dbh->disconnect if $dbh->SUPER::FETCH('Active');
+}
+
+# Disconnect the remote database handle and close the TCP connection,
+# then mark the local handle inactive. Always returns 1.
+sub disconnect ($) {
+    my ($dbh) = @_;
+
+    # Sadly the Proxy too-often disagrees with the backend database
+    # on the subject of 'Active'. In the short term, I'd like the
+    # Proxy to ease up and let me decide when it's proper to go over
+    # the wire. This ultimately applies to finish() as well.
+    #return unless $dbh->SUPER::FETCH('Active');
+
+    # Drop database connection at remote end
+    my $rdbh = $dbh->{'proxy_dbh'};
+    if ( $rdbh ) {
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        eval { $rdbh->disconnect() } ;
+        DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    }
+
+    # Close TCP connect to remote
+    # XXX possibly best left till DESTROY? Add a config attribute to choose?
+    #$dbh->{proxy_client}->Disconnect(); # Disconnect method requires newer PlRPC module
+    $dbh->{proxy_client}->{socket} = undef; # hack
+
+    $dbh->SUPER::STORE('Active' => 0);
+    1;
+}
+
+
+# Store an attribute according to its disposition in %ATTR: proxy_* and
+# 'inherited' stay on the local handle; 'remote' goes to the server;
+# 'cached' goes to the server and is also stored locally; everything
+# else falls through to the DBI superclass.
+sub STORE ($$$) {
+    my($dbh, $attr, $val) = @_;
+    my $type = $ATTR{$attr} || 'remote';
+
+    if ($attr eq 'TraceLevel') {
+        # NOTE(review): this unconditional warn looks like leftover debug
+        # output - confirm intent before removing it.
+        warn("TraceLevel $val");
+        my $pc = $dbh->{proxy_client} || die;
+        $pc->{logfile} ||= 1; # XXX hack
+        $pc->{debug} = ($val && $val >= 4);
+        $pc->Debug("$pc debug enabled") if $pc->{debug};
+    }
+
+    if ($attr =~ /^proxy_/ || $type eq 'inherited') {
+        $dbh->{$attr} = $val;
+        return 1;
+    }
+
+    if ($type eq 'remote' || $type eq 'cached') {
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $dbh->{'proxy_dbh'}->STORE($attr => $val) };
+        return DBD::Proxy::proxy_set_err($dbh, $@) if $@; # returns undef
+        $dbh->SUPER::STORE($attr => $val) if $type eq 'cached';
+        return $result;
+    }
+    return $dbh->SUPER::STORE($attr => $val);
+}
+
+# Fetch an attribute: proxy_*, 'inherited' and 'cached' values come from
+# the local handle hash; 'remote' values are fetched from the server;
+# anything else goes to the DBI superclass. (We only see cached values
+# here when the handle is in CompatMode - otherwise the DBI dispatcher
+# serves them from its own attribute cache.)
+sub FETCH ($$) {
+    my ($dbh, $attr) = @_;
+    my $disposition = $ATTR{$attr} || 'remote';
+
+    return $dbh->{$attr}
+        if $attr =~ /^proxy_/
+        || $disposition eq 'inherited'
+        || $disposition eq 'cached';
+
+    if ($disposition eq 'remote') {
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $value = eval { $dbh->{'proxy_dbh'}->FETCH($attr) };
+        return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+        return $value;
+    }
+
+    return $dbh->SUPER::FETCH($attr);
+}
+
+# Prepare a statement. Under protocol version 2 the statement is also
+# prepared on the server immediately; under the legacy protocol (v1)
+# the real prepare is deferred until the first execute().
+sub prepare ($$;$) {
+    my($dbh, $stmt, $attr) = @_;
+    my $sth = DBI::_new_sth($dbh, {
+        'Statement' => $stmt,
+        'proxy_attr' => $attr,
+        'proxy_cache_only' => 0,
+        'proxy_params' => [],
+    }
+    );
+    my $proto_ver = $dbh->{'proxy_proto_ver'};
+    if ( $proto_ver > 1 ) {
+        $sth->{'proxy_attr_cache'} = {cache_filled => 0};
+        my $rdbh = $dbh->{'proxy_dbh'};
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $rsth = eval { $rdbh->prepare($sth->{'Statement'}, $sth->{'proxy_attr'}, undef, $proto_ver) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        return DBD::Proxy::proxy_set_err($sth, "Constructor didn't return a handle: $rsth")
+            unless ($rsth =~ /^((?:\w+|\:\:)+)=(\w+)/);
+
+        my $client = $dbh->{'proxy_client'};
+        $rsth = RPC::PlClient::Object->new($1, $client, $rsth);
+
+        $sth->{'proxy_sth'} = $rsth;
+        # If statement is a positioned update we do not want any readahead.
+        $sth->{'RowCacheSize'} = 1 if $stmt =~ /\bfor\s+update\b/i;
+        # Since resources are used by prepared remote handle, mark us active.
+        $sth->SUPER::STORE(Active => 1);
+    }
+    $sth;
+}
+
+# Quote a value for interpolation into SQL. With proxy_quote => 'local'
+# (and no explicit data type) the DBI's default quoting is used locally;
+# otherwise the value makes a round trip to the remote driver so its
+# driver-specific quoting rules apply.
+sub quote {
+    my $dbh = shift;
+    my $proxy_quote = $dbh->{proxy_quote} || 'remote';
+
+    return $dbh->SUPER::quote(@_)
+        if $proxy_quote eq 'local' && @_ == 1;
+
+    # For the common case of only a single argument
+    # (no $data_type) we could learn and cache the behaviour.
+    # Or we could probe the driver with a few test cases.
+    # Or we could add a way to ask the DBI::ProxyServer
+    # if $dbh->can('quote') == \&DBI::_::db::quote.
+    # Tim
+    #
+    # Sounds all *very* smart to me. I'd rather suggest to
+    # implement some of the typical quote possibilities
+    # and let the user set
+    # $dbh->{'proxy_quote'} = 'backslash_escaped';
+    # for example.
+    # Jochen
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my $result = eval { $dbh->{'proxy_dbh'}->quote(@_) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    return $result;
+}
+
+# Fetch the complete remote table_info result set in one round trip and
+# wrap it in a local statement handle that is already "executed" with
+# all its rows and metadata cached client-side.
+sub table_info {
+    my $dbh = shift;
+    my $rdbh = $dbh->{'proxy_dbh'};
+    #warn "table_info(@_)";
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my($numFields, $names, $types, @rows) = eval { $rdbh->table_info(@_) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    my ($sth, $inner) = DBI::_new_sth($dbh, {
+        'Statement' => "SHOW TABLES",
+        'proxy_params' => [],
+        'proxy_data' => \@rows,
+        'proxy_attr_cache' => {
+            'NUM_OF_PARAMS' => 0,
+            'NUM_OF_FIELDS' => $numFields,
+            'NAME' => $names,
+            'TYPE' => $types,
+            'cache_filled' => 1
+        },
+        'proxy_cache_only' => 1,
+    });
+    $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields);
+    $inner->{NAME} = $names;
+    $inner->{TYPE} = $types;
+    $sth->SUPER::STORE('Active' => 1); # already execute()'d
+    $sth->{'proxy_rows'} = @rows;
+    return $sth;
+}
+
+# Use the DBI's default tables() implementation, which in turn calls
+# our table_info() above.
+sub tables {
+    my $dbh = shift;
+    #warn "tables(@_)";
+    return $dbh->SUPER::tables(@_);
+}
+
+
+# Forward type_info_all to the remote database handle, converting any
+# remote failure into a local DBI error.
+sub type_info_all {
+    my $dbh = shift;
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my $type_info = eval { $dbh->{'proxy_dbh'}->type_info_all(@_) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    return $type_info;
+}
+
+
+package DBD::Proxy::st; # ====== STATEMENT ======
+
+$DBD::Proxy::st::imp_data_size = 0;
+
+use vars qw(%ATTR);
+
+# inherited: STORE to current object. FETCH from current if exists, else call up
+# to the (proxy) database object.
+# local: STORE / FETCH against parent class.
+# cache_only: STORE noop (read-only). FETCH from private_* if exists, else call
+# remote and cache the result.
+# remote: STORE / FETCH against remote object only (default).
+#
+# Note: Attribute names starting with 'proxy_' always treated as 'inherited'.
+#
+%ATTR = ( # see also %ATTR in DBD::Proxy::db
+    %DBD::Proxy::ATTR,
+    'Database' => 'local',
+    'RowsInCache' => 'local',
+    'RowCacheSize' => 'inherited',
+    'NULLABLE' => 'cache_only',
+    'NAME' => 'cache_only',
+    'TYPE' => 'cache_only',
+    'PRECISION' => 'cache_only',
+    'SCALE' => 'cache_only',
+    'NUM_OF_FIELDS' => 'cache_only',
+    'NUM_OF_PARAMS' => 'cache_only'
+);
+
+# statements proxy unimplemented methods the same way database handles do
+*AUTOLOAD = \&DBD::Proxy::db::AUTOLOAD;
+
+# Execute the statement on the server. Uses parameters passed here, or
+# those accumulated via bind_param(). Under protocol v2 the statement
+# was prepared at prepare() time; under v1 the first execute performs a
+# combined prepare+execute on the server. Returns the row count ('0E0'
+# for zero) or undef on error.
+sub execute ($@) {
+    my $sth = shift;
+    my $params = @_ ? \@_ : $sth->{'proxy_params'};
+
+    # new execute, so delete any cached rows from previous execute
+    undef $sth->{'proxy_data'};
+    undef $sth->{'proxy_rows'};
+
+    my $rsth = $sth->{proxy_sth};
+    my $dbh = $sth->FETCH('Database');
+    my $proto_ver = $dbh->{proxy_proto_ver};
+
+    my ($numRows, @outData);
+
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    if ( $proto_ver > 1 ) {
+        ($numRows, @outData) = eval { $rsth->execute($params, $proto_ver) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+
+        # Attributes passed back only on the first execute() of a statement.
+        unless ($sth->{proxy_attr_cache}->{cache_filled}) {
+            my ($numFields, $numParams, $names, $types) = splice(@outData, 0, 4);
+            $sth->{'proxy_attr_cache'} = {
+                'NUM_OF_FIELDS' => $numFields,
+                'NUM_OF_PARAMS' => $numParams,
+                'NAME' => $names,
+                'cache_filled' => 1
+            };
+            $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields);
+            $sth->SUPER::STORE('NUM_OF_PARAMS' => $numParams);
+        }
+
+    }
+    else {
+        if ($rsth) {
+            # statement was prepared on the server by an earlier execute
+            ($numRows, @outData) = eval { $rsth->execute($params, $proto_ver) };
+            return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+
+        }
+        else {
+            my $rdbh = $dbh->{'proxy_dbh'};
+
+            # Legacy prepare is actually prepare + first execute on the server.
+            ($rsth, @outData) =
+                eval { $rdbh->prepare($sth->{'Statement'},
+                        $sth->{'proxy_attr'}, $params, $proto_ver) };
+            return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+            return DBD::Proxy::proxy_set_err($sth, "Constructor didn't return a handle: $rsth")
+                unless ($rsth =~ /^((?:\w+|\:\:)+)=(\w+)/);
+
+            my $client = $dbh->{'proxy_client'};
+            $rsth = RPC::PlClient::Object->new($1, $client, $rsth);
+
+            my ($numFields, $numParams, $names, $types) = splice(@outData, 0, 4);
+            $sth->{'proxy_sth'} = $rsth;
+            $sth->{'proxy_attr_cache'} = {
+                'NUM_OF_FIELDS' => $numFields,
+                'NUM_OF_PARAMS' => $numParams,
+                'NAME' => $names
+            };
+            $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields);
+            $sth->SUPER::STORE('NUM_OF_PARAMS' => $numParams);
+            $numRows = shift @outData;
+        }
+    }
+    # Always condition active flag.
+    $sth->SUPER::STORE('Active' => 1) if $sth->FETCH('NUM_OF_FIELDS'); # is SELECT
+    $sth->{'proxy_rows'} = $numRows;
+    # Any remaining items are output params.
+    if (@outData) {
+        foreach my $p (@$params) {
+            if (ref($p->[0])) {
+                my $ref = shift @outData;
+                ${$p->[0]} = $$ref;
+            }
+        }
+    }
+
+    $sth->{'proxy_rows'} || '0E0';
+}
+
+# Return the next row, refilling the local row cache from the server
+# (RowCacheSize rows per round trip) when it runs dry. A short batch
+# means the server has already finished the statement.
+sub fetch ($) {
+    my $sth = shift;
+
+    my $data = $sth->{'proxy_data'};
+
+    $sth->{'proxy_rows'} = 0 unless defined $sth->{'proxy_rows'};
+
+    if(!$data || !@$data) {
+        return undef unless $sth->SUPER::FETCH('Active');
+
+        my $rsth = $sth->{'proxy_sth'};
+        if (!$rsth) {
+            die "Attempt to fetch row without execute";
+        }
+        my $num_rows = $sth->FETCH('RowCacheSize') || 20;
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my @rows = eval { $rsth->fetch($num_rows) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        unless (@rows == $num_rows) {
+            undef $sth->{'proxy_data'};
+            # server side has already called finish
+            $sth->SUPER::STORE(Active => 0);
+        }
+        return undef unless @rows;
+        $sth->{'proxy_data'} = $data = [@rows];
+    }
+    my $row = shift @$data;
+
+    # cache-only handles (e.g. table_info) deactivate when drained
+    $sth->SUPER::STORE(Active => 0) if ( $sth->{proxy_cache_only} and !@$data );
+    $sth->{'proxy_rows'}++;
+    return $sth->_set_fbav($row);
+}
+*fetchrow_arrayref = \&fetch;
+
+# Row count from the last execute/fetch; -1 when unknown (per DBI spec).
+sub rows ($) {
+    my ($sth) = @_;
+    my $count = $sth->{'proxy_rows'};
+    return defined $count ? $count : -1;
+}
+
+# Mark the statement inactive and, unless proxy_no_finish is set (on the
+# statement itself or its database handle), finish the remote statement
+# too so its server-side resources are released.
+sub finish ($) {
+    my ($sth) = @_;
+    return 1 unless $sth->SUPER::FETCH('Active');
+
+    my $rsth = $sth->{'proxy_sth'};
+    $sth->SUPER::STORE('Active' => 0);
+    return 0 unless $rsth; # Something's out of sync
+
+    my $no_finish;
+    if (exists $sth->{'proxy_no_finish'}) {
+        $no_finish = $sth->{'proxy_no_finish'};
+    }
+    else {
+        $no_finish = $sth->FETCH('Database')->{'proxy_no_finish'};
+    }
+    return 1 if $no_finish;
+
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my $result = eval { $rsth->finish() };
+    return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+    return $result;
+}
+
+# Store a statement attribute according to its disposition in %ATTR:
+# proxy_* and 'inherited' stay local; 'cache_only' is read-only (noop);
+# 'remote' goes to the server; 'cached' goes to the server AND falls
+# through to be stored locally via the superclass.
+sub STORE ($$$) {
+    my($sth, $attr, $val) = @_;
+    my $type = $ATTR{$attr} || 'remote';
+
+    if ($attr =~ /^proxy_/ || $type eq 'inherited') {
+        $sth->{$attr} = $val;
+        return 1;
+    }
+
+    if ($type eq 'cache_only') {
+        return 0;
+    }
+
+    if ($type eq 'remote' || $type eq 'cached') {
+        my $rsth = $sth->{'proxy_sth'} or return undef;
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $rsth->STORE($attr => $val) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if ($@);
+        return $result if $type eq 'remote'; # else fall through to cache locally
+    }
+    return $sth->SUPER::STORE($attr => $val);
+}
+
+# Fetch a statement attribute: proxy_* from the local hash; 'inherited'
+# from the local hash with fallback to the database handle; 'cache_only'
+# from the attribute cache when present; non-'local' values from the
+# remote statement; 'local' values from the superclass (with special
+# handling for RowsInCache).
+sub FETCH ($$) {
+    my($sth, $attr) = @_;
+
+    if ($attr =~ /^proxy_/) {
+        return $sth->{$attr};
+    }
+
+    my $type = $ATTR{$attr} || 'remote';
+    if ($type eq 'inherited') {
+        if (exists($sth->{$attr})) {
+            return $sth->{$attr};
+        }
+        return $sth->FETCH('Database')->{$attr};
+    }
+
+    if ($type eq 'cache_only' &&
+            exists($sth->{'proxy_attr_cache'}->{$attr})) {
+        return $sth->{'proxy_attr_cache'}->{$attr};
+    }
+
+    if ($type ne 'local') {
+        my $rsth = $sth->{'proxy_sth'} or return undef;
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $rsth->FETCH($attr) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        return $result;
+    }
+    elsif ($attr eq 'RowsInCache') {
+        # number of locally cached rows still unfetched
+        my $data = $sth->{'proxy_data'};
+        $data ? @$data : 0;
+    }
+    else {
+        $sth->SUPER::FETCH($attr);
+    }
+}
+
+# bind_param/bind_param_inout: nothing is sent to the server here; the
+# value (and any bind attributes) are queued 1-based in proxy_params
+# and shipped with the next execute() call.
+sub bind_param ($$$@) {
+    my ($sth, $param, @value_and_attrs) = @_;
+    $sth->{'proxy_params'}->[ $param - 1 ] = [ @value_and_attrs ];
+}
+# Inout parameters are handled the same way as plain bound parameters.
+*bind_param_inout = \&bind_param;
+
+# DESTROY: make sure a still-Active handle is finished (so the remote
+# statement gets cleaned up) before the local handle goes away.
+sub DESTROY {
+    my ($sth) = @_;
+    if ($sth->SUPER::FETCH('Active')) {
+        $sth->finish;
+    }
+}
+
+
+1;
+
+
+__END__
+
+=head1 NAME
+
+DBD::Proxy - A proxy driver for the DBI
+
+=head1 SYNOPSIS
+
+ use DBI;
+
+ $dbh = DBI->connect("dbi:Proxy:hostname=$host;port=$port;dsn=$db",
+ $user, $passwd);
+
+ # See the DBI module documentation for full details
+
+=head1 DESCRIPTION
+
+DBD::Proxy is a Perl module for connecting to a database via a remote
+DBI driver. See L<DBD::Gofer> for an alternative with different trade-offs.
+
+This is of course not needed for DBI drivers which already
+support connecting to a remote database, but there are engines which
+don't offer network connectivity.
+
+Another application is offering database access through a firewall, as
+the driver offers query based restrictions. For example you can
+restrict queries to exactly those that are used in a given CGI
+application.
+
+Speaking of CGI, another application is (or rather, will be) to reduce
+the database connect/disconnect overhead from CGI scripts by proxying
+the connect_cached method. The proxy server will hold the
+database connections open in a cache. The CGI script then trades the
+database connect/disconnect overhead for the DBD::Proxy
+connect/disconnect overhead which is typically much less.
+I<Note that the connect_cached method is new and still experimental.>
+
+
+=head1 CONNECTING TO THE DATABASE
+
+Before connecting to a remote database, you must ensure, that a Proxy
+server is running on the remote machine. There's no default port, so
+you have to ask your system administrator for the port number. See
+L<DBI::ProxyServer> for details.
+
+Say, your Proxy server is running on machine "alpha", port 3334, and
+you'd like to connect to an ODBC database called "mydb" as user "joe"
+with password "hello". When using DBD::ODBC directly, you'd do a
+
+ $dbh = DBI->connect("DBI:ODBC:mydb", "joe", "hello");
+
+With DBD::Proxy this becomes
+
+ $dsn = "DBI:Proxy:hostname=alpha;port=3334;dsn=DBI:ODBC:mydb";
+ $dbh = DBI->connect($dsn, "joe", "hello");
+
+You see, this is mainly the same. The DBD::Proxy module will create a
+connection to the Proxy server on "alpha" which in turn will connect
+to the ODBC database.
+
+Refer to the L<DBI> documentation on the C<connect> method for a way
+to automatically use DBD::Proxy without having to change your code.
+
+DBD::Proxy's DSN string has the format
+
+ $dsn = "DBI:Proxy:key1=val1; ... ;keyN=valN;dsn=valDSN";
+
+In other words, it is a collection of key/value pairs. The following
+keys are recognized:
+
+=over 4
+
+=item hostname
+
+=item port
+
+Hostname and port of the Proxy server; these keys must be present,
+no defaults. Example:
+
+ hostname=alpha;port=3334
+
+=item dsn
+
+The value of this attribute will be used as a dsn name by the Proxy
+server. Thus it must have the format C<DBI:driver:...>, in particular
+it will contain colons. The I<dsn> value may contain semicolons, hence
+this key *must* be the last and its value will be the complete
+remaining part of the dsn. Example:
+
+ dsn=DBI:ODBC:mydb
+
+=item cipher
+
+=item key
+
+=item usercipher
+
+=item userkey
+
+By using these fields you can enable encryption. If you set,
+for example,
+
+ cipher=$class;key=$key
+
+(note the semicolon) then DBD::Proxy will create a new cipher object
+by executing
+
+ $cipherRef = $class->new(pack("H*", $key));
+
+and pass this object to the RPC::PlClient module when creating a
+client. See L<RPC::PlClient>. Example:
+
+ cipher=IDEA;key=97cd2375efa329aceef2098babdc9721
+
+The usercipher/userkey attributes allow you to use two phase encryption:
+The cipher/key encryption will be used in the login and authorisation
+phase. Once the client is authorised, it will change to usercipher/userkey
+encryption. Thus the cipher/key pair is a B<host> based secret, typically
+less secure than the usercipher/userkey secret and readable by anyone.
+The usercipher/userkey secret is B<your> private secret.
+
+Of course encryption requires an appropriately configured server. See
+L<DBI::ProxyServer/CONFIGURATION FILE>.
+
+=item debug
+
+Turn on debugging mode
+
+=item stderr
+
+This attribute will set the corresponding attribute of the RPC::PlClient
+object, thus logging will not use syslog(), but redirected to stderr.
+This is the default under Windows.
+
+ stderr=1
+
+=item logfile
+
+Similar to the stderr attribute, but output will be redirected to the
+given file.
+
+ logfile=/dev/null
+
+=item RowCacheSize
+
+The DBD::Proxy driver supports this attribute (which is DBI standard,
+as of DBI 1.02). It's used to reduce network round-trips by fetching
+multiple rows in one go. The current default value is 20, but this may
+change.
+
+
+=item proxy_no_finish
+
+This attribute can be used to reduce network traffic: If the
+application is calling $sth->finish() then the proxy tells the server
+to finish the remote statement handle. Of course this slows down things
+quite a lot, but is perfectly good for reducing memory usage with
+persistent connections.
+
+However, if you set the I<proxy_no_finish> attribute to a TRUE value,
+either in the database handle or in the statement handle, then finish()
+calls will be suppressed. This is what you want, for example, in small
+and fast CGI applications.
+
+=item proxy_quote
+
+This attribute can be used to reduce network traffic: By default calls
+to $dbh->quote() are passed to the remote driver. Of course this slows
+down things quite a lot, but is the safest default behaviour.
+
+However, if you set the I<proxy_quote> attribute to the value 'C<local>'
+either in the database handle or in the statement handle, and the call
+to quote has only one parameter, then the local default DBI quote
+method will be used (which will be faster but may be wrong).
+
+=back
+
+=head1 KNOWN ISSUES
+
+=head2 Unproxied method calls
+
+If a method isn't being proxied, try declaring a stub sub in the appropriate
+package (DBD::Proxy::db for a dbh method, and DBD::Proxy::st for an sth method).
+For example:
+
+ sub DBD::Proxy::db::selectall_arrayref;
+
+That will enable selectall_arrayref to be proxied.
+
+Currently many methods aren't explicitly proxied and so you get the DBI's
+default methods executed on the client.
+
+Some of those methods, like selectall_arrayref, may then call other methods
+that are proxied (selectall_arrayref calls fetchall_arrayref which calls fetch
+which is proxied). So things may appear to work but operate more slowly than
+they could.
+
+This may all change in a later version.
+
+=head2 Complex handle attributes
+
+Sometimes handles are having complex attributes like hash refs or
+array refs and not simple strings or integers. For example, with
+DBD::CSV, you would like to write something like
+
+ $dbh->{"csv_tables"}->{"passwd"} =
+        { "sep_char" => ":", "eol" => "\n" };
+
+The above example would advise the CSV driver to assume the file
+"passwd" to be in the format of the /etc/passwd file: Colons as
+separators and a line feed without carriage return as line
+terminator.
+
+Surprisingly this example doesn't work with the proxy driver. To understand
+the reasons, you should consider the following: The Perl compiler is
+executing the above example in two steps:
+
+=over
+
+=item 1
+
+The first step is fetching the value of the key "csv_tables" in the
+handle $dbh. The value returned is complex, a hash ref.
+
+=item 2
+
+The second step is storing some value (the right hand side of the
+assignment) as the key "passwd" in the hash ref from step 1.
+
+=back
+
+This becomes a little bit clearer, if we rewrite the above code:
+
+ $tables = $dbh->{"csv_tables"};
+    $tables->{"passwd"} = { "sep_char" => ":", "eol" => "\n" };
+
+While the examples work fine without the proxy, they fail due to a
+subtle difference in step 1: By DBI magic, the hash ref
+$dbh->{'csv_tables'} is returned from the server to the client.
+The client creates a local copy. This local copy is the result of
+step 1. In other words, step 2 modifies a local copy of the hash ref,
+but not the server's hash ref.
+
+The workaround is storing the modified local copy back to the server:
+
+ $tables = $dbh->{"csv_tables"};
+    $tables->{"passwd"} = { "sep_char" => ":", "eol" => "\n" };
+ $dbh->{"csv_tables"} = $tables;
+
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is Copyright (c) 1997, 1998
+
+ Jochen Wiedmann
+ Am Eisteich 9
+ 72555 Metzingen
+ Germany
+
+ Email: joe@ispsoft.de
+ Phone: +49 7123 14887
+
+The DBD::Proxy module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. In particular permission
+is granted to Tim Bunce for distributing this as a part of the DBI.
+
+
+=head1 SEE ALSO
+
+L<DBI>, L<RPC::PlClient>, L<Storable>
+
+=cut
diff --git a/lib/DBD/Sponge.pm b/lib/DBD/Sponge.pm
new file mode 100644
index 0000000..2413bc0
--- /dev/null
+++ b/lib/DBD/Sponge.pm
@@ -0,0 +1,305 @@
+{
+    package DBD::Sponge;
+
+    require DBI;
+    require Carp;
+
+    our @EXPORT = qw(); # Do NOT @EXPORT anything.
+    our $VERSION = sprintf("12.%06d", q$Revision: 10002 $ =~ /(\d+)/o);
+
+
+# $Id: Sponge.pm 10002 2007-09-26 21:03:25Z timbo $
+#
+# Copyright (c) 1994-2003 Tim Bunce Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+    $drh = undef;	# holds driver handle once initialised
+    my $methods_already_installed;
+
+    # driver: return the per-process singleton driver handle, creating
+    # it on first call.  The private db-handle method is registered
+    # with the DBI exactly once (guarded by $methods_already_installed)
+    # and must happen before handles of that class are created.
+    sub driver{
+	return $drh if $drh;
+
+	DBD::Sponge::db->install_method("sponge_test_installed_method")
+	    unless $methods_already_installed++;
+
+	my($class, $attr) = @_;
+	$class .= "::dr";
+	($drh) = DBI::_new_drh($class, {
+	    'Name' => 'Sponge',
+	    'Version' => $VERSION,
+	    'Attribution' => "DBD::Sponge $VERSION (fake cursor driver) by Tim Bunce",
+	    });
+	$drh;
+    }
+
+    # CLONE: called by perl on thread creation; drop the cached driver
+    # handle so the new thread builds its own via driver().
+    sub CLONE {
+	undef $drh;
+    }
+}
+
+
+{ package DBD::Sponge::dr; # ====== DRIVER ======
+    # Required by the DBI for every implementation class; Sponge keeps
+    # no private driver-handle data.
+    $imp_data_size = 0;
+    # we use default (dummy) connect method
+}
+
+
+{ package DBD::Sponge::db; # ====== DATABASE ======
+    $imp_data_size = 0;
+    use strict;
+
+    # prepare: build a statement handle directly from caller-supplied
+    # data.  The mandatory $attribs->{rows} (array of array refs)
+    # becomes the result set.  For INSERT-like statements the rows are
+    # appended to at execute() time; for everything else the metadata
+    # attributes (NUM_OF_FIELDS, NAME, TYPE, PRECISION, SCALE,
+    # NULLABLE) are derived from $attribs or from the data itself.
+    sub prepare {
+	my($dbh, $statement, $attribs) = @_;
+	my $rows = delete $attribs->{'rows'}
+	    or return $dbh->set_err($DBI::stderr,"No rows attribute supplied to prepare");
+	my ($outer, $sth) = DBI::_new_sth($dbh, {
+	    'Statement'   => $statement,
+	    'rows'        => $rows,
+	    # Pass through only the optional attributes we understand.
+	    (map { exists $attribs->{$_} ? ($_=>$attribs->{$_}) : () }
+		qw(execute_hook)
+	    ),
+	});
+	if (my $behave_like = $attribs->{behave_like}) {
+	    # Copy error-handling disposition from another handle so
+	    # the sponge handle reacts to errors the same way.
+	    $outer->{$_} = $behave_like->{$_}
+		foreach (qw(RaiseError PrintError HandleError ShowErrorStatement));
+	}
+
+	if ($statement =~ /^\s*insert\b/) {	# very basic, just for testing execute_array()
+	    $sth->{is_insert} = 1;
+	    my $NUM_OF_PARAMS = $attribs->{NUM_OF_PARAMS}
+		or return $dbh->set_err($DBI::stderr,"NUM_OF_PARAMS not specified for INSERT statement");
+	    $sth->STORE('NUM_OF_PARAMS' => $attribs->{NUM_OF_PARAMS} );
+	}
+	else {	#assume select
+
+	    # we need to set NUM_OF_FIELDS
+	    # Preference order: explicit attribute, then the width of
+	    # NAME/TYPE, then the width of the first data row.
+	    my $numFields;
+	    if ($attribs->{'NUM_OF_FIELDS'}) {
+		$numFields = $attribs->{'NUM_OF_FIELDS'};
+	    } elsif ($attribs->{'NAME'}) {
+		$numFields = @{$attribs->{NAME}};
+	    } elsif ($attribs->{'TYPE'}) {
+		$numFields = @{$attribs->{TYPE}};
+	    } elsif (my $firstrow = $rows->[0]) {
+		$numFields = scalar @$firstrow;
+	    } else {
+		return $dbh->set_err($DBI::stderr, 'Cannot determine NUM_OF_FIELDS');
+	    }
+	    # NUM_OF_FIELDS must go through STORE (DBI-managed); the
+	    # others are plain driver-private hash entries with
+	    # defaults synthesized from $numFields.
+	    $sth->STORE('NUM_OF_FIELDS' => $numFields);
+	    $sth->{NAME} = $attribs->{NAME}
+		    || [ map { "col$_" } 1..$numFields ];
+	    $sth->{TYPE} = $attribs->{TYPE}
+		    || [ (DBI::SQL_VARCHAR()) x $numFields ];
+	    $sth->{PRECISION} = $attribs->{PRECISION}
+		    || [ map { length($sth->{NAME}->[$_]) } 0..$numFields -1 ];
+	    $sth->{SCALE} = $attribs->{SCALE}
+		    || [ (0) x $numFields ];
+	    $sth->{NULLABLE} = $attribs->{NULLABLE}
+		    || [ (2) x $numFields ];
+	}
+
+	$outer;
+    }
+
+    # type_info_all: minimal type metadata — a single VARCHAR entry —
+    # in the standard DBI type_info_all shape (index map + rows).
+    sub type_info_all {
+	my ($dbh) = @_;
+	my $ti = [
+	    {	TYPE_NAME	=> 0,
+		DATA_TYPE	=> 1,
+		PRECISION	=> 2,
+		LITERAL_PREFIX	=> 3,
+		LITERAL_SUFFIX	=> 4,
+		CREATE_PARAMS	=> 5,
+		NULLABLE	=> 6,
+		CASE_SENSITIVE	=> 7,
+		SEARCHABLE	=> 8,
+		UNSIGNED_ATTRIBUTE=> 9,
+		MONEY		=> 10,
+		AUTO_INCREMENT	=> 11,
+		LOCAL_TYPE_NAME	=> 12,
+		MINIMUM_SCALE	=> 13,
+		MAXIMUM_SCALE	=> 14,
+	    },
+	    [ 'VARCHAR', DBI::SQL_VARCHAR(), undef, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ],
+	];
+	return $ti;
+    }
+
+    sub FETCH {
+	my ($dbh, $attrib) = @_;
+	# In reality this would interrogate the database engine to
+	# either return dynamic values that cannot be precomputed
+	# or fetch and cache attribute values too expensive to prefetch.
+	return 1 if $attrib eq 'AutoCommit';
+	# else pass up to DBI to handle
+	return $dbh->SUPER::FETCH($attrib);
+    }
+
+    sub STORE {
+	my ($dbh, $attrib, $value) = @_;
+	# would normally validate and only store known attributes
+	# else pass up to DBI to handle
+	if ($attrib eq 'AutoCommit') {
+	    # AutoCommit is permanently on: there is no real engine to
+	    # commit against, so disabling it cannot be honoured.
+	    return 1 if $value; # is already set
+	    Carp::croak("Can't disable AutoCommit");
+	}
+	return $dbh->SUPER::STORE($attrib, $value);
+    }
+
+    # sponge_test_installed_method: exists only to exercise
+    # install_method() in the DBI test suite; echoes its arguments.
+    sub sponge_test_installed_method {
+	my ($dbh, @args) = @_;
+	return $dbh->set_err(42, "not enough parameters") unless @args >= 2;
+	return \@args;
+    }
+}
+
+
+{ package DBD::Sponge::st; # ====== STATEMENT ======
+    $imp_data_size = 0;
+    use strict;
+
+    # execute: for INSERT-like handles, append the bound values as a
+    # new row; for SELECT-like handles the data was supplied at
+    # prepare() time, so just mark the handle Active.
+    sub execute {
+	my $sth = shift;
+	my @bound = @_;
+
+	# hack to support ParamValues (when not using bind_param):
+	# expose the values under 1-based parameter numbers.
+	if (@bound) {
+	    my %param_values;
+	    $param_values{$_} = $bound[$_ - 1] for 1 .. @bound;
+	    $sth->{ParamValues} = \%param_values;
+	}
+	else {
+	    $sth->{ParamValues} = undef;
+	}
+
+	# An execute_hook supplied at prepare() time may veto execution.
+	if (my $hook = $sth->{execute_hook}) {
+	    $hook->($sth, @bound) or return;
+	}
+
+	if ($sth->{is_insert}) {
+	    die "bind_param not supported yet" unless @bound;
+	    my $row = [ @bound ];
+	    my $NUM_OF_PARAMS = $sth->{NUM_OF_PARAMS};
+	    return $sth->set_err($DBI::stderr, @$row." values bound (@$row) but $NUM_OF_PARAMS expected")
+		if @$row != $NUM_OF_PARAMS;
+	    { local $^W; $sth->trace_msg("inserting (@$row)\n"); }
+	    push @{ $sth->{rows} }, $row;
+	}
+	else { # select: data already in $sth->{rows}, mark sth Active
+	    $sth->STORE(Active => 1);
+	}
+	return 1;
+    }
+
+    # fetch: hand the next queued row to the DBI, or deactivate the
+    # handle once the queue is empty.
+    sub fetch {
+	my ($sth) = @_;
+	my $row = shift @{$sth->{'rows'}};
+	return $sth->_set_fbav($row) if $row;
+	$sth->STORE(Active => 0); # result set exhausted
+	return undef;
+    }
+    *fetchrow_arrayref = \&fetch;
+
+    # FETCH/STORE: no statement-level attributes of our own; a real
+    # driver would validate known attributes before delegating.
+    sub FETCH {
+	my ($sth, $attrib) = @_;
+	return $sth->SUPER::FETCH($attrib);
+    }
+
+    sub STORE {
+	my ($sth, $attrib, $value) = @_;
+	return $sth->SUPER::STORE($attrib, $value);
+    }
+}
+
+1;
+
+__END__
+
+=pod
+
+=head1 NAME
+
+DBD::Sponge - Create a DBI statement handle from Perl data
+
+=head1 SYNOPSIS
+
+ my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 });
+ my $sth = $sponge->prepare($statement, {
+ rows => $data,
+ NAME => $names,
+ %attr
+ }
+ );
+
+=head1 DESCRIPTION
+
+DBD::Sponge is useful for making a Perl data structure accessible through a
+standard DBI statement handle. This may be useful to DBD module authors who
+need to transform data in this way.
+
+=head1 METHODS
+
+=head2 connect()
+
+ my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 });
+
+Here's a sample syntax for creating a database handle for the Sponge driver.
+No username and password are needed.
+
+=head2 prepare()
+
+ my $sth = $sponge->prepare($statement, {
+ rows => $data,
+ NAME => $names,
+ %attr
+ }
+ );
+
+=over 4
+
+=item *
+
+The C<$statement> here is an arbitrary statement or name you want
+to provide as identity of your data. If you're using DBI::Profile
+it will appear in the profile data.
+
+Generally it's expected that you are preparing a statement handle
+as if a C<select> statement happened.
+
+=item *
+
+C<$data> is a reference to the data you are providing, given as an array of arrays.
+
+=item *
+
+C<$names> is a reference to an array of column names for the C<$data> you are providing.
+The number and order should match the number and ordering of the C<$data> columns.
+
+=item *
+
+C<%attr> is a hash of other standard DBI attributes that you might pass to a prepare statement.
+
+Currently NAME, TYPE, PRECISION, SCALE, and NULLABLE are supported.
+
+=back
+
+=head1 BUGS
+
+Using this module to prepare INSERT-like statements is not currently documented.
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is Copyright (c) 2003 Tim Bunce
+
+Documentation initially written by Mark Stosberg
+
+The DBD::Sponge module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. In particular permission
+is granted to Tim Bunce for distributing this as a part of the DBI.
+
+=head1 SEE ALSO
+
+L<DBI>
+
+=cut