summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Bundle/DBI.pm51
-rw-r--r--lib/DBD/DBM.pm1461
-rw-r--r--lib/DBD/ExampleP.pm428
-rw-r--r--lib/DBD/File.pm1637
-rw-r--r--lib/DBD/File/Developers.pod556
-rw-r--r--lib/DBD/File/HowTo.pod270
-rw-r--r--lib/DBD/File/Roadmap.pod176
-rw-r--r--lib/DBD/Gofer.pm1292
-rw-r--r--lib/DBD/Gofer/Policy/Base.pm162
-rw-r--r--lib/DBD/Gofer/Policy/classic.pm79
-rw-r--r--lib/DBD/Gofer/Policy/pedantic.pm53
-rw-r--r--lib/DBD/Gofer/Policy/rush.pm90
-rw-r--r--lib/DBD/Gofer/Transport/Base.pm410
-rw-r--r--lib/DBD/Gofer/Transport/corostream.pm144
-rw-r--r--lib/DBD/Gofer/Transport/null.pm111
-rw-r--r--lib/DBD/Gofer/Transport/pipeone.pm253
-rw-r--r--lib/DBD/Gofer/Transport/stream.pm292
-rw-r--r--lib/DBD/NullP.pm166
-rw-r--r--lib/DBD/Proxy.pm997
-rw-r--r--lib/DBD/Sponge.pm305
-rw-r--r--lib/DBI/Const/GetInfo/ANSI.pm236
-rw-r--r--lib/DBI/Const/GetInfo/ODBC.pm1363
-rw-r--r--lib/DBI/Const/GetInfoReturn.pm105
-rw-r--r--lib/DBI/Const/GetInfoType.pm54
-rw-r--r--lib/DBI/DBD.pm3489
-rw-r--r--lib/DBI/DBD/Metadata.pm493
-rw-r--r--lib/DBI/DBD/SqlEngine.pm1232
-rw-r--r--lib/DBI/DBD/SqlEngine/Developers.pod422
-rw-r--r--lib/DBI/DBD/SqlEngine/HowTo.pod218
-rw-r--r--lib/DBI/FAQ.pm966
-rw-r--r--lib/DBI/Gofer/Execute.pm900
-rw-r--r--lib/DBI/Gofer/Request.pm200
-rw-r--r--lib/DBI/Gofer/Response.pm218
-rw-r--r--lib/DBI/Gofer/Serializer/Base.pm64
-rw-r--r--lib/DBI/Gofer/Serializer/DataDumper.pm53
-rw-r--r--lib/DBI/Gofer/Serializer/Storable.pm59
-rw-r--r--lib/DBI/Gofer/Transport/Base.pm176
-rw-r--r--lib/DBI/Gofer/Transport/pipeone.pm61
-rw-r--r--lib/DBI/Gofer/Transport/stream.pm76
-rw-r--r--lib/DBI/Profile.pm949
-rw-r--r--lib/DBI/ProfileData.pm737
-rw-r--r--lib/DBI/ProfileDumper.pm351
-rw-r--r--lib/DBI/ProfileDumper/Apache.pm219
-rw-r--r--lib/DBI/ProfileSubs.pm50
-rw-r--r--lib/DBI/ProxyServer.pm890
-rw-r--r--lib/DBI/PurePerl.pm1259
-rw-r--r--lib/DBI/SQL/Nano.pm1010
-rw-r--r--lib/DBI/Util/CacheMemory.pm117
-rw-r--r--lib/DBI/Util/_accessor.pm65
-rw-r--r--lib/DBI/W32ODBC.pm181
-rw-r--r--lib/Win32/DBIODBC.pm248
51 files changed, 25394 insertions, 0 deletions
diff --git a/lib/Bundle/DBI.pm b/lib/Bundle/DBI.pm
new file mode 100644
index 0000000..50375a3
--- /dev/null
+++ b/lib/Bundle/DBI.pm
@@ -0,0 +1,51 @@
+# -*- perl -*-
+
+package Bundle::DBI;
+
+our $VERSION = sprintf("12.%06d", q$Revision: 8695 $ =~ /(\d+)/o);
+
+1;
+
+__END__
+
+=head1 NAME
+
+Bundle::DBI - A bundle to install DBI and required modules.
+
+=head1 SYNOPSIS
+
+ perl -MCPAN -e 'install Bundle::DBI'
+
+=head1 CONTENTS
+
+DBI - for to get to know thyself
+
+DBI::Shell 11.91 - the DBI command line shell
+
+Storable 2.06 - for DBD::Proxy, DBI::ProxyServer, DBD::Forward
+
+Net::Daemon 0.37 - for DBD::Proxy and DBI::ProxyServer
+
+RPC::PlServer 0.2016 - for DBD::Proxy and DBI::ProxyServer
+
+DBD::Multiplex 1.19 - treat multiple db handles as one
+
+=head1 DESCRIPTION
+
+This bundle includes all the modules used by the Perl Database
+Interface (DBI) module, created by Tim Bunce.
+
+A I<Bundle> is a module that simply defines a collection of other
+modules. It is used by the L<CPAN> module to automate the fetching,
+building and installing of modules from the CPAN ftp archive sites.
+
+This bundle does not deal with the various database drivers (e.g.
+DBD::Informix, DBD::Oracle etc), most of which require software from
+sources other than CPAN. You'll need to fetch and build those drivers
+yourself.
+
+=head1 AUTHORS
+
+Jonathan Leffler, Jochen Wiedmann and Tim Bunce.
+
+=cut
diff --git a/lib/DBD/DBM.pm b/lib/DBD/DBM.pm
new file mode 100644
index 0000000..3c621a3
--- /dev/null
+++ b/lib/DBD/DBM.pm
@@ -0,0 +1,1461 @@
+#######################################################################
+#
+# DBD::DBM - a DBI driver for DBM files
+#
+# Copyright (c) 2004 by Jeff Zucker < jzucker AT cpan.org >
+# Copyright (c) 2010 by Jens Rehsack & H.Merijn Brand
+#
+# All rights reserved.
+#
+# You may freely distribute and/or modify this module under the terms
+# of either the GNU General Public License (GPL) or the Artistic License,
+# as specified in the Perl README file.
+#
+# USERS - see the pod at the bottom of this file
+#
+# DBD AUTHORS - see the comments in the code
+#
+#######################################################################
+require 5.008;
+use strict;
+
+#################
+package DBD::DBM;
+#################
+use base qw( DBD::File );
+use vars qw($VERSION $ATTRIBUTION $drh $methods_already_installed);
+$VERSION = '0.06';
+$ATTRIBUTION = 'DBD::DBM by Jens Rehsack';
+
+# no need to have driver() unless you need private methods
+#
+sub driver ($;$)
+{
+ my ( $class, $attr ) = @_;
+ return $drh if ($drh);
+
+ # do the real work in DBD::File
+ #
+ $attr->{Attribution} = 'DBD::DBM by Jens Rehsack';
+ $drh = $class->SUPER::driver($attr);
+
+ # install private methods
+ #
+ # this requires that dbm_ (or foo_) be a registered prefix
+ # but you can write private methods before official registration
+ # by hacking the $dbd_prefix_registry in a private copy of DBI.pm
+ #
+ unless ( $methods_already_installed++ )
+ {
+ DBD::DBM::st->install_method('dbm_schema');
+ }
+
+ return $drh;
+}
+
+sub CLONE
+{
+ undef $drh;
+}
+
+#####################
+package DBD::DBM::dr;
+#####################
+$DBD::DBM::dr::imp_data_size = 0;
+@DBD::DBM::dr::ISA = qw(DBD::File::dr);
+
+# you could put some :dr private methods here
+
+# you may need to over-ride some DBD::File::dr methods here
+# but you can probably get away with just letting it do the work
+# in most cases
+
+#####################
+package DBD::DBM::db;
+#####################
+$DBD::DBM::db::imp_data_size = 0;
+@DBD::DBM::db::ISA = qw(DBD::File::db);
+
+sub validate_STORE_attr
+{
+ my ( $dbh, $attrib, $value ) = @_;
+
+ if ( $attrib eq "dbm_ext" or $attrib eq "dbm_lockfile" )
+ {
+ ( my $newattrib = $attrib ) =~ s/^dbm_/f_/g;
+    # carp "Attribute '$attrib' is deprecated, use '$newattrib' instead" if( $^W );
+ $attrib = $newattrib;
+ }
+
+ return $dbh->SUPER::validate_STORE_attr( $attrib, $value );
+}
+
+sub validate_FETCH_attr
+{
+ my ( $dbh, $attrib ) = @_;
+
+ if ( $attrib eq "dbm_ext" or $attrib eq "dbm_lockfile" )
+ {
+ ( my $newattrib = $attrib ) =~ s/^dbm_/f_/g;
+    # carp "Attribute '$attrib' is deprecated, use '$newattrib' instead" if( $^W );
+ $attrib = $newattrib;
+ }
+
+ return $dbh->SUPER::validate_FETCH_attr($attrib);
+}
+
+sub set_versions
+{
+ my $this = $_[0];
+ $this->{dbm_version} = $DBD::DBM::VERSION;
+ return $this->SUPER::set_versions();
+}
+
+sub init_valid_attributes
+{
+ my $dbh = shift;
+
+ # define valid private attributes
+ #
+ # attempts to set non-valid attrs in connect() or
+ # with $dbh->{attr} will throw errors
+ #
+ # the attrs here *must* start with dbm_ or foo_
+ #
+ # see the STORE methods below for how to check these attrs
+ #
+ $dbh->{dbm_valid_attrs} = {
+ dbm_type => 1, # the global DBM type e.g. SDBM_File
+ dbm_mldbm => 1, # the global MLDBM serializer
+ dbm_cols => 1, # the global column names
+ dbm_version => 1, # verbose DBD::DBM version
+ dbm_store_metadata => 1, # column names, etc.
+ dbm_berkeley_flags => 1, # for BerkeleyDB
+ dbm_valid_attrs => 1, # DBD::DBM::db valid attrs
+ dbm_readonly_attrs => 1, # DBD::DBM::db r/o attrs
+ dbm_meta => 1, # DBD::DBM public access for f_meta
+ dbm_tables => 1, # DBD::DBM public access for f_meta
+ };
+ $dbh->{dbm_readonly_attrs} = {
+ dbm_version => 1, # verbose DBD::DBM version
+ dbm_valid_attrs => 1, # DBD::DBM::db valid attrs
+ dbm_readonly_attrs => 1, # DBD::DBM::db r/o attrs
+ dbm_meta => 1, # DBD::DBM public access for f_meta
+ };
+
+ $dbh->{dbm_meta} = "dbm_tables";
+
+ return $dbh->SUPER::init_valid_attributes();
+}
+
+sub init_default_attributes
+{
+ my ( $dbh, $phase ) = @_;
+
+ $dbh->SUPER::init_default_attributes($phase);
+ $dbh->{f_lockfile} = '.lck';
+
+ return $dbh;
+}
+
+sub get_dbm_versions
+{
+ my ( $dbh, $table ) = @_;
+ $table ||= '';
+
+ my $meta;
+ my $class = $dbh->{ImplementorClass};
+ $class =~ s/::db$/::Table/;
+ $table and ( undef, $meta ) = $class->get_table_meta( $dbh, $table, 1 );
+ $meta or ( $meta = {} and $class->bootstrap_table_meta( $dbh, $meta, $table ) );
+
+ my $dver;
+ my $dtype = $meta->{dbm_type};
+ eval {
+ $dver = $meta->{dbm_type}->VERSION();
+
+        # *) when we're still alive here, everything went ok - no need to check for $@
+ $dtype .= " ($dver)";
+ };
+ if ( $meta->{dbm_mldbm} )
+ {
+ $dtype .= ' + MLDBM';
+ eval {
+ $dver = MLDBM->VERSION();
+ $dtype .= " ($dver)"; # (*)
+ };
+ eval {
+ my $ser_class = "MLDBM::Serializer::" . $meta->{dbm_mldbm};
+ my $ser_mod = $ser_class;
+ $ser_mod =~ s|::|/|g;
+ $ser_mod .= ".pm";
+ require $ser_mod;
+ $dver = $ser_class->VERSION();
+ $dtype .= ' + ' . $ser_class; # (*)
+ $dver and $dtype .= " ($dver)"; # (*)
+ };
+ }
+ return sprintf( "%s using %s", $dbh->{dbm_version}, $dtype );
+}
+
+# you may need to over-ride some DBD::File::db methods here
+# but you can probably get away with just letting it do the work
+# in most cases
+
+#####################
+package DBD::DBM::st;
+#####################
+$DBD::DBM::st::imp_data_size = 0;
+@DBD::DBM::st::ISA = qw(DBD::File::st);
+
+sub FETCH
+{
+ my ( $sth, $attr ) = @_;
+
+ if ( $attr eq "NULLABLE" )
+ {
+ my @colnames = $sth->sql_get_colnames();
+
+ # XXX only BerkeleyDB fails having NULL values for non-MLDBM databases,
+ # none accept it for key - but it requires more knowledge between
+ # queries and tables storage to return fully correct information
+ $attr eq "NULLABLE" and return [ map { 0 } @colnames ];
+ }
+
+ return $sth->SUPER::FETCH($attr);
+} # FETCH
+
+sub dbm_schema
+{
+ my ( $sth, $tname ) = @_;
+ return $sth->set_err( $DBI::stderr, 'No table name supplied!' ) unless $tname;
+ return $sth->set_err( $DBI::stderr, "Unknown table '$tname'!" )
+ unless ( $sth->{Database}->{f_meta}
+ and $sth->{Database}->{f_meta}->{$tname} );
+ return $sth->{Database}->{f_meta}->{$tname}->{schema};
+}
+# you could put some :st private methods here
+
+# you may need to over-ride some DBD::File::st methods here
+# but you can probably get away with just letting it do the work
+# in most cases
+
+############################
+package DBD::DBM::Statement;
+############################
+
+@DBD::DBM::Statement::ISA = qw(DBD::File::Statement);
+
+########################
+package DBD::DBM::Table;
+########################
+use Carp;
+use Fcntl;
+
+@DBD::DBM::Table::ISA = qw(DBD::File::Table);
+
+my $dirfext = $^O eq 'VMS' ? '.sdbm_dir' : '.dir';
+
+sub file2table
+{
+ my ( $self, $meta, $file, $file_is_table, $quoted ) = @_;
+
+ my $tbl = $self->SUPER::file2table( $meta, $file, $file_is_table, $quoted ) or return;
+
+ $meta->{f_dontopen} = 1;
+
+ return $tbl;
+}
+
+my %reset_on_modify = (
+ dbm_type => "dbm_tietype",
+ dbm_mldbm => "dbm_tietype",
+ );
+__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+
+my %compat_map = (
+ ( map { $_ => "dbm_$_" } qw(type mldbm store_metadata) ),
+ dbm_ext => 'f_ext',
+ dbm_file => 'f_file',
+ dbm_lockfile => ' f_lockfile',
+ );
+__PACKAGE__->register_compat_map (\%compat_map);
+
+sub bootstrap_table_meta
+{
+ my ( $self, $dbh, $meta, $table ) = @_;
+
+ $meta->{dbm_type} ||= $dbh->{dbm_type} || 'SDBM_File';
+ $meta->{dbm_mldbm} ||= $dbh->{dbm_mldbm} if ( $dbh->{dbm_mldbm} );
+ $meta->{dbm_berkeley_flags} ||= $dbh->{dbm_berkeley_flags};
+
+ defined $meta->{f_ext}
+ or $meta->{f_ext} = $dbh->{f_ext};
+ unless ( defined( $meta->{f_ext} ) )
+ {
+ my $ext;
+ if ( $meta->{dbm_type} eq 'SDBM_File' or $meta->{dbm_type} eq 'ODBM_File' )
+ {
+ $ext = '.pag/r';
+ }
+ elsif ( $meta->{dbm_type} eq 'NDBM_File' )
+ {
+ # XXX NDBM_File on FreeBSD (and elsewhere?) may actually be Berkeley
+ # behind the scenes and so create a single .db file.
+ if ( $^O =~ /bsd/i or lc($^O) eq 'darwin' )
+ {
+ $ext = '.db/r';
+ }
+ elsif ( $^O eq 'SunOS' or $^O eq 'Solaris' or $^O eq 'AIX' )
+ {
+ $ext = '.pag/r'; # here it's implemented like dbm - just a bit improved
+ }
+ # else wrapped GDBM
+ }
+ defined($ext) and $meta->{f_ext} = $ext;
+ }
+
+ $self->SUPER::bootstrap_table_meta( $dbh, $meta, $table );
+}
+
+sub init_table_meta
+{
+ my ( $self, $dbh, $meta, $table ) = @_;
+
+ unless ( defined( $meta->{dbm_tietype} ) )
+ {
+ my $tie_type = $meta->{dbm_type};
+ $INC{"$tie_type.pm"} or require "$tie_type.pm";
+ $tie_type eq 'BerkeleyDB' and $tie_type = 'BerkeleyDB::Hash';
+
+ if ( $meta->{dbm_mldbm} )
+ {
+ $INC{"MLDBM.pm"} or require "MLDBM.pm";
+ $meta->{dbm_usedb} = $tie_type;
+ $tie_type = 'MLDBM';
+ }
+
+ $meta->{dbm_tietype} = $tie_type;
+ }
+
+ unless ( defined( $meta->{dbm_store_metadata} ) )
+ {
+ my $store = $dbh->{dbm_store_metadata};
+ defined($store) or $store = 1;
+ $meta->{dbm_store_metadata} = $store;
+ }
+
+ unless ( defined( $meta->{col_names} ) )
+ {
+ defined( $dbh->{dbm_cols} ) and $meta->{col_names} = $dbh->{dbm_cols};
+ }
+
+ $self->SUPER::init_table_meta( $dbh, $meta, $table );
+}
+
+sub open_file
+{
+ my ( $self, $meta, $attrs, $flags ) = @_;
+ $self->SUPER::open_file( $meta, $attrs, $flags );
+ unless ( $flags->{dropMode} )
+ {
+ # TIEING
+ #
+ # XXX allow users to pass in a pre-created tied object
+ #
+ my @tie_args;
+ if ( $meta->{dbm_type} eq 'BerkeleyDB' )
+ {
+ my $DB_CREATE = BerkeleyDB::DB_CREATE();
+ my $DB_RDONLY = BerkeleyDB::DB_RDONLY();
+ my %tie_flags;
+ if ( my $f = $meta->{dbm_berkeley_flags} )
+ {
+ defined( $f->{DB_CREATE} ) and $DB_CREATE = delete $f->{DB_CREATE};
+ defined( $f->{DB_RDONLY} ) and $DB_RDONLY = delete $f->{DB_RDONLY};
+ %tie_flags = %$f;
+ }
+ my $open_mode = $flags->{lockMode} || $flags->{createMode} ? $DB_CREATE : $DB_RDONLY;
+ @tie_args = (
+ -Filename => $meta->{f_fqbn},
+ -Flags => $open_mode,
+ %tie_flags
+ );
+ }
+ else
+ {
+ my $open_mode = O_RDONLY;
+ $flags->{lockMode} and $open_mode = O_RDWR;
+ $flags->{createMode} and $open_mode = O_RDWR | O_CREAT | O_TRUNC;
+
+ @tie_args = ( $meta->{f_fqbn}, $open_mode, 0666 );
+ }
+
+ if ( $meta->{dbm_mldbm} )
+ {
+ $MLDBM::UseDB = $meta->{dbm_usedb};
+ $MLDBM::Serializer = $meta->{dbm_mldbm};
+ }
+
+ $meta->{hash} = {};
+ my $tie_class = $meta->{dbm_tietype};
+ eval { tie %{ $meta->{hash} }, $tie_class, @tie_args };
+ $@ and croak "Cannot tie(\%h $tie_class @tie_args): $@";
+ -f $meta->{f_fqfn} or croak( "No such file: '" . $meta->{f_fqfn} . "'" );
+ }
+
+ unless ( $flags->{createMode} )
+ {
+ my ( $meta_data, $schema, $col_names );
+ if ( $meta->{dbm_store_metadata} )
+ {
+ $meta_data = $col_names = $meta->{hash}->{"_metadata \0"};
+ if ( $meta_data and $meta_data =~ m~<dbd_metadata>(.+)</dbd_metadata>~is )
+ {
+ $schema = $col_names = $1;
+ $schema =~ s~.*<schema>(.+)</schema>.*~$1~is;
+ $col_names =~ s~.*<col_names>(.+)</col_names>.*~$1~is;
+ }
+ }
+ $col_names ||= $meta->{col_names} || [ 'k', 'v' ];
+ $col_names = [ split /,/, $col_names ] if ( ref $col_names ne 'ARRAY' );
+ if ( $meta->{dbm_store_metadata} and not $meta->{hash}->{"_metadata \0"} )
+ {
+ $schema or $schema = '';
+ $meta->{hash}->{"_metadata \0"} =
+ "<dbd_metadata>"
+ . "<schema>$schema</schema>"
+ . "<col_names>"
+ . join( ",", @{$col_names} )
+ . "</col_names>"
+ . "</dbd_metadata>";
+ }
+
+ $meta->{schema} = $schema;
+ $meta->{col_names} = $col_names;
+ }
+}
+
+# you must define drop
+# it is called from execute of a SQL DROP statement
+#
+sub drop ($$)
+{
+ my ( $self, $data ) = @_;
+ my $meta = $self->{meta};
+ $meta->{hash} and untie %{ $meta->{hash} };
+ $self->SUPER::drop($data);
+ # XXX extra_files
+ -f $meta->{f_fqbn} . $dirfext
+ and $meta->{f_ext} eq '.pag/r'
+ and unlink( $meta->{f_fqbn} . $dirfext );
+ return 1;
+}
+
+# you must define fetch_row, it is called on all fetches;
+# it MUST return undef when no rows are left to fetch;
+# checking for $ary[0] is specific to hashes so you'll
+# probably need some other kind of check for nothing-left.
+# as Janis might say: "undef's just another word for
+# nothing left to fetch" :-)
+#
+sub fetch_row ($$)
+{
+ my ( $self, $data ) = @_;
+ my $meta = $self->{meta};
+ # fetch with %each
+ #
+ my @ary = each %{ $meta->{hash} };
+ $meta->{dbm_store_metadata}
+ and $ary[0]
+ and $ary[0] eq "_metadata \0"
+ and @ary = each %{ $meta->{hash} };
+
+ my ( $key, $val ) = @ary;
+ unless ($key)
+ {
+ delete $self->{row};
+ return;
+ }
+ my @row = ( ref($val) eq 'ARRAY' ) ? ( $key, @$val ) : ( $key, $val );
+ $self->{row} = @row ? \@row : undef;
+ return wantarray ? @row : \@row;
+}
+
+# you must define push_row unless insert_new_row and update_specific_row are defined;
+# it is called on inserts and updates as primitive
+#
+sub insert_new_row ($$$)
+{
+ my ( $self, $data, $row_aryref ) = @_;
+ my $meta = $self->{meta};
+ my $ncols = scalar( @{ $meta->{col_names} } );
+ my $nitems = scalar( @{$row_aryref} );
+ $ncols == $nitems
+ or croak "You tried to insert $nitems, but table is created with $ncols columns";
+
+ my $key = shift @$row_aryref;
+ my $exists;
+ eval { $exists = exists( $meta->{hash}->{$key} ); };
+ $exists and croak "Row with PK '$key' already exists";
+
+ $meta->{hash}->{$key} = $meta->{dbm_mldbm} ? $row_aryref : $row_aryref->[0];
+
+ return 1;
+}
+
+# this is where you grab the column names from a CREATE statement
+# if you don't need to do that, it must be defined but can be empty
+#
+sub push_names ($$$)
+{
+ my ( $self, $data, $row_aryref ) = @_;
+ my $meta = $self->{meta};
+
+ # some sanity checks ...
+ my $ncols = scalar(@$row_aryref);
+ $ncols < 2 and croak "At least 2 columns are required for DBD::DBM tables ...";
+ !$meta->{dbm_mldbm}
+ and $ncols > 2
+ and croak "Without serializing with MLDBM only 2 columns are supported, you give $ncols";
+ $meta->{col_names} = $row_aryref;
+ return unless $meta->{dbm_store_metadata};
+
+ my $stmt = $data->{sql_stmt};
+ my $col_names = join( ',', @{$row_aryref} );
+ my $schema = $data->{Database}->{Statement};
+ $schema =~ s/^[^\(]+\((.+)\)$/$1/s;
+ $schema = $stmt->schema_str() if ( $stmt->can('schema_str') );
+ $meta->{hash}->{"_metadata \0"} =
+ "<dbd_metadata>"
+ . "<schema>$schema</schema>"
+ . "<col_names>$col_names</col_names>"
+ . "</dbd_metadata>";
+}
+
+# fetch_one_row, delete_one_row, update_one_row
+# are optimized for hash-style lookup without looping;
+# if you don't need them, omit them, they're optional
+# but, in that case you may need to define
+# truncate() and seek(), see below
+#
+sub fetch_one_row ($$;$)
+{
+ my ( $self, $key_only, $key ) = @_;
+ my $meta = $self->{meta};
+ $key_only and return $meta->{col_names}->[0];
+ exists $meta->{hash}->{$key} or return;
+ my $val = $meta->{hash}->{$key};
+ $val = ( ref($val) eq 'ARRAY' ) ? $val : [$val];
+ my $row = [ $key, @$val ];
+ return wantarray ? @{$row} : $row;
+}
+
+sub delete_one_row ($$$)
+{
+ my ( $self, $data, $aryref ) = @_;
+ my $meta = $self->{meta};
+ delete $meta->{hash}->{ $aryref->[0] };
+}
+
+sub update_one_row ($$$)
+{
+ my ( $self, $data, $aryref ) = @_;
+ my $meta = $self->{meta};
+ my $key = shift @$aryref;
+ defined $key or return;
+ my $row = ( ref($aryref) eq 'ARRAY' ) ? $aryref : [$aryref];
+ $meta->{hash}->{$key} = $meta->{dbm_mldbm} ? $row : $row->[0];
+}
+
+sub update_specific_row ($$$$)
+{
+ my ( $self, $data, $aryref, $origary ) = @_;
+ my $meta = $self->{meta};
+ my $key = shift @$origary;
+ my $newkey = shift @$aryref;
+ return unless ( defined $key );
+ $key eq $newkey or delete $meta->{hash}->{$key};
+ my $row = ( ref($aryref) eq 'ARRAY' ) ? $aryref : [$aryref];
+ $meta->{hash}->{$newkey} = $meta->{dbm_mldbm} ? $row : $row->[0];
+}
+
+# you may not need to explicitly DESTROY the ::Table
+# put cleanup code to run when the execute is done
+#
+sub DESTROY ($)
+{
+ my $self = shift;
+ my $meta = $self->{meta};
+ $meta->{hash} and untie %{ $meta->{hash} };
+
+ $self->SUPER::DESTROY();
+}
+
+# truncate() and seek() must be defined to satisfy DBI::SQL::Nano
+# *IF* you define the *_one_row methods above, truncate() and
+# seek() can be empty or you can use them without actually
+# truncating or seeking anything but if you don't define the
+# *_one_row methods, you may need to define these
+
+# if you need to do something after a series of
+# deletes or updates, you can put it in truncate()
+# which is called at the end of executing
+#
+sub truncate ($$)
+{
+ # my ( $self, $data ) = @_;
+ return 1;
+}
+
+# seek() is only needed if you use IO::File
+# though it could be used for other non-file operations
+# that you need to do before "writes" or truncate()
+#
+sub seek ($$$$)
+{
+ # my ( $self, $data, $pos, $whence ) = @_;
+ return 1;
+}
+
+# Th, th, th, that's all folks! See DBD::File and DBD::CSV for other
+# examples of creating pure perl DBDs. I hope this helped.
+# Now it's time to go forth and create your own DBD!
+# Remember to check in with dbi-dev@perl.org before you get too far.
+# We may be able to make suggestions or point you to other related
+# projects.
+
+1;
+__END__
+
+=pod
+
+=head1 NAME
+
+DBD::DBM - a DBI driver for DBM & MLDBM files
+
+=head1 SYNOPSIS
+
+ use DBI;
+ $dbh = DBI->connect('dbi:DBM:'); # defaults to SDBM_File
+ $dbh = DBI->connect('DBI:DBM(RaiseError=1):'); # defaults to SDBM_File
+ $dbh = DBI->connect('dbi:DBM:dbm_type=DB_File'); # defaults to DB_File
+ $dbh = DBI->connect('dbi:DBM:dbm_mldbm=Storable'); # MLDBM with SDBM_File
+
+ # or
+ $dbh = DBI->connect('dbi:DBM:', undef, undef);
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ f_ext => '.db/r',
+ f_dir => '/path/to/dbfiles/',
+ f_lockfile => '.lck',
+ dbm_type => 'BerkeleyDB',
+ dbm_mldbm => 'FreezeThaw',
+ dbm_store_metadata => 1,
+ dbm_berkeley_flags => {
+ '-Cachesize' => 1000, # set a ::Hash flag
+ },
+ });
+
+and other variations on connect() as shown in the L<DBI> docs,
+L<DBD::File metadata|DBD::File/Metadata> and L</Metadata>
+shown below.
+
+Use standard DBI prepare, execute, fetch, placeholders, etc.,
+see L</QUICK START> for an example.
+
+=head1 DESCRIPTION
+
+DBD::DBM is a database management system that works right out of the
+box. If you have a standard installation of Perl and DBI you can
+begin creating, accessing, and modifying simple database tables
+without any further modules. You can add other modules (e.g.,
+SQL::Statement, DB_File etc) for improved functionality.
+
+The module uses a DBM file storage layer. DBM file storage is common on
+many platforms and files can be created with it in many programming
+languages using different APIs. That means, in addition to creating
+files with DBI/SQL, you can also use DBI/SQL to access and modify files
+created by other DBM modules and programs and vice versa. B<Note> that
+in those cases it might be necessary to use a common subset of the
+provided features.
+
+DBM files are stored in binary format optimized for quick retrieval
+when using a key field. That optimization can be used advantageously
+to make DBD::DBM SQL operations that use key fields very fast. There
+are several different "flavors" of DBM which use different storage
+formats supported by perl modules such as SDBM_File and MLDBM. This
+module supports all of the flavors that perl supports and, when used
+with MLDBM, supports tables with any number of columns and insertion
+of Perl objects into tables.
+
+DBD::DBM has been tested with the following DBM types: SDBM_File,
+NDBM_File, ODBM_File, GDBM_File, DB_File, BerkeleyDB. Each type was
+tested both with and without MLDBM and with the Data::Dumper,
+Storable, FreezeThaw, YAML and JSON serializers using the DBI::SQL::Nano
+or the SQL::Statement engines.
+
+=head1 QUICK START
+
+DBD::DBM operates like all other DBD drivers - its basic syntax and
+operation is specified by DBI. If you're not familiar with DBI, you should
+start by reading L<DBI> and the documents it points to and then come back
+and read this file. If you are familiar with DBI, you already know most of
+what you need to know to operate this module. Just jump in and create a
+test script something like the one shown below.
+
+You should be aware that there are several options for the SQL engine
+underlying DBD::DBM, see L<Supported SQL syntax>. There are also many
+options for DBM support, see especially the section on L<Adding
+multi-column support with MLDBM>.
+
+But here's a sample to get you started.
+
+ use DBI;
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->{RaiseError} = 1;
+ for my $sql( split /;\n+/,"
+ CREATE TABLE user ( user_name TEXT, phone TEXT );
+ INSERT INTO user VALUES ('Fred Bloggs','233-7777');
+ INSERT INTO user VALUES ('Sanjay Patel','777-3333');
+ INSERT INTO user VALUES ('Junk','xxx-xxxx');
+ DELETE FROM user WHERE user_name = 'Junk';
+ UPDATE user SET phone = '999-4444' WHERE user_name = 'Sanjay Patel';
+ SELECT * FROM user
+ "){
+ my $sth = $dbh->prepare($sql);
+ $sth->execute;
+ $sth->dump_results if $sth->{NUM_OF_FIELDS};
+ }
+ $dbh->disconnect;
+
+=head1 USAGE
+
+This section will explain some use cases in more detail. To get an
+overview about the available attributes, see L</Metadata>.
+
+=head2 Specifying Files and Directories
+
+DBD::DBM will automatically supply an appropriate file extension for the
+type of DBM you are using. For example, if you use SDBM_File, a table
+called "fruit" will be stored in two files called "fruit.pag" and
+"fruit.dir". You should B<never> specify the file extensions in your SQL
+statements.
+
+DBD::DBM recognizes following default extensions for following types:
+
+=over 4
+
+=item .pag/r
+
+Chosen for dbm_type C<< SDBM_File >>, C<< ODBM_File >> and C<< NDBM_File >>
+when an implementation is detected which wraps C<< -ldbm >> for
+C<< NDBM_File >> (e.g. Solaris, AIX, ...).
+
+For those types, the C<< .dir >> extension is recognized, too (for being
+deleted when dropping a table).
+
+=item .db/r
+
+Chosen for dbm_type C<< NDBM_File >> when an implementation is detected
+which wraps BerkeleyDB 1.x for C<< NDBM_File >> (typically BSD's, Darwin).
+
+=back
+
+C<< GDBM_File >>, C<< DB_File >> and C<< BerkeleyDB >> don't usually
+use a file extension.
+
+If your DBM type uses an extension other than one of the recognized
+types of extensions, you should set the I<f_ext> attribute to the
+extension B<and> file a bug report as described in DBI with the name
+of the implementation and extension so we can add it to DBD::DBM.
+Thanks in advance for that :-).
+
+ $dbh = DBI->connect('dbi:DBM:f_ext=.db'); # .db extension is used
+ $dbh = DBI->connect('dbi:DBM:f_ext='); # no extension is used
+
+ # or
+ $dbh->{f_ext}='.db'; # global setting
+ $dbh->{f_meta}->{'qux'}->{f_ext}='.db'; # setting for table 'qux'
+
+By default files are assumed to be in the current working directory.
+To use other directories specify the I<f_dir> attribute in either the
+connect string or by setting the database handle attribute.
+
+For example, this will look for the file /foo/bar/fruit (or
+/foo/bar/fruit.pag for DBM types that use that extension)
+
+ my $dbh = DBI->connect('dbi:DBM:f_dir=/foo/bar');
+ # and this will too:
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->{f_dir} = '/foo/bar';
+ # but this is recommended
+ my $dbh = DBI->connect('dbi:DBM:', undef, undef, { f_dir => '/foo/bar' } );
+
+ # now you can do
+ my $ary = $dbh->selectall_arrayref(q{ SELECT x FROM fruit });
+
+You can also use delimited identifiers to specify paths directly in SQL
+statements. This looks in the same place as the two examples above but
+without setting I<f_dir>:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ my $ary = $dbh->selectall_arrayref(q{
+ SELECT x FROM "/foo/bar/fruit"
+ });
+
+You can also tell DBD::DBM to use a specified path for a specific table:
+
+ $dbh->{dbm_tables}->{f}->{file} = q(/foo/bar/fruit);
+
+Please be aware that you cannot specify this during connection.
+
+If you have SQL::Statement installed, you can use table aliases:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ my $ary = $dbh->selectall_arrayref(q{
+ SELECT f.x FROM "/foo/bar/fruit" AS f
+ });
+
+See the L<GOTCHAS AND WARNINGS> for using DROP on tables.
+
+=head2 Table locking and flock()
+
+Table locking is accomplished using a lockfile which has the same
+basename as the table's file but with the file extension '.lck' (or a
+lockfile extension that you supply, see below). This lock file is
+created with the table during a CREATE and removed during a DROP.
+Every time the table itself is opened, the lockfile is flocked(). For
+SELECT, this is a shared lock. For all other operations, it is an
+exclusive lock (except when you specify something different using the
+I<f_lock> attribute).
+
+Since the locking depends on flock(), it only works on operating
+systems that support flock(). In cases where flock() is not
+implemented, DBD::DBM will simply behave as if the flock() had
+occurred although no actual locking will happen. Read the
+documentation for flock() for more information.
+
+Even on those systems that do support flock(), locking is only
+advisory - as is always the case with flock(). This means that if
+another program tries to access the table file while DBD::DBM has the
+table locked, that other program will *succeed* at opening unless
+it is also using flock on the '.lck' file. As a result DBD::DBM's
+locking only really applies to other programs using DBD::DBM or other
+program written to cooperate with DBD::DBM locking.
+
+=head2 Specifying the DBM type
+
+Each "flavor" of DBM stores its files in a different format and has
+different capabilities and limitations. See L<AnyDBM_File> for a
+comparison of DBM types.
+
+By default, DBD::DBM uses the C<< SDBM_File >> type of storage since
+C<< SDBM_File >> comes with Perl itself. If you have other types of
+DBM storage available, you can use any of them with DBD::DBM. It is
+strongly recommended to use at least C<< DB_File >>, because C<<
+SDBM_File >> has quirks and limitations and C<< ODBM_File >>, C<<
+NDBM_File >> and C<< GDBM_File >> are not always available.
+
+You can specify the DBM type using the I<dbm_type> attribute which can
+be set in the connection string or with C<< $dbh->{dbm_type} >> and
+C<< $dbh->{f_meta}->{$table_name}->{type} >> for per-table settings in
+cases where a single script is accessing more than one kind of DBM
+file.
+
+In the connection string, just set C<< dbm_type=TYPENAME >> where
+C<< TYPENAME >> is any DBM type such as GDBM_File, DB_File, etc. Do I<not>
+use MLDBM as your I<dbm_type> as that is set differently, see below.
+
+ my $dbh=DBI->connect('dbi:DBM:'); # uses the default SDBM_File
+ my $dbh=DBI->connect('dbi:DBM:dbm_type=GDBM_File'); # uses the GDBM_File
+
+ # You can also use $dbh->{dbm_type} to set the DBM type for the connection:
+ $dbh->{dbm_type} = 'DB_File'; # set the global DBM type
+ print $dbh->{dbm_type}; # display the global DBM type
+
+If you have several tables in your script that use different DBM
+types, you can use the $dbh->{dbm_tables} hash to store different
+settings for the various tables. You can even use this to perform
+joins on files that have completely different storage mechanisms.
+
+ # sets global default of GDBM_File
+ my $dbh = DBI->connect('dbi:DBM:dbm_type=GDBM_File');
+
+ # overrides the global setting, but only for the tables called
+ # I<foo> and I<bar>
+ my $dbh->{f_meta}->{foo}->{dbm_type} = 'DB_File';
+ my $dbh->{f_meta}->{bar}->{dbm_type} = 'BerkeleyDB';
+
+ # prints the dbm_type for the table "foo"
+ print $dbh->{f_meta}->{foo}->{dbm_type};
+
+B<Note> that you must change the I<dbm_type> of a table before you access
+it for the first time.
+
+=head2 Adding multi-column support with MLDBM
+
+Most of the DBM types only support two columns and even if it would
+support more, DBD::DBM would only use two. However a CPAN module
+called MLDBM overcomes this limitation by allowing more than two
+columns. MLDBM does this by serializing the data - basically it puts
+a reference to an array into the second column. It can also put almost
+any kind of Perl object or even B<Perl coderefs> into columns.
+
+If you want more than two columns, you B<must> install MLDBM. It's available
+for many platforms and is easy to install.
+
+MLDBM is by default distributed with three serializers - Data::Dumper,
+Storable, and FreezeThaw. Data::Dumper is the default and Storable is the
+fastest. MLDBM can also make use of user-defined serialization methods or
+other serialization modules (e.g. L<YAML::MLDBM> or
+L<MLDBM::Serializer::JSON>). You select the serializer using the
+I<dbm_mldbm> attribute.
+
+Some examples:
+
+ $dbh=DBI->connect('dbi:DBM:dbm_mldbm=Storable'); # use MLDBM with Storable
+ $dbh=DBI->connect(
+ 'dbi:DBM:dbm_mldbm=MySerializer' # use MLDBM with a user defined module
+ );
+ $dbh=DBI->connect('dbi:DBM:', undef,
+ undef, { dbm_mldbm => 'YAML' }); # use 3rd party serializer
+ $dbh->{dbm_mldbm} = 'YAML'; # same as above
+ print $dbh->{dbm_mldbm} # show the MLDBM serializer
+ $dbh->{f_meta}->{foo}->{dbm_mldbm}='Data::Dumper'; # set Data::Dumper for table "foo"
+ print $dbh->{f_meta}->{foo}->{dbm_mldbm}; # show serializer for table "foo"
+
+MLDBM works on top of other DBM modules so you can also set a DBM type
+along with setting dbm_mldbm. The examples above would default to using
+SDBM_File with MLDBM. If you wanted DB_File instead, here's how:
+
+ # uses DB_File with MLDBM and Storable
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ dbm_type => 'DB_File',
+ dbm_mldbm => 'Storable',
+ });
+
+SDBM_File, the default I<dbm_type>, is quite limited, so if you are going to
+use MLDBM, you should probably use a different type, see L<AnyDBM_File>.
+
+See below for some L<GOTCHAS AND WARNINGS> about MLDBM.
+
+=head2 Support for Berkeley DB
+
+The Berkeley DB storage type is supported through two different Perl
+modules - DB_File (which supports only features in old versions of Berkeley
+DB) and BerkeleyDB (which supports all versions). DBD::DBM supports
+specifying either "DB_File" or "BerkeleyDB" as a I<dbm_type>, with or
+without MLDBM support.
+
+The "BerkeleyDB" dbm_type is experimental and its interface is likely to
+change. It currently defaults to BerkeleyDB::Hash and does not currently
+support ::Btree or ::Recno.
+
+With BerkeleyDB, you can specify initialization flags by setting them in
+your script like this:
+
+ use BerkeleyDB;
+ my $env = new BerkeleyDB::Env -Home => $dir; # and/or other Env flags
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ dbm_type => 'BerkeleyDB',
+ dbm_mldbm => 'Storable',
+ dbm_berkeley_flags => {
+ 'DB_CREATE' => DB_CREATE, # pass in constants
+ 'DB_RDONLY' => DB_RDONLY, # pass in constants
+ '-Cachesize' => 1000, # set a ::Hash flag
+ '-Env' => $env, # pass in an environment
+ },
+ });
+
+Do I<not> set the -Flags or -Filename flags as those are determined and
+overwritten by the SQL (e.g. -Flags => DB_RDONLY is set automatically
+when you issue a SELECT statement).
+
+Time has not permitted us to provide support in this release of DBD::DBM
+for further Berkeley DB features such as transactions, concurrency,
+locking, etc. We will be working on these in the future and would value
+suggestions, patches, etc.
+
+See L<DB_File> and L<BerkeleyDB> for further details.
+
+=head2 Optimizing the use of key fields
+
+Most "flavors" of DBM have only two physical columns (but can contain
+multiple logical columns as explained above in
+L<Adding multi-column support with MLDBM>). They work similarly to a
+Perl hash with the first column serving as the key. Like a Perl hash, DBM
+files permit you to do quick lookups by specifying the key and thus avoid
+looping through all records (supported by DBI::SQL::Nano only). Also like
+a Perl hash, the keys must be unique. It is impossible to create two
+records with the same key. To put this more simply and in SQL terms,
+the key column functions as the I<PRIMARY KEY> or UNIQUE INDEX.
+
+In DBD::DBM, you can take advantage of the speed of keyed lookups by using
+DBI::SQL::Nano and a WHERE clause with a single equal comparison on the key
+field. For example, the following SQL statements are optimized for keyed
+lookup:
+
+ CREATE TABLE user ( user_name TEXT, phone TEXT);
+ INSERT INTO user VALUES ('Fred Bloggs','233-7777');
+ # ... many more inserts
+ SELECT phone FROM user WHERE user_name='Fred Bloggs';
+
+The "user_name" column is the key column since it is the first
+column. The SELECT statement uses the key column in a single equal
+comparison - "user_name='Fred Bloggs'" - so the search will find it
+very quickly without having to loop through all the names which were
+inserted into the table.
+
+In contrast, these searches on the same table are not optimized:
+
+ 1. SELECT phone FROM user WHERE user_name < 'Fred';
+ 2. SELECT user_name FROM user WHERE phone = '233-7777';
+
+In #1, the operation uses a less-than (<) comparison rather than an equals
+comparison, so it will not be optimized for key searching. In #2, the key
+field "user_name" is not specified in the WHERE clause, and therefore the
+search will need to loop through all rows to find the requested row(s).
+
+B<Note> that the underlying DBM storage needs to loop over all I<key/value>
+pairs when the optimized fetch is used. SQL::Statement has a massively
+improved where clause evaluation which costs around 15% of the evaluation
+in DBI::SQL::Nano - combined with the loop in the DBM storage the speed
+improvement isn't so impressive.
+
+Even if lookups are faster by around 50%, DBI::SQL::Nano and
+SQL::Statement can benefit from the key field optimizations on
+updating and deleting rows - and here the improved where clause
+evaluation of SQL::Statement might beat DBI::SQL::Nano every time the
+where clause contains not only the key field (or more than one).
+
+=head2 Supported SQL syntax
+
+DBD::DBM uses a subset of SQL. The robustness of that subset depends on
+what other modules you have installed. Both options support basic SQL
+operations including CREATE TABLE, DROP TABLE, INSERT, DELETE, UPDATE, and
+SELECT.
+
+B<Option #1:> By default, this module inherits its SQL support from
+DBI::SQL::Nano that comes with DBI. Nano is, as its name implies, a *very*
+small SQL engine. Although limited in scope, it is faster than option #2
+for some operations (especially single I<primary key> lookups). See
+L<DBI::SQL::Nano> for a description of the SQL it supports and comparisons
+of it with option #2.
+
+B<Option #2:> If you install the pure Perl CPAN module SQL::Statement,
+DBD::DBM will use it instead of Nano. This adds support for table aliases,
+functions, joins, and much more. If you're going to use DBD::DBM
+for anything other than very simple tables and queries, you should install
+SQL::Statement. You don't have to change DBD::DBM or your scripts in any
+way, simply installing SQL::Statement will give you the more robust SQL
+capabilities without breaking scripts written for DBI::SQL::Nano. See
+L<SQL::Statement> for a description of the SQL it supports.
+
+To find out which SQL module is working in a given script, you can use the
+dbm_versions() method or, if you don't need the full output and version
+numbers, just do this:
+
+ print $dbh->{sql_handler}, "\n";
+
+That will print out either "SQL::Statement" or "DBI::SQL::Nano".
+
+Bearing the section about optimized access to the DBM storage in mind,
+comparing the benefits of both engines:
+
+ # DBI::SQL::Nano is faster
+ $sth = $dbh->prepare( "update foo set value='new' where key=15" );
+ $sth->execute();
+ $sth = $dbh->prepare( "delete from foo where key=27" );
+ $sth->execute();
+ $sth = $dbh->prepare( "select * from foo where key='abc'" );
+
+ # SQL::Statement might be faster (depending on DB size)
+ $sth = $dbh->prepare( "update foo set value='new' where key=?" );
+ $sth->execute(15);
+ $sth = $dbh->prepare( "update foo set value=? where key=15" );
+ $sth->execute('new');
+ $sth = $dbh->prepare( "delete from foo where key=?" );
+ $sth->execute(27);
+
+ # SQL::Statement is faster
+ $sth = $dbh->prepare( "update foo set value='new' where value='old'" );
+ $sth->execute();
+ # must be expressed using "where key = 15 or key = 27 or key = 42 or key = 'abc'"
+ # in DBI::SQL::Nano
+ $sth = $dbh->prepare( "delete from foo where key in (15,27,42,'abc')" );
+ $sth->execute();
+ # must be expressed using "where key > 10 and key < 90" in DBI::SQL::Nano
+ $sth = $dbh->prepare( "select * from foo where key between (10,90)" );
+ $sth->execute();
+
+ # only SQL::Statement can handle
+ $sth->prepare( "select * from foo,bar where foo.name = bar.name" );
+ $sth->execute();
+ $sth->prepare( "insert into foo values ( 1, 'foo' ), ( 2, 'bar' )" );
+ $sth->execute();
+
+=head2 Specifying Column Names
+
+DBM files don't have a standard way to store column names. DBD::DBM gets
+around this issue with a DBD::DBM specific way of storing the column names.
+B<If you are working only with DBD::DBM and not using files created by or
+accessed with other DBM programs, you can ignore this section.>
+
+DBD::DBM stores column names as a row in the file with the key I<_metadata
+\0>. So this code
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->do("CREATE TABLE baz (foo CHAR(10), bar INTEGER)");
+ $dbh->do("INSERT INTO baz (foo,bar) VALUES ('zippy',1)");
+
+Will create a file that has a structure something like this:
+
+ _metadata \0 | <dbd_metadata><schema></schema><col_names>foo,bar</col_names></dbd_metadata>
+ zippy | 1
+
+The next time you access this table with DBD::DBM, it will treat the
+I<_metadata \0> row as a header rather than as data and will pull the column
+names from there. However, if you access the file with something other
+than DBD::DBM, the row will be treated as a regular data row.
+
+If you do not want the column names stored as a data row in the table you
+can set the I<dbm_store_metadata> attribute to 0.
+
+ my $dbh = DBI->connect('dbi:DBM:', undef, undef, { dbm_store_metadata => 0 });
+
+ # or
+ $dbh->{dbm_store_metadata} = 0;
+
+ # or for per-table setting
+ $dbh->{f_meta}->{qux}->{dbm_store_metadata} = 0;
+
+By default, DBD::DBM assumes that you have two columns named "k" and "v"
+(short for "key" and "value"). So if you have I<dbm_store_metadata> set to
+0 and you want to use alternate column names, you need to specify the
+column names like this:
+
+ my $dbh = DBI->connect('dbi:DBM:', undef, undef, {
+ dbm_store_metadata => 0,
+ dbm_cols => [ qw(foo bar) ],
+ });
+
+ # or
+ $dbh->{dbm_store_metadata} = 0;
+ $dbh->{dbm_cols} = 'foo,bar';
+
+ # or to set the column names on per-table basis, do this:
+ # sets the column names only for table "qux"
+ $dbh->{f_meta}->{qux}->{dbm_store_metadata} = 0;
+ $dbh->{f_meta}->{qux}->{col_names} = [qw(foo bar)];
+
+If you have a file that was created by another DBM program or created with
+I<dbm_store_metadata> set to zero and you want to convert it to using
+DBD::DBM's column name storage, just use one of the methods above to name
+the columns but *without* specifying I<dbm_store_metadata> as zero. You
+only have to do that once - thereafter you can get by without setting
+either I<dbm_store_metadata> or setting I<dbm_cols> because the names will
+be stored in the file.
+
+=head1 DBI database handle attributes
+
+=head2 Metadata
+
+=head3 Statement handle ($sth) attributes and methods
+
+Most statement handle attributes such as NAME, NUM_OF_FIELDS, etc. are
+available only after an execute. The same is true of $sth->rows which is
+available after the execute but does I<not> require a fetch.
+
+=head3 Driver handle ($dbh) attributes
+
+It is not supported anymore to use dbm-attributes without the dbm_-prefix.
+Currently, if a DBD::DBM private attribute is accessed without an
+underscore in its name, dbm_ is prepended to that attribute and it's
+processed further. If the resulting attribute name is invalid, an error is
+thrown.
+
+=head4 dbm_cols
+
+Contains a comma separated list of column names or an array reference to
+the column names.
+
+=head4 dbm_type
+
+Contains the DBM storage type. Currently known supported types are
+C<< ODBM_File >>, C<< NDBM_File >>, C<< SDBM_File >>, C<< GDBM_File >>,
+C<< DB_File >> and C<< BerkeleyDB >>. It is not recommended to use one
+of the first three types - even if C<< SDBM_File >> is the most commonly
+available I<dbm_type>.
+
+=head4 dbm_mldbm
+
+Contains the serializer for DBM storage (value column). Requires the
+CPAN module L<MLDBM> installed. Currently known supported serializers
+are:
+
+=over 8
+
+=item Data::Dumper
+
+Default serializer. Deployed with Perl core.
+
+=item Storable
+
+Faster serializer. Deployed with Perl core.
+
+=item FreezeThaw
+
+Pure Perl serializer, requires L<FreezeThaw> to be installed.
+
+=item YAML
+
+Portable serializer (between languages but not architectures).
+Requires L<YAML::MLDBM> installation.
+
+=item JSON
+
+Portable, fast serializer (between languages but not architectures).
+Requires L<MLDBM::Serializer::JSON> installation.
+
+=back
+
+=head4 dbm_store_metadata
+
+Boolean value which determines if the metadata in DBM is stored or not.
+
+=head4 dbm_berkeley_flags
+
+Hash reference with additional flags for BerkeleyDB::Hash instantiation.
+
+=head4 dbm_version
+
+Readonly attribute containing the version of DBD::DBM.
+
+=head4 f_meta
+
+In addition to the attributes L<DBD::File> recognizes, DBD::DBM knows
+about the (public) attributes C<col_names> (B<Note> not I<dbm_cols>
+here!), C<dbm_type>, C<dbm_mldbm>, C<dbm_store_metadata> and
+C<dbm_berkeley_flags>. As in DBD::File, there are undocumented,
+internal attributes in DBD::DBM. Be very careful when modifying
+attributes you do not know; the consequence might be a destroyed or
+corrupted table.
+
+=head4 dbm_tables
+
+This attribute provides restricted access to the table meta data. See
+L<f_meta> and L<DBD::File/f_meta> for attribute details.
+
+dbm_tables is a tied hash providing the internal table names as keys
+(accessing unknown tables might create an entry) and their meta
+data as another tied hash. The table meta storage is obtained via
+the C<get_table_meta> method from the table implementation (see
+L<DBD::File::Developers>). Attribute setting and getting within the
+table meta data is handled via the methods C<set_table_meta_attr> and
+C<get_table_meta_attr>.
+
+=head3 Following attributes are no longer handled by DBD::DBM:
+
+=head4 dbm_ext
+
+This attribute is silently mapped to DBD::File's attribute I<f_ext>.
+Later versions of DBI might show a deprecated warning when this attribute
+is used and eventually it will be removed.
+
+=head4 dbm_lockfile
+
+This attribute is silently mapped to DBD::File's attribute I<f_lockfile>.
+Later versions of DBI might show a deprecated warning when this attribute
+is used and eventually it will be removed.
+
+=head1 DBI database handle methods
+
+=head2 The $dbh->dbm_versions() method
+
+The private method dbm_versions() returns a summary of what other modules
+are being used at any given time. DBD::DBM can work with or without many
+other modules - it can use either SQL::Statement or DBI::SQL::Nano as its
+SQL engine, it can be run with DBI or DBI::PurePerl, it can use many kinds
+of DBM modules, and many kinds of serializers when run with MLDBM. The
+dbm_versions() method reports all of that and more.
+
+ print $dbh->dbm_versions; # displays global settings
+ print $dbh->dbm_versions($table_name); # displays per table settings
+
+An important thing to note about this method is that when it is called
+with no arguments, it displays the *global* settings. If you override
+these by setting per-table attributes, these will I<not> be shown
+unless you specify a table name as an argument to the method call.
+
+=head2 Storing Objects
+
+If you are using MLDBM, you can use DBD::DBM to take advantage of its
+serializing abilities to serialize any Perl object that MLDBM can handle.
+To store objects in columns, you should (but don't absolutely need to)
+declare it as a column of type BLOB (the type is *currently* ignored by
+the SQL engine, but it's good form).
+
+=head1 EXTENSIBILITY
+
+=over 8
+
+=item C<SQL::Statement>
+
+Improved SQL engine compared to the built-in DBI::SQL::Nano - see
+L<Supported SQL syntax>.
+
+=item C<DB_File>
+
+Berkeley DB version 1. This database library is available on many
+systems without additional installation and most systems are
+supported.
+
+=item C<GDBM_File>
+
+Simple dbm type (comparable to C<DB_File>) under the GNU license.
+Typically not available (or requires extra installation) on non-GNU
+operating systems.
+
+=item C<BerkeleyDB>
+
+Berkeley DB version up to v4 (and maybe higher) - requires additional
+installation but is easier than GDBM_File on non-GNU systems.
+
+db4 comes with many tools which allow repairing and migrating
+databases. This is the B<recommended> dbm type for production use.
+
+=item C<MLDBM>
+
+Serializer wrapper to support more than one column for the files.
+Comes with serializers using C<Data::Dumper>, C<FreezeThaw> and
+C<Storable>.
+
+=item C<YAML::MLDBM>
+
+Additional serializer for MLDBM. YAML is very portable between languages.
+
+=item C<MLDBM::Serializer::JSON>
+
+Additional serializer for MLDBM. JSON is very portable between languages,
+probably more than YAML.
+
+=back
+
+=head1 GOTCHAS AND WARNINGS
+
+Using the SQL DROP command will remove any file that has the name specified
+in the command with either '.pag' and '.dir', '.db' or your {f_ext} appended
+to it. So this can be dangerous if you aren't sure what file it refers to:
+
+ $dbh->do(qq{DROP TABLE "/path/to/any/file"});
+
+Each DBM type has limitations. SDBM_File, for example, can only store
+values of less than 1,000 characters. *You* as the script author must
+ensure that you don't exceed those bounds. If you try to insert a value
+that is larger than DBM can store, the results will be unpredictable.
+See the documentation for whatever DBM you are using for details.
+
+Different DBM implementations return records in different orders.
+That means that you I<should not> rely on the order of records unless
+you use an ORDER BY statement.
+
+DBM data files are platform-specific. To move them from one platform to
+another, you'll need to do something along the lines of dumping your data
+to CSV on platform #1 and then dumping from CSV to DBM on platform #2.
+DBD::AnyData and DBD::CSV can help with that. There may also be DBM
+conversion tools for your platforms which would probably be quicker.
+
+When using MLDBM, there is a very powerful serializer - it will allow
+you to store Perl code or objects in database columns. When these get
+de-serialized, they may be eval'ed - in other words MLDBM (or actually
+Data::Dumper when used by MLDBM) may take the values and try to
+execute them in Perl. Obviously, this can present dangers, so if you
+do not know what is in a file, be careful before you access it with
+MLDBM turned on!
+
+See the entire section on L<Table locking and flock()> for gotchas and
+warnings about the use of flock().
+
+=head1 BUGS AND LIMITATIONS
+
+This module uses hash interfaces of two column file databases. While
+none of supported SQL engines have support for indices, the following
+statements really do the same (even if they mean something completely
+different) for each dbm type which lacks C<EXISTS> support:
+
+ $sth->do( "insert into foo values (1, 'hello')" );
+
+ # this statement does ...
+ $sth->do( "update foo set v='world' where k=1" );
+ # ... the same as this statement
+ $sth->do( "insert into foo values (1, 'world')" );
+
+This is considered to be a bug and might change in a future release.
+
+Known affected dbm types are C<ODBM_File> and C<NDBM_File>. We highly
+recommend you use a more modern dbm type such as C<DB_File>.
+
+=head1 GETTING HELP, MAKING SUGGESTIONS, AND REPORTING BUGS
+
+If you need help installing or using DBD::DBM, please write to the DBI
+users mailing list at dbi-users@perl.org or to the
+comp.lang.perl.modules newsgroup on usenet. I cannot always answer
+every question quickly but there are many on the mailing list or in
+the newsgroup who can.
+
+DBD developers for DBD's which rely on DBD::File or DBD::DBM or use
+one of them as an example are suggested to join the DBI developers
+mailing list at dbi-dev@perl.org and strongly encouraged to join our
+IRC channel at L<irc://irc.perl.org/dbi>.
+
+If you have suggestions, ideas for improvements, or bugs to report, please
+report a bug as described in DBI. Do not mail any of the authors directly,
+you might not get an answer.
+
+When reporting bugs, please send the output of $dbh->dbm_versions($table)
+for a table that exhibits the bug and as small a sample as you can make of
+the code that produces the bug. And of course, patches are welcome, too
+:-).
+
+If you need enhancements quickly, you can get commercial support as
+described at L<http://dbi.perl.org/support/> or you can contact Jens Rehsack
+at rehsack@cpan.org for commercial support in Germany.
+
+Please don't bother Jochen Wiedmann or Jeff Zucker for support - they
+handed over further maintenance to H.Merijn Brand and Jens Rehsack.
+
+=head1 ACKNOWLEDGEMENTS
+
+Many, many thanks to Tim Bunce for prodding me to write this, and for
+copious, wise, and patient suggestions all along the way. (Jeff Zucker)
+
+I send my thanks and acknowledgements to H.Merijn Brand for his
+initial refactoring of DBD::File and his strong and ongoing support of
+SQL::Statement. Without him, the current progress would never have
+been made. And I have to name Martin J. Evans for each laugh (and
+correction) of all those funny word creations I (as non-native
+speaker) made to the documentation. And - of course - I have to thank
+all those unnamed contributors and testers from the Perl
+community. (Jens Rehsack)
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is written by Jeff Zucker < jzucker AT cpan.org >, who also
+maintained it till 2007. After that, in 2010, Jens Rehsack & H.Merijn Brand
+took over maintenance.
+
+ Copyright (c) 2004 by Jeff Zucker, all rights reserved.
+ Copyright (c) 2010 by Jens Rehsack & H.Merijn Brand, all rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=head1 SEE ALSO
+
+L<DBI>,
+L<SQL::Statement>, L<DBI::SQL::Nano>,
+L<AnyDBM_File>, L<DB_File>, L<BerkeleyDB>,
+L<MLDBM>, L<YAML::MLDBM>, L<MLDBM::Serializer::JSON>
+
+=cut
diff --git a/lib/DBD/ExampleP.pm b/lib/DBD/ExampleP.pm
new file mode 100644
index 0000000..0bbace0
--- /dev/null
+++ b/lib/DBD/ExampleP.pm
@@ -0,0 +1,428 @@
+{
+ package DBD::ExampleP;
+
+ use Symbol;
+
+ use DBI qw(:sql_types);
+
+ require File::Spec;
+
+ @EXPORT = qw(); # Do NOT @EXPORT anything.
+ $VERSION = sprintf("12.%06d", q$Revision: 14310 $ =~ /(\d+)/o);
+
+
+# $Id: ExampleP.pm 14310 2010-08-02 06:35:25Z REHSACK $
+#
+# Copyright (c) 1994,1997,1998 Tim Bunce
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+ # Each emulated row is the lstat() of one file plus the file name.
+ # @statnames lists the column names; %statnames (same identifier,
+ # hash sigil) maps each column name to its 0-based index in a row.
+ @statnames = qw(dev ino mode nlink
+ uid gid rdev size
+ atime mtime ctime
+ blksize blocks name);
+ @statnames{@statnames} = (0 .. @statnames-1);
+
+ # Parallel per-column tables: SQL type and precision for each column.
+ # Every column is an integer except the trailing file name.
+ @stattypes = (SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
+ SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
+ SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
+ SQL_INTEGER, SQL_INTEGER, SQL_VARCHAR);
+ @stattypes{@statnames} = @stattypes;
+ @statprec = ((10) x (@statnames-1), 1024);
+ @statprec{@statnames} = @statprec;
+ # sanity check: the parallel column tables must stay in step
+ die unless @statnames == @stattypes;
+ die unless @statprec == @stattypes;
+
+ $drh = undef; # holds driver handle once initialised
+ #$gensym = "SYM000"; # used by st::execute() for filehandles
+
+ # Create (once) and return the singleton driver handle.
+ sub driver{
+ return $drh if $drh;
+ my($class, $attr) = @_;
+ $class .= "::dr";
+ ($drh) = DBI::_new_drh($class, {
+ 'Name' => 'ExampleP',
+ 'Version' => $VERSION,
+ 'Attribution' => 'DBD Example Perl stub by Tim Bunce',
+ }, ['example implementors private data '.__PACKAGE__]);
+ $drh;
+ }
+
+ # On ithread clone, drop the cached driver handle so the new
+ # interpreter initialises its own.
+ sub CLONE {
+ undef $drh;
+ }
+}
+
+
+{ package DBD::ExampleP::dr; # ====== DRIVER ======
+ $imp_data_size = 0;
+ use strict;
+
+ # Create a database handle for the given data source.  No real
+ # connection is made; the handle is simply marked Active and seeded
+ # with a few attributes exercised by the test suite.
+ sub connect { # normally overridden, but a handy default
+ my($drh, $dbname, $user, $auth)= @_;
+ my ($outer, $dbh) = DBI::_new_dbh($drh, {
+ Name => $dbname,
+ examplep_private_dbh_attrib => 42, # an example, for testing
+ });
+ # canned answers served back by the db class get_info() method
+ $dbh->{examplep_get_info} = {
+ 29 => '"', # SQL_IDENTIFIER_QUOTE_CHAR
+ 41 => '.', # SQL_CATALOG_NAME_SEPARATOR
+ 114 => 1, # SQL_CATALOG_LOCATION
+ };
+ #$dbh->{Name} = $dbname;
+ $dbh->STORE('Active', 1);
+ return $outer;
+ }
+
+ sub data_sources {
+ return ("dbi:ExampleP:dir=."); # possibly usefully meaningless
+ }
+
+}
+
+
+{ package DBD::ExampleP::db; # ====== DATABASE ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Parse "SELECT <fields> FROM <dir>" and build a statement handle.
+    # The FROM clause names a directory; each field must be one of the
+    # lstat() columns declared in %DBD::ExampleP::statnames.
+    sub prepare {
+        my($dbh, $statement)= @_;
+        my @fields;
+        my($fields, $dir) = $statement =~ m/^\s*select\s+(.*?)\s+from\s+(\S*)/i;
+
+        if (defined $fields and defined $dir) {
+            @fields = ($fields eq '*')
+                ? keys %DBD::ExampleP::statnames
+                : split(/\s*,\s*/, $fields);
+        }
+        else {
+            return $dbh->set_err($DBI::stderr, "Syntax error in select statement (\"$statement\")")
+                unless $statement =~ m/^\s*set\s+/;
+            # the SET syntax is just a hack so the ExampleP driver can
+            # be used to test non-select statements.
+            # Now we have DBI::DBM etc., ExampleP should be deprecated
+        }
+
+        my ($outer, $sth) = DBI::_new_sth($dbh, {
+            'Statement'     => $statement,
+            examplep_private_sth_attrib => 24, # an example, for testing
+        }, ['example implementors private data '.__PACKAGE__]);
+
+        # reject any field name that is not a known stat column
+        my @bad = map {
+            defined $DBD::ExampleP::statnames{$_} ? () : $_
+        } @fields;
+        return $dbh->set_err($DBI::stderr, "Unknown field names: @bad")
+            if @bad;
+
+        $outer->STORE('NUM_OF_FIELDS' => scalar(@fields));
+
+        # a literal directory (no '?' placeholder) can be remembered now;
+        # otherwise it must be supplied via bind_param()/execute()
+        $sth->{examplep_ex_dir} = $dir if defined($dir) && $dir !~ /\?/;
+        $outer->STORE('NUM_OF_PARAMS' => ($dir) ? $dir =~ tr/?/?/ : 0);
+
+        if (@fields) {
+            $outer->STORE('NAME' => \@fields);
+            $outer->STORE('NULLABLE' => [ (0) x @fields ]);
+            $outer->STORE('SCALE' => [ (0) x @fields ]);
+        }
+
+        $outer;
+    }
+
+
+    # List subdirectories of $catalog (default: current directory) as
+    # TABLE rows, plus some fixed VIEW rows used by the test harness.
+    # The result set is served through a DBD::Sponge statement handle.
+    sub table_info {
+        my $dbh = shift;
+        my ($catalog, $schema, $table, $type) = @_;
+
+        my @types = split(/["']*,["']/, $type || 'TABLE');
+        my %types = map { $_=>$_ } @types;
+
+        # Return a list of all subdirectories
+        my $dh = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym;
+        my $dir = $catalog || File::Spec->curdir();
+        my @list;
+        if ($types{VIEW}) { # for use by test harness
+            push @list, [ undef, "schema",  "table",  'VIEW', undef ];
+            push @list, [ undef, "sch-ema", "table",  'VIEW', undef ];
+            push @list, [ undef, "schema",  "ta-ble", 'VIEW', undef ];
+            push @list, [ undef, "sch ema", "table",  'VIEW', undef ];
+            push @list, [ undef, "schema",  "ta ble", 'VIEW', undef ];
+        }
+        if ($types{TABLE}) {
+            no strict 'refs';
+            opendir($dh, $dir)
+                or return $dbh->set_err(int($!), "Failed to open directory $dir: $!");
+            while (defined(my $item = readdir($dh))) {
+                if ($^O eq 'VMS') {
+                    # if on VMS then avoid warnings from catdir if you use a file
+                    # (not a dir) as the item below
+                    next if $item !~ /\.dir$/oi;
+                }
+                my $file = File::Spec->catdir($dir,$item);
+                next unless -d $file;
+                my($dev, $ino, $mode, $nlink, $uid) = lstat($file);
+                my $pwnam = undef; # eval { scalar(getpwnam($uid)) } || $uid;
+                push @list, [ $dir, $pwnam, $item, 'TABLE', undef ];
+            }
+            # was close($dh): handles opened with opendir() must be closed
+            # with closedir() (as DBD::ExampleP::st::finish already does)
+            closedir($dh);
+        }
+        # We would like to simply do a DBI->connect() here. However,
+        # this is wrong if we are in a subclass like DBI::ProxyServer.
+        $dbh->{'dbd_sponge_dbh'} ||= DBI->connect("DBI:Sponge:", '','')
+            or return $dbh->set_err($DBI::err,
+                    "Failed to connect to DBI::Sponge: $DBI::errstr");
+
+        my $attr = {
+            'rows' => \@list,
+            'NUM_OF_FIELDS' => 5,
+            'NAME' => ['TABLE_CAT', 'TABLE_SCHEM', 'TABLE_NAME',
+                    'TABLE_TYPE', 'REMARKS'],
+            'TYPE' => [DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(),
+                    DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR() ],
+            'NULLABLE' => [1, 1, 1, 1, 1]
+        };
+        my $sdbh = $dbh->{'dbd_sponge_dbh'};
+        my $sth = $sdbh->prepare("SHOW TABLES FROM $dir", $attr)
+            or return $dbh->set_err($sdbh->err(), $sdbh->errstr());
+        $sth;
+    }
+
+
+    # Describe the two SQL types this driver pretends to support.
+    sub type_info_all {
+        my ($dbh) = @_;
+        my $ti = [
+            { TYPE_NAME => 0,
+              DATA_TYPE => 1,
+              COLUMN_SIZE => 2,
+              LITERAL_PREFIX => 3,
+              LITERAL_SUFFIX => 4,
+              CREATE_PARAMS => 5,
+              NULLABLE => 6,
+              CASE_SENSITIVE => 7,
+              SEARCHABLE => 8,
+              UNSIGNED_ATTRIBUTE=> 9,
+              FIXED_PREC_SCALE=> 10,
+              AUTO_UNIQUE_VALUE => 11,
+              LOCAL_TYPE_NAME => 12,
+              MINIMUM_SCALE => 13,
+              MAXIMUM_SCALE => 14,
+            },
+            [ 'VARCHAR', DBI::SQL_VARCHAR, 1024, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ],
+            [ 'INTEGER', DBI::SQL_INTEGER, 10, "","", undef, 0, 0, 1, 0, 0,0,undef,0,0 ],
+        ];
+        return $ti;
+    }
+
+
+    sub ping {
+        (shift->FETCH('Active')) ? 2 : 0; # the value 2 is checked for by t/80proxy.t
+    }
+
+
+    sub disconnect {
+        shift->STORE(Active => 0);
+        return 1;
+    }
+
+
+    # Serve the canned get_info() answers seeded in dr::connect().
+    sub get_info {
+        my ($dbh, $info_type) = @_;
+        return $dbh->{examplep_get_info}->{$info_type};
+    }
+
+
+    sub FETCH {
+        my ($dbh, $attrib) = @_;
+        # In reality this would interrogate the database engine to
+        # either return dynamic values that cannot be precomputed
+        # or fetch and cache attribute values too expensive to prefetch.
+        # else pass up to DBI to handle
+        return $INC{"DBD/ExampleP.pm"} if $attrib eq 'example_driver_path';
+        return $dbh->SUPER::FETCH($attrib);
+    }
+
+
+    sub STORE {
+        my ($dbh, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        if ($attrib eq 'AutoCommit') {
+            # convert AutoCommit values to magic ones to let DBI
+            # know that the driver has 'handled' the AutoCommit attribute
+            $value = ($value) ? -901 : -900;
+        }
+        return $dbh->{$attrib} = $value if $attrib =~ /^examplep_/;
+        return $dbh->SUPER::STORE($attrib, $value);
+    }
+
+    sub DESTROY {
+        my $dbh = shift;
+        $dbh->disconnect if $dbh->FETCH('Active');
+        undef
+    }
+
+
+    # This is an example to demonstrate the use of driver-specific
+    # methods via $dbh->func().
+    # Use it as follows:
+    #   my @tables = $dbh->func($re, 'examplep_tables');
+    #
+    # Returns all the tables that match the regular expression $re.
+    sub examplep_tables {
+        my $dbh = shift; my $re = shift;
+        grep { $_ =~ /$re/ } $dbh->tables();
+    }
+
+    # Map driver-specific trace flag names to their bit values.
+    sub parse_trace_flag {
+        my ($h, $name) = @_;
+        return 0x01000000 if $name eq 'foo';
+        return 0x02000000 if $name eq 'bar';
+        return 0x04000000 if $name eq 'baz';
+        return 0x08000000 if $name eq 'boo';
+        return 0x10000000 if $name eq 'bop';
+        return $h->SUPER::parse_trace_flag($name);
+    }
+
+    sub private_attribute_info {
+        return { example_driver_path => undef };
+    }
+}
+
+
+{ package DBD::ExampleP::st; # ====== STATEMENT ======
+    $imp_data_size = 0;
+    use strict; no strict 'refs'; # cause problems with filehandles
+
+    sub bind_param {
+        my($sth, $param, $value, $attribs) = @_;
+        $sth->{'dbd_param'}->[$param-1] = $value;
+        return 1;
+    }
+
+
+    # Execute the prepared "select ... from <dir>" statement: open the
+    # directory (or arm the fake long_list_N mode) ready for fetch().
+    sub execute {
+        my($sth, @dir) = @_;
+        my $dir;
+
+        if (@dir) {
+            $sth->bind_param($_, $dir[$_-1]) or return
+                foreach (1..@dir);
+        }
+
+        my $dbd_param = $sth->{'dbd_param'} || [];
+        return $sth->set_err(2, @$dbd_param." values bound when $sth->{NUM_OF_PARAMS} expected")
+            unless @$dbd_param == $sth->{NUM_OF_PARAMS};
+
+        return 0 unless $sth->{NUM_OF_FIELDS}; # not a select
+
+        $dir = $dbd_param->[0] || $sth->{examplep_ex_dir};
+        return $sth->set_err(2, "No bind parameter supplied")
+            unless defined $dir;
+
+        $sth->finish;
+
+        #
+        # If the users asks for directory "long_list_4532", then we fake a
+        # directory with files "file4351", "file4350", ..., "file0".
+        # This is a special case used for testing, especially DBD::Proxy.
+        #
+        if ($dir =~ /^long_list_(\d+)$/) {
+            $sth->{dbd_dir} = [ $1 ]; # array ref indicates special mode
+            $sth->{dbd_datahandle} = undef;
+        }
+        else {
+            $sth->{dbd_dir} = $dir;
+            my $sym = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym;
+            opendir($sym, $dir)
+                or return $sth->set_err(2, "opendir($dir): $!");
+            $sth->{dbd_datahandle} = $sym;
+        }
+        $sth->STORE(Active => 1);
+        return 1;
+    }
+
+
+    # Return the next row: either a fabricated "fileN" entry (fake mode)
+    # or the lstat() fields of the next directory entry.
+    sub fetch {
+        my $sth = shift;
+        my $dir = $sth->{dbd_dir};
+        my %s;
+
+        if (ref $dir) { # special fake-data test mode
+            my $num = $dir->[0]--;
+            unless ($num > 0) {
+                $sth->finish();
+                return;
+            }
+            my $time = time;
+            @s{@DBD::ExampleP::statnames} =
+                ( 2051, 1000+$num, 0644, 2, $>, $), 0, 1024,
+                  $time, $time, $time, 512, 2, "file$num")
+        }
+        else { # normal mode
+            my $dh = $sth->{dbd_datahandle}
+                or return $sth->set_err($DBI::stderr, "fetch without successful execute");
+            my $f = readdir($dh);
+            # was "unless ($f)": readdir returns the entry name, so a file
+            # literally named "0" is false and ended the result set early;
+            # test definedness instead
+            unless (defined $f) {
+                $sth->finish;
+                return;
+            }
+            # untaint $f so that we can use this for DBI taint tests
+            ($f) = ($f =~ m/^(.*)$/);
+            my $file = File::Spec->catfile($dir, $f);
+            # put in all the data fields
+            @s{ @DBD::ExampleP::statnames } = (lstat($file), $f);
+        }
+
+        # return just what fields the query asks for
+        my @new = @s{ @{$sth->{NAME}} };
+
+        return $sth->_set_fbav(\@new);
+    }
+    *fetchrow_arrayref = \&fetch;
+
+
+    sub finish {
+        my $sth = shift;
+        closedir($sth->{dbd_datahandle}) if $sth->{dbd_datahandle};
+        $sth->{dbd_datahandle} = undef;
+        $sth->{dbd_dir} = undef;
+        $sth->SUPER::finish();
+        return 1;
+    }
+
+
+    # Provide computed TYPE/PRECISION/ParamValues attributes; everything
+    # else is delegated to the DBI.
+    sub FETCH {
+        my ($sth, $attrib) = @_;
+        # In reality this would interrogate the database engine to
+        # either return dynamic values that cannot be precomputed
+        # or fetch and cache attribute values too expensive to prefetch.
+        if ($attrib eq 'TYPE'){
+            return [ @DBD::ExampleP::stattypes{ @{ $sth->FETCH(q{NAME_lc}) } } ];
+        }
+        elsif ($attrib eq 'PRECISION'){
+            return [ @DBD::ExampleP::statprec{ @{ $sth->FETCH(q{NAME_lc}) } } ];
+        }
+        elsif ($attrib eq 'ParamValues') {
+            my $dbd_param = $sth->{dbd_param} || [];
+            my %pv = map { $_ => $dbd_param->[$_-1] } 1..@$dbd_param;
+            return \%pv;
+        }
+        # else pass up to DBI to handle
+        return $sth->SUPER::FETCH($attrib);
+    }
+
+
+    sub STORE {
+        my ($sth, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        return $sth->{$attrib} = $value
+            if $attrib eq 'NAME' or $attrib eq 'NULLABLE' or $attrib eq 'SCALE' or $attrib eq 'PRECISION';
+        return $sth->SUPER::STORE($attrib, $value);
+    }
+
+    *parse_trace_flag = \&DBD::ExampleP::db::parse_trace_flag;
+}
+
+1;
+# vim: sw=4:ts=8
diff --git a/lib/DBD/File.pm b/lib/DBD/File.pm
new file mode 100644
index 0000000..d4d57ae
--- /dev/null
+++ b/lib/DBD/File.pm
@@ -0,0 +1,1637 @@
+# -*- perl -*-
+#
+# DBD::File - A base class for implementing DBI drivers that
+# act on plain files
+#
+# This module is currently maintained by
+#
+# H.Merijn Brand & Jens Rehsack
+#
+# The original author is Jochen Wiedmann.
+#
+# Copyright (C) 2009,2010 by H.Merijn Brand & Jens Rehsack
+# Copyright (C) 2004 by Jeff Zucker
+# Copyright (C) 1998 by Jochen Wiedmann
+#
+# All rights reserved.
+#
+# You may distribute this module under the terms of either the GNU
+# General Public License or the Artistic License, as specified in
+# the Perl README file.
+
+require 5.008;
+
+use strict;
+use warnings;
+
+use DBI ();
+
+package DBD::File;
+
+use strict;
+use warnings;
+
+use base qw(DBI::DBD::SqlEngine);
+use Carp;
+use vars qw(@ISA $VERSION $drh);
+
+$VERSION = "0.40";
+
+$drh = undef; # holds driver handle(s) once initialized
+
+my %accessors = (
+ get_meta => "get_file_meta",
+ set_meta => "set_file_meta",
+ clear_meta => "clear_file_meta",
+ );
+
+# Driver-handle constructor/singleton.
+# Caches one driver handle per subclass and, when the subclass has a
+# registered DBI driver prefix, injects prefix-named db methods
+# (e.g. csv_get_meta) that forward to the generic
+# get_file_meta/set_file_meta/clear_file_meta accessors.
+sub driver ($;$)
+{
+    my ($class, $attr) = @_;
+
+    # Drivers typically use a singleton object for the $drh
+    # We use a hash here to have one singleton per subclass.
+    # (Otherwise DBD::CSV and DBD::DBM, for example, would
+    # share the same driver object which would cause problems.)
+    # An alternative would be to not cache the $drh here at all
+    # and require that subclasses do that. Subclasses should do
+    # their own caching, so caching here just provides extra safety.
+    $drh->{$class} and return $drh->{$class};
+
+    $attr ||= {};
+    { no strict "refs";
+      unless ($attr->{Attribution}) {
+          $class eq "DBD::File" and
+              $attr->{Attribution} = "$class by Jeff Zucker";
+          $attr->{Attribution} ||= ${$class . "::ATTRIBUTION"} ||
+              "oops the author of $class forgot to define this";
+          }
+      $attr->{Version} ||= ${$class . "::VERSION"};
+      $attr->{Name} or ($attr->{Name} = $class) =~ s/^DBD\:\://;
+      }
+
+    $drh->{$class} = $class->SUPER::driver ($attr);
+
+    my $prefix = DBI->driver_prefix ($class);
+    if ($prefix) {
+        my $dbclass = $class . "::db";
+        while (my ($accessor, $funcname) = each %accessors) {
+            my $method = $prefix . $accessor;
+            # never overwrite a method the subclass provides itself
+            $dbclass->can ($method) and next;
+            # generate a forwarding stub via string eval and register
+            # it with DBI so method dispatch works on the handles
+            my $inject = sprintf <<'EOI', $dbclass, $method, $dbclass, $funcname;
+sub %s::%s
+{
+    my $func = %s->can (q{%s});
+    goto &$func;
+    }
+EOI
+            eval $inject;
+            $dbclass->install_method ($method);
+            }
+        }
+
+    # XXX inject DBD::XXX::Statement unless exists
+
+    return $drh->{$class};
+    } # driver
+
+# On thread clone, drop the cached driver handles so each interpreter
+# creates its own.
+sub CLONE
+{
+    undef $drh;
+    } # CLONE
+
+# ====== DRIVER ================================================================
+
+package DBD::File::dr;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+@DBD::File::dr::ISA = qw(DBI::DBD::SqlEngine::dr);
+$DBD::File::dr::imp_data_size = 0;
+
+# Escape ";", ":" and "\" in a DSN attribute value so it can be
+# embedded safely in a "key=value;key=value" DSN string.
+# Refs and undef become the empty string.
+sub dsn_quote
+{
+    my $str = shift;
+    ref $str and return "";
+    defined $str or return "";
+    $str =~ s/([;:\\])/\\$1/g;
+    return $str;
+    } # dsn_quote
+
+# List data sources: every subdirectory of f_dir (default: the
+# current directory) becomes a "DBI:<driver>:f_dir=<dir>" DSN, with
+# all remaining attributes appended as extra key=value pairs.
+sub data_sources ($;$)
+{
+    my ($drh, $attr) = @_;
+    my $dir = $attr && exists $attr->{f_dir}
+        ? $attr->{f_dir}
+        : File::Spec->curdir ();
+    my %attrs;
+    $attr and %attrs = %$attr;
+    delete $attrs{f_dir};
+    my $dsnextra = join ";", map { $_ . "=" . dsn_quote ($attrs{$_}) } keys %attrs;
+    my ($dirh) = Symbol::gensym ();
+    unless (opendir $dirh, $dir) {
+        $drh->set_err ($DBI::stderr, "Cannot open directory $dir: $!");
+        return;
+        }
+
+    # NOTE(review): %names is declared but never used here
+    my ($file, @dsns, %names, $driver);
+    # derive the driver name from the ImplementorClass (DBD::Xxx::dr)
+    $driver = $drh->{ImplementorClass} =~ m/^dbd\:\:([^\:]+)\:\:/i ? $1 : "File";
+
+    while (defined ($file = readdir ($dirh))) {
+        my $d = File::Spec->catdir ($dir, $file);
+        # allow current dir ... it can be a data_source too
+        $file ne File::Spec->updir () && -d $d and
+            push @dsns, "DBI:$driver:f_dir=" . dsn_quote ($d) . ($dsnextra ? ";$dsnextra" : "");
+        }
+    return @dsns;
+    } # data_sources
+
+# Nothing to do: file-based handles hold no shared connection state.
+sub disconnect_all
+{
+    } # disconnect_all
+
+# Empty destructor to avoid AUTOLOAD lookups on driver destruction.
+sub DESTROY
+{
+    undef;
+    } # DESTROY
+
+# ====== DATABASE ==============================================================
+
+package DBD::File::db;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+use Carp;
+require File::Spec;
+require Cwd;
+use Scalar::Util qw(refaddr); # in CORE since 5.7.3
+
+@DBD::File::db::ISA = qw(DBI::DBD::SqlEngine::db);
+$DBD::File::db::imp_data_size = 0;
+
+# Record this DBD::File version in the dbh, then chain to the
+# SqlEngine implementation.
+sub set_versions
+{
+    my $dbh = shift;
+    $dbh->{f_version} = $DBD::File::VERSION;
+
+    return $dbh->SUPER::set_versions ();
+    } # set_versions
+
+# Declare which f_* dbh attributes are valid and which are read-only;
+# DBI::DBD::SqlEngine consults these tables when attributes are
+# FETCHed/STOREd on the handle.
+sub init_valid_attributes
+{
+    my $dbh = shift;
+
+    $dbh->{f_valid_attrs} = {
+        f_version        => 1, # DBD::File version
+        f_dir            => 1, # base directory
+        f_ext            => 1, # file extension
+        f_schema         => 1, # schema name
+        f_meta           => 1, # meta data for tables
+        f_meta_map       => 1, # mapping table for identifier case
+        f_lock           => 1, # Table locking mode
+        f_lockfile       => 1, # Table lockfile extension
+        f_encoding       => 1, # Encoding of the file
+        f_valid_attrs    => 1, # File valid attributes
+        f_readonly_attrs => 1, # File readonly attributes
+        };
+    $dbh->{f_readonly_attrs} = {
+        f_version        => 1, # DBD::File version
+        f_valid_attrs    => 1, # File valid attributes
+        f_readonly_attrs => 1, # File readonly attributes
+        };
+
+    return $dbh->SUPER::init_valid_attributes ();
+    } # init_valid_attributes
+
+# Initialize the default f_* attribute values. Phase 0 runs once per
+# connect, before the DSN attributes are applied; drivers built on
+# old-style DBD::File have $phase derived from sql_init_phase.
+sub init_default_attributes
+{
+    my ($dbh, $phase) = @_;
+
+    # must be done first, because setting flags implicitly calls $dbdname::db->STORE
+    $dbh->SUPER::init_default_attributes ($phase);
+
+    # DBI::DBD::SqlEngine::dr::connect will detect old-style drivers
+    # and won't call this twice
+    unless (defined $phase) {
+        # we have an "old" driver here
+        $phase = defined $dbh->{sql_init_phase};
+        $phase and $phase = $dbh->{sql_init_phase};
+        }
+
+    if (0 == $phase) {
+        # check whether we're running in a Gofer server or not (see
+        # validate_FETCH_attr for details)
+        $dbh->{f_in_gofer} = (defined $INC{"DBD/Gofer.pm"} && (caller(5))[0] eq "DBI::Gofer::Execute");
+        # f_ext should not be initialized
+        # f_map is deprecated (but might return)
+        $dbh->{f_dir} = Cwd::abs_path (File::Spec->curdir ());
+        $dbh->{f_meta} = {};
+        $dbh->{f_meta_map} = {}; # choose new name because it contains other keys
+
+        # complete derived attributes, if required
+        (my $drv_class = $dbh->{ImplementorClass}) =~ s/::db$//;
+        my $drv_prefix = DBI->driver_prefix ($drv_class);
+        my $valid_attrs = $drv_prefix . "valid_attrs";
+        my $ro_attrs = $drv_prefix . "readonly_attrs";
+
+        # expose the driver's compatibility meta attribute (e.g.
+        # dbm_tables) as a hash tied to f_meta - but not under Gofer,
+        # where the tie object cannot be serialized
+        my @comp_attrs = ();
+        if (exists $dbh->{$drv_prefix . "meta"} and !$dbh->{f_in_gofer}) {
+            my $attr = $dbh->{$drv_prefix . "meta"};
+            defined $attr and defined $dbh->{$valid_attrs} and
+                !defined $dbh->{$valid_attrs}{$attr} and
+                $dbh->{$valid_attrs}{$attr} = 1;
+
+            my %h;
+            tie %h, "DBD::File::TieTables", $dbh;
+            $dbh->{$attr} = \%h;
+
+            push @comp_attrs, "meta";
+            }
+
+        # mark the derived attributes as valid and read-only
+        foreach my $comp_attr (@comp_attrs) {
+            my $attr = $drv_prefix . $comp_attr;
+            defined $dbh->{$valid_attrs} and !defined $dbh->{$valid_attrs}{$attr} and
+                $dbh->{$valid_attrs}{$attr} = 1;
+            defined $dbh->{$ro_attrs} and !defined $dbh->{$ro_attrs}{$attr} and
+                $dbh->{$ro_attrs}{$attr} = 1;
+            }
+        }
+
+    return $dbh;
+    } # init_default_attributes
+
+# Drop all cached table meta data on disconnect, then chain up.
+sub disconnect ($)
+{
+    %{$_[0]->{f_meta}} = ();
+    return $_[0]->SUPER::disconnect ();
+    } # disconnect
+
+# Redirect a FETCH of the driver's compatibility meta attribute to
+# the plain f_meta hash when running inside a Gofer server.
+sub validate_FETCH_attr
+{
+    my ($dbh, $attrib) = @_;
+
+    # If running in a Gofer server, access to our tied compatibility hash
+    # would force Gofer to serialize the tieing object including its
+    # private $dbh reference used to do the driver function calls.
+    # This will result in nasty exceptions. So return a copy of the
+    # f_meta structure instead, which is the source for the compatibility
+    # tie-hash. It's not as good as liked, but the best we can do in this
+    # situation.
+    if ($dbh->{f_in_gofer}) {
+        (my $drv_class = $dbh->{ImplementorClass}) =~ s/::db$//;
+        my $drv_prefix = DBI->driver_prefix ($drv_class);
+        exists $dbh->{$drv_prefix . "meta"} && $attrib eq $dbh->{$drv_prefix . "meta"} and
+            $attrib = "f_meta";
+        }
+
+    return $attrib;
+    } # validate_FETCH_attr
+
+# Validate/normalize attribute values on STORE:
+# - f_dir must exist and is converted to an absolute path
+# - f_ext is sanity-checked (warning only)
+# - storing into the driver's compat meta attribute merges the given
+#   hash into the tied hash instead of replacing it
+sub validate_STORE_attr
+{
+    my ($dbh, $attrib, $value) = @_;
+
+    if ($attrib eq "f_dir") {
+        -d $value or
+            return $dbh->set_err ($DBI::stderr, "No such directory '$value'");
+        File::Spec->file_name_is_absolute ($value) or
+            $value = Cwd::abs_path ($value);
+        }
+
+    if ($attrib eq "f_ext") {
+        $value eq "" || $value =~ m{^\.\w+(?:/[rR]*)?$} or
+            carp "'$value' doesn't look like a valid file extension attribute\n";
+        }
+
+    (my $drv_class = $dbh->{ImplementorClass}) =~ s/::db$//;
+    my $drv_prefix = DBI->driver_prefix ($drv_class);
+
+    if (exists $dbh->{$drv_prefix . "meta"}) {
+        my $attr = $dbh->{$drv_prefix . "meta"};
+        if ($attrib eq $attr) {
+            # merge per-table meta into the (tied) compat hash
+            while (my ($k, $v) = each %$value) {
+                $dbh->{$attrib}{$k} = $v;
+                }
+            }
+        }
+
+    return $dbh->SUPER::validate_STORE_attr ($attrib, $value);
+    } # validate_STORE_attr
+
+# Build a version string "<f_version> using IO::File (<ver>)
+# [+ <encoding> encoding]" for the given table, falling back to the
+# bootstrap defaults when no table (or no meta) is available.
+sub get_f_versions
+{
+    my ($dbh, $table) = @_;
+
+    my $class = $dbh->{ImplementorClass};
+    $class =~ s/::db$/::Table/;
+    my (undef, $meta);
+    $table and (undef, $meta) = $class->get_table_meta ($dbh, $table, 1);
+    unless ($meta) {
+        $meta = {};
+        $class->bootstrap_table_meta ($dbh, $meta, $table);
+        }
+
+    my $dver;
+    my $dtype = "IO::File";
+    eval {
+        $dver = IO::File->VERSION ();
+
+        # when we're still alive here, everything went ok - no need to check for $@
+        $dtype .= " ($dver)";
+        };
+
+    $meta->{f_encoding} and $dtype .= " + " . $meta->{f_encoding} . " encoding";
+
+    return sprintf "%s using %s", $dbh->{f_version}, $dtype;
+    } # get_f_versions
+
+# Fetch one meta attribute for one table. The pseudo table "." refers
+# to the dbh itself (plain attribute FETCH).
+sub get_single_table_meta
+{
+    my ($dbh, $table, $attr) = @_;
+    my $meta;
+
+    $table eq "." and
+        return $dbh->FETCH ($attr);
+
+    (my $class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
+    (undef, $meta) = $class->get_table_meta ($dbh, $table, 1);
+    $meta or croak "No such table '$table'";
+
+    # prevent creation of undef attributes
+    return $class->get_table_meta_attr ($meta, $attr);
+    } # get_single_table_meta
+
+# Public db function (installed as ${prefix}get_meta via %accessors):
+# fetch meta attribute(s) for one or more tables.
+# $table may be a plain name, "*" (all known tables plus "."), "+"
+# (all tables with simple identifier names), a qr// Regexp, or an
+# array ref of names; $attr may be a name or an array ref of names.
+# Scalar table + scalar attr returns the plain value; any list form
+# returns { table => { attr => value } }.
+sub get_file_meta
+{
+    my ($dbh, $table, $attr) = @_;
+
+    my $gstm = $dbh->{ImplementorClass}->can ("get_single_table_meta");
+
+    $table eq "*" and
+        $table = [ ".", keys %{$dbh->{f_meta}} ];
+    $table eq "+" and
+        $table = [ grep { m/^[_A-Za-z0-9]+$/ } keys %{$dbh->{f_meta}} ];
+    ref $table eq "Regexp" and
+        $table = [ grep { $_ =~ $table } keys %{$dbh->{f_meta}} ];
+
+    ref $table || ref $attr or
+        return &$gstm ($dbh, $table, $attr);
+
+    ref $table or $table = [ $table ];
+    ref $attr or $attr = [ $attr ];
+    "ARRAY" eq ref $table or
+        croak "Invalid argument for \$table - SCALAR, Regexp or ARRAY expected but got " . ref $table;
+    "ARRAY" eq ref $attr or
+        croak "Invalid argument for \$attr - SCALAR or ARRAY expected but got " . ref $attr;
+
+    my %results;
+    foreach my $tname (@{$table}) {
+        my %tattrs;
+        foreach my $aname (@{$attr}) {
+            $tattrs{$aname} = &$gstm ($dbh, $tname, $aname);
+            }
+        $results{$tname} = \%tattrs;
+        }
+
+    return \%results;
+    } # get_file_meta
+
+# Set one meta attribute for one table. The pseudo table "." refers
+# to the dbh itself (plain attribute STORE).
+sub set_single_table_meta
+{
+    my ($dbh, $table, $attr, $value) = @_;
+    my $meta;
+
+    $table eq "." and
+        return $dbh->STORE ($attr, $value);
+
+    (my $class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
+    (undef, $meta) = $class->get_table_meta ($dbh, $table, 1);
+    $meta or croak "No such table '$table'";
+    $class->set_table_meta_attr ($meta, $attr, $value);
+
+    return $dbh;
+    } # set_single_table_meta
+
+# Public db function (installed as ${prefix}set_meta via %accessors):
+# set meta attribute(s) for one or more tables. Accepts the same
+# table selectors as get_file_meta ("*", "+", Regexp, array ref);
+# $attr is either a single name (with $value) or a hash ref of
+# attribute => value pairs.
+sub set_file_meta
+{
+    my ($dbh, $table, $attr, $value) = @_;
+
+    my $sstm = $dbh->{ImplementorClass}->can ("set_single_table_meta");
+
+    $table eq "*" and
+        $table = [ ".", keys %{$dbh->{f_meta}} ];
+    $table eq "+" and
+        $table = [ grep { m/^[_A-Za-z0-9]+$/ } keys %{$dbh->{f_meta}} ];
+    ref ($table) eq "Regexp" and
+        $table = [ grep { $_ =~ $table } keys %{$dbh->{f_meta}} ];
+
+    ref $table || ref $attr or
+        return &$sstm ($dbh, $table, $attr, $value);
+
+    ref $table or $table = [ $table ];
+    ref $attr or $attr = { $attr => $value };
+    "ARRAY" eq ref $table or
+        croak "Invalid argument for \$table - SCALAR, Regexp or ARRAY expected but got " . ref $table;
+    "HASH" eq ref $attr or
+        croak "Invalid argument for \$attr - SCALAR or HASH expected but got " . ref $attr;
+
+    foreach my $tname (@{$table}) {
+        # NOTE(review): %tattrs is declared but never used in this loop
+        my %tattrs;
+        while (my ($aname, $aval) = each %$attr) {
+            &$sstm ($dbh, $tname, $aname, $aval);
+            }
+        }
+
+    return $dbh;
+    } # set_file_meta
+
+# Public db function (installed as ${prefix}clear_meta via
+# %accessors): empty the cached meta data hash for the given table.
+sub clear_file_meta
+{
+    my ($dbh, $table) = @_;
+
+    (my $class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
+    my (undef, $meta) = $class->get_table_meta ($dbh, $table, 1);
+    $meta and %{$meta} = ();
+
+    return;
+    } # clear_file_meta
+
+# Scan f_dir and return the list of available tables as
+# [ catalog, schema, table, type, remarks ] rows, appended to
+# whatever the SqlEngine base class already provides. The schema
+# defaults to the owner of f_dir unless f_schema is set (an empty or
+# undef f_schema suppresses the schema column).
+sub get_avail_tables
+{
+    my $dbh = shift;
+
+    my @tables = $dbh->SUPER::get_avail_tables ();
+    my $dir = $dbh->{f_dir};
+    my $dirh = Symbol::gensym ();
+
+    unless (opendir $dirh, $dir) {
+        $dbh->set_err ($DBI::stderr, "Cannot open directory $dir: $!");
+        return @tables;
+        }
+
+    my $class = $dbh->FETCH ("ImplementorClass");
+    $class =~ s/::db$/::Table/;
+    # NOTE(review): %names is declared but never used here
+    my ($file, %names);
+    my $schema = exists $dbh->{f_schema}
+        ? defined $dbh->{f_schema} && $dbh->{f_schema} ne ""
+            ? $dbh->{f_schema} : undef
+        : eval { getpwuid ((stat $dir)[4]) }; # XXX Win32::pwent
+    my %seen;
+    while (defined ($file = readdir ($dirh))) {
+        my ($tbl, $meta) = $class->get_table_meta ($dbh, $file, 0, 0) or next; # XXX
+        # $tbl && $meta && -f $meta->{f_fqfn} or next;
+        # report each table only once per schema ("\0" keys undef schema)
+        $seen{defined $schema ? $schema : "\0"}{$tbl}++ or
+            push @tables, [ undef, $schema, $tbl, "TABLE", "FILE" ];
+        }
+    closedir $dirh or
+        $dbh->set_err ($DBI::stderr, "Cannot close directory $dir: $!");
+
+    return @tables;
+    } # get_avail_tables
+
+# ====== Tie-Meta ==============================================================
+
+package DBD::File::TieMeta;
+
+# Ties a hash to one table's meta data so that reads and writes go
+# through the table class's get/set_table_meta_attr methods (which
+# honor the compat attribute map and reset-on-modify rules).
+
+use Carp qw(croak);
+require Tie::Hash;
+@DBD::File::TieMeta::ISA = qw(Tie::Hash);
+
+sub TIEHASH
+{
+    my ($class, $tblClass, $tblMeta) = @_;
+
+    my $self = bless ({ tblClass => $tblClass, tblMeta => $tblMeta, }, $class);
+    return $self;
+    } # new
+
+sub STORE
+{
+    my ($self, $meta_attr, $meta_val) = @_;
+
+    $self->{tblClass}->set_table_meta_attr ($self->{tblMeta}, $meta_attr, $meta_val);
+
+    return;
+    } # STORE
+
+sub FETCH
+{
+    my ($self, $meta_attr) = @_;
+
+    return $self->{tblClass}->get_table_meta_attr ($self->{tblMeta}, $meta_attr);
+    } # FETCH
+
+sub FIRSTKEY
+{
+    # calling keys() resets the each() iterator before a scan
+    my $a = scalar keys %{$_[0]->{tblMeta}};
+    each %{$_[0]->{tblMeta}};
+    } # FIRSTKEY
+
+sub NEXTKEY
+{
+    each %{$_[0]->{tblMeta}};
+    } # NEXTKEY
+
+sub EXISTS
+{
+    exists $_[0]->{tblMeta}{$_[1]};
+    } # EXISTS
+
+sub DELETE
+{
+    # deliberately unsupported: individual attributes must not vanish
+    croak "Can't delete single attributes from table meta structure";
+    } # DELETE
+
+sub CLEAR
+{
+    %{$_[0]->{tblMeta}} = ()
+    } # CLEAR
+
+sub SCALAR
+{
+    scalar %{$_[0]->{tblMeta}}
+    } # SCALAR
+
+# ====== Tie-Tables ============================================================
+
+package DBD::File::TieTables;
+
+# Ties the driver's compatibility "tables" hash (e.g. dbm_tables) to
+# the dbh's f_meta store: each table name maps to a TieMeta-tied hash
+# over that table's meta data.
+
+use Carp qw(croak);
+require Tie::Hash;
+@DBD::File::TieTables::ISA = qw(Tie::Hash);
+
+sub TIEHASH
+{
+    my ($class, $dbh) = @_;
+
+    (my $tbl_class = $dbh->{ImplementorClass}) =~ s/::db$/::Table/;
+    my $self = bless ({ dbh => $dbh, tblClass => $tbl_class, }, $class);
+    return $self;
+    } # new
+
+sub STORE
+{
+    my ($self, $table, $tbl_meta) = @_;
+
+    "HASH" eq ref $tbl_meta or
+        croak "Invalid data for storing as table meta data (must be hash)";
+
+    (undef, my $meta) = $self->{tblClass}->get_table_meta ($self->{dbh}, $table, 1);
+    $meta or croak "Invalid table name '$table'";
+
+    # merge attribute by attribute so reset-on-modify rules apply
+    while (my ($meta_attr, $meta_val) = each %$tbl_meta) {
+        $self->{tblClass}->set_table_meta_attr ($meta, $meta_attr, $meta_val);
+        }
+
+    return;
+    } # STORE
+
+sub FETCH
+{
+    my ($self, $table) = @_;
+
+    (undef, my $meta) = $self->{tblClass}->get_table_meta ($self->{dbh}, $table, 1);
+    $meta or croak "Invalid table name '$table'";
+
+    # hand out a TieMeta view so writes go through the table class
+    my %h;
+    tie %h, "DBD::File::TieMeta", $self->{tblClass}, $meta;
+
+    return \%h;
+    } # FETCH
+
+sub FIRSTKEY
+{
+    # calling keys() resets the each() iterator before a scan
+    my $a = scalar keys %{$_[0]->{dbh}->{f_meta}};
+    each %{$_[0]->{dbh}->{f_meta}};
+    } # FIRSTKEY
+
+sub NEXTKEY
+{
+    each %{$_[0]->{dbh}->{f_meta}};
+    } # NEXTKEY
+
+sub EXISTS
+{
+    exists $_[0]->{dbh}->{f_meta}->{$_[1]} or
+        exists $_[0]->{dbh}->{f_meta_map}->{$_[1]};
+    } # EXISTS
+
+sub DELETE
+{
+    my ($self, $table) = @_;
+
+    (undef, my $meta) = $self->{tblClass}->get_table_meta ($self->{dbh}, $table, 1);
+    $meta or croak "Invalid table name '$table'";
+
+    # delete under the canonical (case-mapped) table name
+    delete $_[0]->{dbh}->{f_meta}->{$meta->{table_name}};
+    } # DELETE
+
+sub CLEAR
+{
+    %{$_[0]->{dbh}->{f_meta}} = ();
+    %{$_[0]->{dbh}->{f_meta_map}} = ();
+    } # CLEAR
+
+sub SCALAR
+{
+    scalar %{$_[0]->{dbh}->{f_meta}}
+    } # SCALAR
+
+# ====== STATEMENT =============================================================
+
+package DBD::File::st;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+@DBD::File::st::ISA = qw(DBI::DBD::SqlEngine::st);
+$DBD::File::st::imp_data_size = 0;
+
+my %supported_attrs = (
+ TYPE => 1,
+ PRECISION => 1,
+ NULLABLE => 1,
+ );
+
+# Statement-handle attribute FETCH: derive TYPE/PRECISION/NULLABLE
+# from the CREATE TABLE column definitions cached per dbh (collected
+# once into f_overall_defs via the get_file_meta db function). Falls
+# through to the SqlEngine base class for everything else.
+sub FETCH
+{
+    my ($sth, $attr) = @_;
+
+    if ($supported_attrs{$attr}) {
+        # NOTE(review): $stmt is assigned but never used below
+        my $stmt = $sth->{sql_stmt};
+
+        if (exists $sth->{ImplementorClass} &&
+            exists $sth->{sql_stmt} &&
+            $sth->{sql_stmt}->isa ("SQL::Statement")) {
+
+            # fill overall_defs unless we know
+            unless (exists $sth->{f_overall_defs} && ref $sth->{f_overall_defs}) {
+                my $all_meta =
+                    $sth->{Database}->func ("*", "table_defs", "get_file_meta");
+                while (my ($tbl, $meta) = each %$all_meta) {
+                    exists $meta->{table_defs} && ref $meta->{table_defs} or next;
+                    foreach (keys %{$meta->{table_defs}{columns}}) {
+                        $sth->{f_overall_defs}{$_} = $meta->{table_defs}{columns}{$_};
+                        }
+                    }
+                }
+
+            my @colnames = $sth->sql_get_colnames ();
+
+            $attr eq "TYPE" and
+                return [ map { $sth->{f_overall_defs}{$_}{data_type} || "CHAR" }
+                             @colnames ];
+
+            $attr eq "PRECISION" and
+                return [ map { $sth->{f_overall_defs}{$_}{data_length} || 0 }
+                             @colnames ];
+
+            # nullable unless the column carries a NOT NULL constraint
+            $attr eq "NULLABLE" and
+                return [ map { ( grep m/^NOT NULL$/ =>
+                                     @{ $sth->{f_overall_defs}{$_}{constraints} || [] })
+                               ? 0 : 1 }
+                             @colnames ];
+            }
+        }
+
+    return $sth->SUPER::FETCH ($attr);
+    } # FETCH
+
+# ====== SQL::STATEMENT ========================================================
+
+package DBD::File::Statement;
+
+use strict;
+use warnings;
+
+@DBD::File::Statement::ISA = qw( DBI::DBD::SqlEngine::Statement );
+
+# SQL::Statement hook: open (or create/drop) the table named in the
+# statement by instantiating the driver's ::Table class with the
+# appropriate create/lock/drop mode flags.
+sub open_table ($$$$$)
+{
+    my ($self, $data, $table, $createMode, $lockMode) = @_;
+
+    my $class = ref $self;
+    $class =~ s/::Statement/::Table/;
+
+    my $flags = {
+        createMode => $createMode,
+        lockMode   => $lockMode,
+        };
+    $self->{command} eq "DROP" and $flags->{dropMode} = 1;
+
+    return $class->new ($data, { table => $table }, $flags);
+    } # open_table
+
+# ====== SQL::TABLE ============================================================
+
+package DBD::File::Table;
+
+use strict;
+use warnings;
+
+use Carp;
+require IO::File;
+require File::Basename;
+require File::Spec;
+require Cwd;
+
+# We may have a working flock () built-in but that doesn't mean that locking
+# will work on NFS (flock () may hang hard)
+my $locking = eval { flock STDOUT, 0; 1 };
+
+@DBD::File::Table::ISA = qw( DBI::DBD::SqlEngine::Table );
+
+# ====== FLYWEIGHT SUPPORT =====================================================
+
+my $fn_any_ext_regex = qr/\.[^.]*/;
+
+# Flyweight support for table_info
+# The functions file2table, init_table_meta, default_table_meta and
+# get_table_meta are using $self arguments for polymorphism only. They
+# must not rely on an instantiated DBD::File::Table
+# Resolve a table name or file name into the meta fields f_fqfn
+# (fully qualified file name), f_fqbn (basename without extension),
+# f_fqln (lock file, when f_lockfile is set) and table_name, applying
+# the f_ext extension rules and the sql_identifier_case folding.
+# Returns the resolved table name, or nothing when no suitable file
+# could be determined.
+sub file2table
+{
+    my ($self, $meta, $file, $file_is_table, $respect_case) = @_;
+
+    $file eq "." || $file eq ".." and return; # XXX would break a possible DBD::Dir
+
+    # split "f_ext" into the extension proper and an optional /r flag
+    # ($req: extension is required on matching files)
+    my ($ext, $req) = ("", 0);
+    if ($meta->{f_ext}) {
+        ($ext, my $opt) = split m/\//, $meta->{f_ext};
+        if ($ext && $opt) {
+            $opt =~ m/r/i and $req = 1;
+            }
+        }
+
+    # (my $tbl = $file) =~ s/$ext$//i;
+    my ($tbl, $basename, $dir, $fn_ext, $user_spec_file);
+    if ($file_is_table and defined $meta->{f_file}) {
+        # user pinned the file name explicitly via the f_file attribute
+        $tbl = $file;
+        ($basename, $dir, $fn_ext) = File::Basename::fileparse ($meta->{f_file}, $fn_any_ext_regex);
+        $file = $basename . $fn_ext;
+        $user_spec_file = 1;
+        }
+    else {
+        ($basename, $dir, undef) = File::Basename::fileparse ($file, $ext);
+        $file = $tbl = $basename;
+        $user_spec_file = 0;
+        }
+
+    if (!$respect_case and $meta->{sql_identifier_case} == 1) { # XXX SQL_IC_UPPER
+        $basename = uc $basename;
+        $tbl = uc $tbl;
+        }
+    if( !$respect_case and $meta->{sql_identifier_case} == 2) { # XXX SQL_IC_LOWER
+        $basename = lc $basename;
+        $tbl = lc $tbl;
+        }
+
+    my $searchdir = File::Spec->file_name_is_absolute ($dir)
+        ? ($dir =~ s|/$||, $dir)
+        : Cwd::abs_path (File::Spec->catdir ($meta->{f_dir}, $dir));
+    -d $searchdir or
+        croak "-d $searchdir: $!";
+
+    $searchdir eq $meta->{f_dir} and
+        $dir = "";
+
+    unless ($user_spec_file) {
+        $file_is_table and $file = "$basename$ext";
+
+        # Fully Qualified File Name
+        # build a comparator that decides whether a directory entry
+        # matches $basename under the current extension rules
+        my $cmpsub;
+        if ($respect_case) {
+            $cmpsub = sub {
+                my ($fn, undef, $sfx) = File::Basename::fileparse ($_, $fn_any_ext_regex);
+                $sfx = '' if $^O eq 'VMS' and $sfx eq '.'; # no extension turns up as a dot
+                $fn eq $basename and
+                    return (lc $sfx eq lc $ext or !$req && !$sfx);
+                return 0;
+                }
+            }
+        else {
+            $cmpsub = sub {
+                my ($fn, undef, $sfx) = File::Basename::fileparse ($_, $fn_any_ext_regex);
+                $sfx = '' if $^O eq 'VMS' and $sfx eq '.'; # no extension turns up as a dot
+                lc $fn eq lc $basename and
+                    return (lc $sfx eq lc $ext or !$req && !$sfx);
+                return 0;
+                }
+            }
+
+        # prefer the longest matching candidate (e.g. "table.csv"
+        # over "table")
+        opendir my $dh, $searchdir or croak "Can't open '$searchdir': $!";
+        my @f = sort { length $b <=> length $a } grep { &$cmpsub ($_) } readdir $dh;
+        @f > 0 && @f <= 2 and $file = $f[0];
+        !$respect_case && $meta->{sql_identifier_case} == 4 and # XXX SQL_IC_MIXED
+            ($tbl = $file) =~ s/$ext$//i;
+        closedir $dh or croak "Can't close '$searchdir': $!";
+
+        my $tmpfn = $file;
+        if ($ext && $req) {
+            # File extension required
+            $tmpfn =~ s/$ext$//i or return;
+            }
+        }
+
+    my $fqfn = File::Spec->catfile ($searchdir, $file);
+    my $fqbn = File::Spec->catfile ($searchdir, $basename);
+
+    $meta->{f_fqfn} = $fqfn;
+    $meta->{f_fqbn} = $fqbn;
+    defined $meta->{f_lockfile} && $meta->{f_lockfile} and
+        $meta->{f_fqln} = $meta->{f_fqbn} . $meta->{f_lockfile};
+
+    $dir && !$user_spec_file and $tbl = File::Spec->catfile ($dir, $tbl);
+    $meta->{table_name} = $tbl;
+
+    return $tbl;
+    } # file2table
+
+# Fill missing per-table meta fields from the dbh-level defaults.
+# Note the deliberate exists-vs-defined split: attributes that may
+# legitimately be undef (f_lock, f_lockfile, f_dir) use exists.
+sub bootstrap_table_meta
+{
+    my ($self, $dbh, $meta, $table) = @_;
+
+    exists  $meta->{f_dir}      or $meta->{f_dir}      = $dbh->{f_dir};
+    defined $meta->{f_ext}      or $meta->{f_ext}      = $dbh->{f_ext};
+    defined $meta->{f_encoding} or $meta->{f_encoding} = $dbh->{f_encoding};
+    exists  $meta->{f_lock}     or $meta->{f_lock}     = $dbh->{f_lock};
+    exists  $meta->{f_lockfile} or $meta->{f_lockfile} = $dbh->{f_lockfile};
+    defined $meta->{f_schema}   or $meta->{f_schema}   = $dbh->{f_schema};
+    defined $meta->{sql_identifier_case} or
+        $meta->{sql_identifier_case} = $dbh->{sql_identifier_case};
+    } # bootstrap_table_meta
+
+# Hook for subclasses to complete table meta initialization;
+# intentionally a no-op here.
+sub init_table_meta
+{
+    my ($self, $dbh, $meta, $table) = @_;
+
+    return;
+    } # init_table_meta
+
+# Look up (and lazily initialize) the meta data for a table.
+# Quoted identifiers ("tbl") force case-sensitive lookup; otherwise
+# f_meta_map translates user spellings to the canonical table name.
+# Returns ($table, $meta) or nothing when the file cannot be resolved.
+sub get_table_meta ($$$$;$)
+{
+    my ($self, $dbh, $table, $file_is_table, $respect_case) = @_;
+    unless (defined $respect_case) {
+        $respect_case = 0;
+        $table =~ s/^\"// and $respect_case = 1; # handle quoted identifiers
+        $table =~ s/\"$//;
+        }
+
+    unless ($respect_case) {
+        defined $dbh->{f_meta_map}{$table} and $table = $dbh->{f_meta_map}{$table};
+        }
+
+    my $meta = {};
+    defined $dbh->{f_meta}{$table} and $meta = $dbh->{f_meta}{$table};
+
+    unless ($meta->{initialized}) {
+        $self->bootstrap_table_meta ($dbh, $meta, $table);
+
+        unless (defined $meta->{f_fqfn}) {
+            $self->file2table ($meta, $table, $file_is_table, $respect_case) or return;
+            }
+
+        # record the user's spelling -> canonical name mapping
+        if (defined $meta->{table_name} and $table ne $meta->{table_name}) {
+            $dbh->{f_meta_map}{$table} = $meta->{table_name};
+            $table = $meta->{table_name};
+            }
+
+        # now we know a bit more - let's check if user can't use consequent spelling
+        # XXX add known issue about reset sql_identifier_case here ...
+        if (defined $dbh->{f_meta}{$table} && defined $dbh->{f_meta}{$table}{initialized}) {
+            $meta = $dbh->{f_meta}{$table};
+            # NOTE(review): the "unless" modifier governs the whole
+            # "... or return" statement - confirm this is intended
+            $self->file2table ($meta, $table, $file_is_table, $respect_case) or
+                return unless $dbh->{f_meta}{$table}{initialized};
+            }
+        unless ($dbh->{f_meta}{$table}{initialized}) {
+            $self->init_table_meta ($dbh, $meta, $table);
+            $meta->{initialized} = 1;
+            $dbh->{f_meta}{$table} = $meta;
+            }
+        }
+
+    return ($table, $meta);
+    } # get_table_meta
+
+# Attributes whose modification invalidates a derived value: when one
+# of these is stored, the named derived key is deleted and the meta is
+# marked uninitialized so file2table runs again on next access.
+my %reset_on_modify = (
+    f_file     => "f_fqfn",
+    f_dir      => "f_fqfn",
+    f_ext      => "f_fqfn",
+    f_lockfile => "f_fqfn", # forces new file2table call
+    );
+
+# Historic attribute names without the f_ prefix map to the f_ form.
+my %compat_map = map { $_ => "f_$_" } qw( file ext lock lockfile );
+
+# Allow subclasses to extend the reset-on-modify table.
+sub register_reset_on_modify
+{
+    my ($proto, $extra_resets) = @_;
+    %reset_on_modify = (%reset_on_modify, %$extra_resets);
+    return;
+    } # register_reset_on_modify
+
+# Allow subclasses to extend the compatibility name map.
+sub register_compat_map
+{
+    my ($proto, $extra_compat_map) = @_;
+    %compat_map = (%compat_map, %$extra_compat_map);
+    return;
+    } # register_compat_map
+
+# Read one meta attribute, translating compat names first.
+sub get_table_meta_attr
+{
+    my ($class, $meta, $attrib) = @_;
+    exists $compat_map{$attrib} and
+        $attrib = $compat_map{$attrib};
+    exists $meta->{$attrib} and
+        return $meta->{$attrib};
+    return;
+    } # get_table_meta_attr
+
+# Write one meta attribute, translating compat names and applying the
+# reset-on-modify invalidation first.
+sub set_table_meta_attr
+{
+    my ($class, $meta, $attrib, $value) = @_;
+    exists $compat_map{$attrib} and
+        $attrib = $compat_map{$attrib};
+    $class->table_meta_attr_changed ($meta, $attrib, $value);
+    $meta->{$attrib} = $value;
+    } # set_table_meta_attr
+
+# Invalidate derived meta values when a tracked attribute changes.
+sub table_meta_attr_changed
+{
+    my ($class, $meta, $attrib, $value) = @_;
+    defined $reset_on_modify{$attrib} and
+        delete $meta->{$reset_on_modify{$attrib}} and
+        $meta->{initialized} = 0;
+    } # table_meta_attr_changed
+
+# ====== FILE OPEN =============================================================
+
+# Open the table's data file (and lock file, when configured) into
+# $meta->{fh} / $meta->{lockfh} and apply flock() according to f_lock
+# (0 = none, 1 = shared, 2 = exclusive; default derives from lockMode).
+# NOTE(review): the ($$$) prototype doesn't match the four arguments;
+# prototypes are not applied to method calls, so this is inert -
+# confirm before relying on it.
+sub open_file ($$$)
+{
+    my ($self, $meta, $attrs, $flags) = @_;
+
+    defined $meta->{f_fqfn} && $meta->{f_fqfn} ne "" or croak "No filename given";
+
+    my ($fh, $fn);
+    unless ($meta->{f_dontopen}) {
+        $fn = $meta->{f_fqfn};
+        if ($flags->{createMode}) {
+            -f $meta->{f_fqfn} and
+                croak "Cannot create table $attrs->{table}: Already exists";
+            $fh = IO::File->new ($fn, "a+") or
+                croak "Cannot open $fn for writing: $! (" . ($!+0) . ")";
+            }
+        else {
+            unless ($fh = IO::File->new ($fn, ($flags->{lockMode} ? "r+" : "r"))) {
+                croak "Cannot open $fn: $! (" . ($!+0) . ")";
+                }
+            }
+
+        if ($fh) {
+            # "a+" leaves the position at EOF; rewind before use
+            $fh->seek (0, 0) or
+                croak "Error while seeking back: $!";
+            if (my $enc = $meta->{f_encoding}) {
+                binmode $fh, ":encoding($enc)" or
+                    croak "Failed to set encoding layer '$enc' on $fn: $!";
+                }
+            else {
+                binmode $fh or croak "Failed to set binary mode on $fn: $!";
+                }
+            }
+
+        $meta->{fh} = $fh;
+        }
+    if ($meta->{f_fqln}) {
+        $fn = $meta->{f_fqln};
+        if ($flags->{createMode}) {
+            -f $fn and
+                croak "Cannot create table lock for $attrs->{table}: Already exists";
+            $fh = IO::File->new ($fn, "a+") or
+                croak "Cannot open $fn for writing: $! (" . ($!+0) . ")";
+            }
+        else {
+            unless ($fh = IO::File->new ($fn, ($flags->{lockMode} ? "r+" : "r"))) {
+                croak "Cannot open $fn: $! (" . ($!+0) . ")";
+                }
+            }
+
+        $meta->{lockfh} = $fh;
+        }
+
+    # $locking is false when flock() is unavailable on this platform
+    if ($locking && $fh) {
+        my $lm = defined $flags->{f_lock}
+              && $flags->{f_lock} =~ m/^[012]$/
+               ? $flags->{f_lock}
+               : $flags->{lockMode} ? 2 : 1;
+        if ($lm == 2) {
+            flock $fh, 2 or croak "Cannot obtain exclusive lock on $fn: $!";
+            }
+        elsif ($lm == 1) {
+            flock $fh, 1 or croak "Cannot obtain shared lock on $fn: $!";
+            }
+        # $lm = 0 is forced no locking at all
+        }
+    } # open_file
+
+# ====== SQL::Eval API =========================================================
+
+# Construct a table object: resolve the table's meta data, remember
+# CREATE TABLE column definitions (createMode only), open the file(s)
+# and hand the combined structure to the SqlEngine base class.
+sub new
+{
+    my ($className, $data, $attrs, $flags) = @_;
+    my $dbh = $data->{Database};
+
+    my ($tblnm, $meta) = $className->get_table_meta ($dbh, $attrs->{table}, 1) or
+        croak "Cannot find appropriate file for table '$attrs->{table}'";
+    $attrs->{table} = $tblnm;
+
+    # Being a bit dirty here, as SQL::Statement::Structure does not offer
+    # me an interface to the data I want
+    $flags->{createMode} && $data->{sql_stmt}{table_defs} and
+        $meta->{table_defs} = $data->{sql_stmt}{table_defs};
+
+    $className->open_file ($meta, $attrs, $flags);
+
+    # NOTE(review): $columns and $array are never used below
+    my $columns = {};
+    my $array = [];
+    my $tbl = {
+        %{$attrs},
+        meta      => $meta,
+        col_names => $meta->{col_names} || [],
+        };
+    return $className->SUPER::new ($tbl);
+    } # new
+
+# DROP TABLE: close the handles, unlink data and lock file, and
+# forget the cached meta data.
+sub drop ($)
+{
+    my ($self, $data) = @_;
+    my $meta = $self->{meta};
+    # We have to close the file before unlinking it: Some OS'es will
+    # refuse the unlink otherwise.
+    $meta->{fh} and $meta->{fh}->close ();
+    $meta->{lockfh} and $meta->{lockfh}->close ();
+    undef $meta->{fh};
+    undef $meta->{lockfh};
+    $meta->{f_fqfn} and unlink $meta->{f_fqfn};
+    $meta->{f_fqln} and unlink $meta->{f_fqln};
+    delete $data->{Database}{f_meta}{$self->{table}};
+    return 1;
+    } # drop
+
+# Restricted seek: only "start of data" (0,0 - mapped past any header
+# via first_row_pos) and "end of file" (0,2) are legal positions.
+sub seek ($$$$)
+{
+    my ($self, $data, $pos, $whence) = @_;
+    my $meta = $self->{meta};
+    if ($whence == 0 && $pos == 0) {
+        $pos = defined $meta->{first_row_pos} ? $meta->{first_row_pos} : 0;
+        }
+    elsif ($whence != 2 || $pos != 0) {
+        croak "Illegal seek position: pos = $pos, whence = $whence";
+        }
+
+    $meta->{fh}->seek ($pos, $whence) or
+        croak "Error while seeking in " . $meta->{f_fqfn} . ": $!";
+    } # seek
+
+# Truncate the data file at the current position (used after
+# rewriting rows in place).
+sub truncate ($$)
+{
+    my ($self, $data) = @_;
+    my $meta = $self->{meta};
+    $meta->{fh}->truncate ($meta->{fh}->tell ()) or
+        croak "Error while truncating " . $meta->{f_fqfn} . ": $!";
+    return 1;
+    } # truncate
+
+# Ensure file and lock handles are closed when the table object goes
+# out of scope.
+sub DESTROY
+{
+    my $self = shift;
+    my $meta = $self->{meta};
+    $meta->{fh} and $meta->{fh}->close ();
+    $meta->{lockfh} and $meta->{lockfh}->close ();
+    undef $meta->{fh};
+    undef $meta->{lockfh};
+    } # DESTROY
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::File - Base class for writing file based DBI drivers
+
+=head1 SYNOPSIS
+
+This module is a base class for writing other L<DBD|DBI::DBD>s.
+It is not intended to function as a DBD itself (though it is possible).
+If you want to access flat files, use L<DBD::AnyData|DBD::AnyData>, or
+L<DBD::CSV|DBD::CSV> (both of which are subclasses of DBD::File).
+
+=head1 DESCRIPTION
+
+The DBD::File module is not a true L<DBI|DBI> driver, but an abstract
+base class for deriving concrete DBI drivers from it. The implication
+is, that these drivers work with plain files, for example CSV files or
+INI files. The module is based on the L<SQL::Statement|SQL::Statement>
+module, a simple SQL engine.
+
+See L<DBI|DBI> for details on DBI, L<SQL::Statement|SQL::Statement> for
+details on SQL::Statement and L<DBD::CSV|DBD::CSV>, L<DBD::DBM|DBD::DBM>
+or L<DBD::AnyData|DBD::AnyData> for example drivers.
+
+=head2 Metadata
+
+The following attributes are handled by DBI itself and not by DBD::File,
+thus they all work as expected:
+
+ Active
+ ActiveKids
+ CachedKids
+ CompatMode (Not used)
+ InactiveDestroy
+ AutoInactiveDestroy
+ Kids
+ PrintError
+ RaiseError
+ Warn (Not used)
+
+=head3 The following DBI attributes are handled by DBD::File:
+
+=head4 AutoCommit
+
+Always on.
+
+=head4 ChopBlanks
+
+Works.
+
+=head4 NUM_OF_FIELDS
+
+Valid after C<< $sth->execute >>.
+
+=head4 NUM_OF_PARAMS
+
+Valid after C<< $sth->prepare >>.
+
+=head4 NAME
+
+Valid after C<< $sth->execute >>; undef for Non-Select statements.
+
+=head4 NULLABLE
+
+Not really working: it always returns an array ref of ones, unless
+the affected table has been created in this session. Valid after
+C<< $sth->execute >>; undef for non-select statements.
+
+=head3 The following DBI attributes and methods are not supported:
+
+=over 4
+
+=item bind_param_inout
+
+=item CursorName
+
+=item LongReadLen
+
+=item LongTruncOk
+
+=back
+
+=head3 DBD::File specific attributes
+
+In addition to the DBI attributes, you can use the following dbh
+attributes:
+
+=head4 f_dir
+
+This attribute is used for setting the directory where the files are
+opened and it defaults to the current directory (F<.>). Usually you set
+it on the dbh but it may be overridden per table (see L<f_meta>).
+
+When the value for C<f_dir> is a relative path, it is converted into
+the appropriate absolute path name (based on the current working
+directory) when the dbh attribute is set.
+
+See L<KNOWN BUGS AND LIMITATIONS>.
+
+=head4 f_ext
+
+This attribute is used for setting the file extension. The format is:
+
+ extension{/flag}
+
+where the /flag is optional and the extension is case-insensitive.
+C<f_ext> allows you to specify an extension which:
+
+=over
+
+=item *
+
+makes DBD::File prefer F<table.extension> over F<table>.
+
+=item *
+
+makes the table name the filename minus the extension.
+
+=back
+
+ DBI:CSV:f_dir=data;f_ext=.csv
+
+In the above example and when C<f_dir> contains both F<table.csv> and
+F<table>, DBD::File will open F<table.csv> and the table will be
+named "table". If F<table.csv> does not exist but F<table> does
+that file is opened and the table is also called "table".
+
+If C<f_ext> is not specified and F<table.csv> exists it will be opened
+and the table will be called "table.csv" which is probably not what
+you want.
+
+NOTE: even though extensions are case-insensitive, table names are
+not.
+
+ DBI:CSV:f_dir=data;f_ext=.csv/r
+
+The C<r> flag means the file extension is required and any filename
+that does not match the extension is ignored.
+
+Usually you set it on the dbh but it may be overridden per table
+(see L<f_meta>).
+
+=head4 f_schema
+
+This will set the schema name and defaults to the owner of the
+directory in which the table file resides. You can set C<f_schema> to
+C<undef>.
+
+ my $dbh = DBI->connect ("dbi:CSV:", "", "", {
+ f_schema => undef,
+ f_dir => "data",
+ f_ext => ".csv/r",
+ }) or die $DBI::errstr;
+
+By setting the schema you affect the results from the tables call:
+
+ my @tables = $dbh->tables ();
+
+ # no f_schema
+ "merijn".foo
+ "merijn".bar
+
+ # f_schema => "dbi"
+ "dbi".foo
+ "dbi".bar
+
+ # f_schema => undef
+ foo
+ bar
+
+Defining C<f_schema> to the empty string is equal to setting it to C<undef>
+so the DSN can be C<"dbi:CSV:f_schema=;f_dir=.">.
+
+=head4 f_lock
+
+The C<f_lock> attribute is used to set the locking mode on the opened
+table files. Note that not all platforms support locking. By default,
+tables are opened with a shared lock for reading, and with an
+exclusive lock for writing. The supported modes are:
+
+ 0: No locking at all.
+
+ 1: Shared locks will be used.
+
+ 2: Exclusive locks will be used.
+
+But see L<KNOWN BUGS|/"KNOWN BUGS AND LIMITATIONS"> below.
+
+=head4 f_lockfile
+
+If you wish to use a lockfile extension other than C<.lck>, simply specify
+the C<f_lockfile> attribute:
+
+ $dbh = DBI->connect ("dbi:DBM:f_lockfile=.foo");
+ $dbh->{f_lockfile} = ".foo";
+ $dbh->{f_meta}{qux}{f_lockfile} = ".foo";
+
+If you wish to disable locking, set the C<f_lockfile> to C<0>.
+
+ $dbh = DBI->connect ("dbi:DBM:f_lockfile=0");
+ $dbh->{f_lockfile} = 0;
+ $dbh->{f_meta}{qux}{f_lockfile} = 0;
+
+=head4 f_encoding
+
+With this attribute, you can set the encoding in which the file is opened.
+This is implemented using C<< binmode $fh, ":encoding(<f_encoding>)" >>.
+
+=head4 f_meta
+
+Private data area which contains information about the tables this
+module handles. Table meta data might not be available until the
+table has been accessed for the first time e.g., by issuing a select
+on it. However, it is possible to pre-initialize attributes for each
+table you use.
+
+DBD::File recognizes the (public) attributes C<f_ext>, C<f_dir>,
+C<f_file>, C<f_encoding>, C<f_lock>, C<f_lockfile>, C<f_schema>,
+C<col_names>, C<table_name> and C<sql_identifier_case>. Be very careful
+when modifying attributes you do not know, the consequence might be a
+destroyed or corrupted table.
+
+C<f_file> is an attribute applicable to table meta data only and you
+will not find a corresponding attribute in the dbh. Whilst it may be
+reasonable to have several tables with the same column names, it is
+not for the same file name. If you need access to the same file using
+different table names, use C<SQL::Statement> as the SQL engine and the
+C<AS> keyword:
+
+ SELECT * FROM tbl AS t1, tbl AS t2 WHERE t1.id = t2.id
+
+C<f_file> can be an absolute path name or a relative path name but if
+it is relative, it is interpreted as being relative to the C<f_dir>
+attribute of the table meta data. When C<f_file> is set DBD::File will
+use C<f_file> as specified and will not attempt to work out an
+alternative for C<f_file> using the C<table name> and C<f_ext>
+attribute.
+
+While C<f_meta> is a private and readonly attribute (which means, you
+cannot modify its values), derived drivers might provide restricted
+write access through another attribute. Well known accessors are
+C<csv_tables> for L<DBD::CSV>, C<ad_tables> for L<DBD::AnyData> and
+C<dbm_tables> for L<DBD::DBM>.
+
+=head3 Internally private attributes to deal with SQL backends:
+
+Do not modify any of these private attributes unless you understand
+the implications of doing so. The behavior of DBD::File and derived
+DBDs might be unpredictable when one or more of those attributes are
+modified.
+
+=head4 sql_nano_version
+
+Contains the version of loaded DBI::SQL::Nano.
+
+=head4 sql_statement_version
+
+Contains the version of loaded SQL::Statement.
+
+=head4 sql_handler
+
+Contains either the text 'SQL::Statement' or 'DBI::SQL::Nano'.
+
+=head4 sql_ram_tables
+
+Contains optionally temporary tables.
+
+=head4 sql_flags
+
+Contains optional flags to instantiate the SQL::Parser parsing engine
+when SQL::Statement is used as SQL engine. See L<SQL::Parser> for valid
+flags.
+
+=head2 Driver private methods
+
+=head3 Default DBI methods
+
+=head4 data_sources
+
+The C<data_sources> method returns a list of subdirectories of the current
+directory in the form "dbi:CSV:f_dir=$dirname".
+
+If you want to read the subdirectories of another directory, use
+
+ my ($drh) = DBI->install_driver ("CSV");
+ my (@list) = $drh->data_sources (f_dir => "/usr/local/csv_data");
+
+=head4 list_tables
+
+This method returns a list of file names inside $dbh->{f_dir}.
+Example:
+
+ my ($dbh) = DBI->connect ("dbi:CSV:f_dir=/usr/local/csv_data");
+ my (@list) = $dbh->func ("list_tables");
+
+Note that the list includes all files contained in the directory, even
+those whose names are not valid table names from an SQL point of view.
+
+=head3 Additional methods
+
+The following methods are only available via their documented name when
+DBD::File is used directly. Because this is only reasonable for testing
+purposes, the real names must be used instead. Those names can be computed
+by replacing the C<f_> in the method name with the driver prefix.
+
+=head4 f_versions
+
+Signature:
+
+ sub f_versions (;$)
+ {
+ my ($table_name) = @_;
+ $table_name ||= ".";
+ ...
+ }
+
+Returns the versions of the driver, including the DBI version, the Perl
+version, DBI::PurePerl version (if DBI::PurePerl is active) and the version
+of the SQL engine in use.
+
+ my $dbh = DBI->connect ("dbi:File:");
+ my $f_versions = $dbh->f_versions ();
+ print "$f_versions\n";
+ __END__
+ # DBD::File 0.39 using SQL::Statement 1.28
+ # DBI 1.612
+ # OS netbsd (5.99.24)
+ # Perl 5.010001 (x86_64-netbsd-thread-multi)
+
+Called in list context, f_versions will return an array containing each
+line as a single entry.
+
+Some drivers might use the optional (table name) argument and modify
+version information related to the table (e.g. DBD::DBM provides storage
+backend information for the requested table, when it has a table name).
+
+=head4 f_get_meta
+
+Signature:
+
+ sub f_get_meta ($$)
+ {
+ my ($table_name, $attrib) = @_;
+ ...
+ }
+
+Returns the value of a meta attribute set for a specific table, if any.
+See L<f_meta> for the possible attributes.
+
+A table name of C<"."> (single dot) is interpreted as the default table.
+This will retrieve the appropriate attribute globally from the dbh.
+This has the same restrictions as C<< $dbh->{$attrib} >>.
+
+=head4 f_set_meta
+
+Signature:
+
+ sub f_set_meta ($$$)
+ {
+ my ($table_name, $attrib, $value) = @_;
+ ...
+ }
+
+Sets the value of a meta attribute set for a specific table.
+See L<f_meta> for the possible attributes.
+
+A table name of C<"."> (single dot) is interpreted as the default table
+which will set the specified attribute globally for the dbh.
+This has the same restrictions as C<< $dbh->{$attrib} = $value >>.
+
+=head4 f_clear_meta
+
+Signature:
+
+ sub f_clear_meta ($)
+ {
+ my ($table_name) = @_;
+ ...
+ }
+
+Clears the table specific meta information in the private storage of the
+dbh.
+
+=head1 SQL ENGINES
+
+DBD::File currently supports two SQL engines: L<SQL::Statement|SQL::Statement>
+and L<DBI::SQL::Nano|DBI::SQL::Nano>. DBI::SQL::Nano supports a
+I<very> limited subset of SQL statements, but it might be faster for some
+very simple tasks. SQL::Statement in contrast supports a much larger subset
+of ANSI SQL.
+
+To use SQL::Statement, you need at least version 1.28 of
+SQL::Statement and the environment variable C<DBI_SQL_NANO> must not
+be set to a true value.
+
+=head1 KNOWN BUGS AND LIMITATIONS
+
+=over 4
+
+=item *
+
+This module uses flock () internally but flock is not available on all
+platforms. On MacOS and Windows 95 there is no locking at all (perhaps
+not so important on MacOS and Windows 95, as there is only a single
+user).
+
+=item *
+
+The module stores details about the handled tables in a private area
+of the driver handle (C<$drh>). This data area is not shared between
+different driver instances, so several C<< DBI->connect () >> calls will
+cause different table instances and private data areas.
+
+This data area is filled for the first time when a table is accessed,
+either via an SQL statement or via C<table_info> and is not
+destroyed until the table is dropped or the driver handle is released.
+Manual destruction is possible via L<f_clear_meta>.
+
+The following attributes are preserved in the data area and will be
+evaluated instead of driver globals:
+
+=over 8
+
+=item f_ext
+
+=item f_dir
+
+=item f_lock
+
+=item f_lockfile
+
+=item f_encoding
+
+=item f_schema
+
+=item col_names
+
+=item sql_identifier_case
+
+=back
+
+The following attributes are preserved in the data area only and
+cannot be set globally.
+
+=over 8
+
+=item f_file
+
+=back
+
+The following attributes are preserved in the data area only and are
+computed when initializing the data area:
+
+=over 8
+
+=item f_fqfn
+
+=item f_fqbn
+
+=item f_fqln
+
+=item table_name
+
+=back
+
+For DBD::CSV tables this means, once opened "foo.csv" as table named "foo",
+another table named "foo" accessing the file "foo.txt" cannot be opened.
+Accessing "foo" will always access the file "foo.csv" in memorized
+C<f_dir>, locking C<f_lockfile> via memorized C<f_lock>.
+
+You can use L<f_clear_meta> or the C<f_file> attribute for a specific table
+to work around this.
+
+=item *
+
+When used with SQL::Statement and temporary tables e.g.,
+
+ CREATE TEMP TABLE ...
+
+the table data processing bypasses DBD::File::Table. No file system
+calls will be made and there are no clashes with existing (file based)
+tables with the same name. Temporary tables are chosen over file
+tables, but they will not be covered by C<table_info>.
+
+=back
+
+=head1 AUTHOR
+
+This module is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+The original author is Jochen Wiedmann.
+
+=head1 COPYRIGHT AND LICENSE
+
+ Copyright (C) 2009-2010 by H.Merijn Brand & Jens Rehsack
+ Copyright (C) 2004-2009 by Jeff Zucker
+ Copyright (C) 1998-2004 by Jochen Wiedmann
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=head1 SEE ALSO
+
+L<DBI|DBI>, L<DBD::DBM|DBD::DBM>, L<DBD::CSV|DBD::CSV>, L<Text::CSV|Text::CSV>,
+L<Text::CSV_XS|Text::CSV_XS>, L<SQL::Statement|SQL::Statement>, and
+L<DBI::SQL::Nano|DBI::SQL::Nano>
+
+=cut
diff --git a/lib/DBD/File/Developers.pod b/lib/DBD/File/Developers.pod
new file mode 100644
index 0000000..a9bef85
--- /dev/null
+++ b/lib/DBD/File/Developers.pod
@@ -0,0 +1,556 @@
+=head1 NAME
+
+DBD::File::Developers - Developers documentation for DBD::File
+
+=head1 SYNOPSIS
+
+ package DBD::myDriver;
+
+ use base qw(DBD::File);
+
+ sub driver
+ {
+ ...
+ my $drh = $proto->SUPER::driver($attr);
+ ...
+ return $drh->{class};
+ }
+
+ sub CLONE { ... }
+
+ package DBD::myDriver::dr;
+
+ @ISA = qw(DBD::File::dr);
+
+ sub data_sources { ... }
+ ...
+
+ package DBD::myDriver::db;
+
+ @ISA = qw(DBD::File::db);
+
+ sub init_valid_attributes { ... }
+ sub init_default_attributes { ... }
+ sub set_versions { ... }
+ sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
+ sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
+ sub get_myd_versions { ... }
+
+ package DBD::myDriver::st;
+
+ @ISA = qw(DBD::File::st);
+
+ sub FETCH { ... }
+ sub STORE { ... }
+
+ package DBD::myDriver::Statement;
+
+ @ISA = qw(DBD::File::Statement);
+
+ package DBD::myDriver::Table;
+
+ @ISA = qw(DBD::File::Table);
+
+ my %reset_on_modify = (
+ myd_abc => "myd_foo",
+ myd_mno => "myd_bar",
+ );
+ __PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+ my %compat_map = (
+ abc => 'foo_abc',
+ xyz => 'foo_xyz',
+ );
+ __PACKAGE__->register_compat_map( \%compat_map );
+
+ sub bootstrap_table_meta { ... }
+ sub init_table_meta { ... }
+ sub table_meta_attr_changed { ... }
+ sub open_file { ... }
+
+ sub fetch_row { ... }
+ sub push_row { ... }
+ sub push_names { ... }
+
+ # optimize the SQL engine by add one or more of
+ sub update_current_row { ... }
+ # or
+ sub update_specific_row { ... }
+ # or
+ sub update_one_row { ... }
+ # or
+ sub insert_new_row { ... }
+ # or
+ sub delete_current_row { ... }
+ # or
+ sub delete_one_row { ... }
+
+=head1 DESCRIPTION
+
+This document describes how DBD developers can write DBD::File based DBI
+drivers. It supplements L<DBI::DBD> and L<DBI::DBD::SqlEngine::Developers>,
+which you should read first.
+
+=head1 CLASSES
+
+Each DBI driver must provide a package global C<driver> method and three
+DBI related classes:
+
+=over 4
+
+=item DBD::File::dr
+
+Driver package, contains the methods DBI calls indirectly via DBI
+interface:
+
+ DBI->connect ('DBI:DBM:', undef, undef, {})
+
+ # invokes
+ package DBD::DBM::dr;
+ @DBD::DBM::dr::ISA = qw(DBD::File::dr);
+
+ sub connect ($$;$$$)
+ {
+ ...
+ }
+
+Similar for C<< data_sources () >> and C<< disconnect_all() >>.
+
+Pure Perl DBI drivers derived from DBD::File do not usually need to
+override any of the methods provided through the DBD::XXX::dr package
+however if you need additional initialization in the connect method
+you may need to.
+
+=item DBD::File::db
+
+Contains the methods which are called through DBI database handles
+(C<< $dbh >>). e.g.,
+
+ $sth = $dbh->prepare ("select * from foo");
+ # returns the f_encoding setting for table foo
+ $dbh->csv_get_meta ("foo", "f_encoding");
+
+DBD::File provides the typical methods required here. Developers who
+write DBI drivers based on DBD::File need to override the methods C<<
+set_versions >> and C<< init_valid_attributes >>.
+
+=item DBD::File::st
+
+Contains the methods to deal with prepared statement handles. e.g.,
+
+ $sth->execute () or die $sth->errstr;
+
+=back
+
+=head2 DBD::File
+
+This is the main package containing the routines to initialize
+DBD::File based DBI drivers. Primarily the C<< DBD::File::driver >>
+method is invoked, either directly from DBI when the driver is
+initialized or from the derived class.
+
+ package DBD::DBM;
+
+ use base qw( DBD::File );
+
+ sub driver
+ {
+ my ( $class, $attr ) = @_;
+ ...
+ my $drh = $class->SUPER::driver( $attr );
+ ...
+ return $drh;
+ }
+
+It is not necessary to implement your own driver method as long as
+additional initialization (e.g. installing more private driver
+methods) is not required. You do not need to call C<< setup_driver >>
+as DBD::File takes care of it.
+
+=head2 DBD::File::dr
+
+The driver package contains the methods DBI calls indirectly via the DBI
+interface (see L<DBI/DBI Class Methods>).
+
+DBD::File based DBI drivers usually do not need to implement anything here,
+it is enough to do the basic initialization:
+
+  package DBD::XXX::dr;
+
+ @DBD::XXX::dr::ISA = qw (DBD::File::dr);
+ $DBD::XXX::dr::imp_data_size = 0;
+ $DBD::XXX::dr::data_sources_attr = undef;
+ $DBD::XXX::ATTRIBUTION = "DBD::XXX $DBD::XXX::VERSION by Hans Mustermann";
+
+=head2 DBD::File::db
+
+This package defines the database methods, which are called via the DBI
+database handle C<< $dbh >>.
+
+Methods provided by DBD::File:
+
+=over 4
+
+=item ping
+
+Simply returns the content of the C<< Active >> attribute. Override
+when your driver needs more complicated actions here.
+
+=item prepare
+
+Prepares a new SQL statement to execute. Returns a statement handle,
+C<< $sth >> - an instance of DBD::XXX::st. It is neither required nor
+recommended to override this method.
+
+=item FETCH
+
+Fetches an attribute of a DBI database object. Private handle attributes
+must have a prefix (this is mandatory). If a requested attribute is
+detected as a private attribute without a valid prefix, the driver prefix
+(written as C<$drv_prefix>) is added.
+
+The driver prefix is extracted from the attribute name and verified against
+C<< $dbh->{ $drv_prefix . "valid_attrs" } >> (when it exists). If the
+requested attribute value is not listed as a valid attribute, this method
+croaks. If the attribute is valid and readonly (listed in C<< $dbh->{
+$drv_prefix . "readonly_attrs" } >> when it exists), a real copy of the
+attribute value is returned. So it's not possible to modify
+C<f_valid_attrs> from outside of DBD::File::db or a derived class.
+
+=item STORE
+
+Stores a database private attribute. Private handle attributes must have a
+prefix (this is mandatory). If a requested attribute is detected as a private
+attribute without a valid prefix, the driver prefix (written as
+C<$drv_prefix>) is added. If the database handle has an attribute
+C<${drv_prefix}_valid_attrs> - for attribute names which are not listed in
+that hash, this method croaks. If the database handle has an attribute
+C<${drv_prefix}_readonly_attrs>, only attributes which are not listed there
+can be stored (once they are initialized). Trying to overwrite such an
+immutable attribute forces this method to croak.
+
+An example of a valid attributes list can be found in
+C<< DBD::File::db::init_valid_attributes >>.
+
+=item set_versions
+
+This method sets the attribute C<f_version> with the version of DBD::File.
+
+This method is called at the beginning of the C<connect ()> phase.
+
+When overriding this method, do not forget to invoke the superior one.
+
+=item init_valid_attributes
+
+This method is called after the database handle is instantiated as the
+first attribute initialization.
+
+C<< DBD::File::db::init_valid_attributes >> initializes the attributes
+C<f_valid_attrs> and C<f_readonly_attrs>.
+
+When overriding this method, do not forget to invoke the superior one,
+preferably before doing anything else. Compatibility table attribute
+access must be initialized here to allow DBD::File to instantiate the
+map tie:
+
+ # for DBD::CSV
+ $dbh->{csv_meta} = "csv_tables";
+ # for DBD::DBM
+ $dbh->{dbm_meta} = "dbm_tables";
+ # for DBD::AnyData
+ $dbh->{ad_meta} = "ad_tables";
+
+=item init_default_attributes
+
+This method is called after the database handle is instantiated to
+initialize the default attributes.
+
+C<< DBD::File::db::init_default_attributes >> initializes the attributes
+C<f_dir>, C<f_meta>, C<f_meta_map>, C<f_version>.
+
+When the derived implementor class provides the attribute to validate
+attributes (e.g. C<< $dbh->{dbm_valid_attrs} = {...}; >>) or the attribute
+containing the immutable attributes (e.g. C<< $dbh->{dbm_readonly_attrs}
+= {...}; >>), the attributes C<drv_valid_attrs>, C<drv_readonly_attrs>,
+C<drv_version> and C<drv_meta> are added (when available) to the list of
+valid and immutable attributes (where C<drv_> is interpreted as the driver
+prefix).
+
+If C<drv_meta> is set, an attribute with the name in C<drv_meta> is
+initialized providing restricted read/write access to the meta data of the
+tables using C<DBD::File::TieTables> in the first (table) level and
+C<DBD::File::TieMeta> for the meta attribute level. C<DBD::File::TieTables>
+uses C<DBD::DRV::Table::get_table_meta> to initialize the second level
+tied hash on FETCH/STORE. The C<DBD::File::TieMeta> class uses
+C<DBD::DRV::Table::get_table_meta_attr> to FETCH attribute values and
+C<DBD::DRV::Table::set_table_meta_attr> to STORE attribute values. This
+allows it to map meta attributes for compatibility reasons.
+
+=item get_single_table_meta
+
+=item get_file_meta
+
+Retrieve an attribute from a table's meta information. The method
+signature is C<< get_file_meta ($dbh, $table, $attr) >>. This method
+is called by the injected db handle method C<< ${drv_prefix}get_meta
+>>.
+
+While get_file_meta allows C<$table> or C<$attr> to be a list of tables or
+attributes to retrieve, get_single_table_meta allows only one table name
+and only one attribute name. A table name of C<'.'> (single dot) is
+interpreted as the default table and this will retrieve the appropriate
+attribute globally from the dbh. This has the same restrictions as
+C<< $dbh->{$attrib} >>.
+
+get_file_meta allows C<'+'> and C<'*'> as wildcards for table names and
+C<$table> being a regular expression matching against the table names
+(evaluated without the default table). The table name C<'*'> is
+I<all currently known tables, including the default one>. The table
+name C<'+'> is I<all table names which conform to
+ANSI file name restrictions> (/^[_A-Za-z0-9]+$/).
+
+The table meta information is retrieved using the get_table_meta and
+get_table_meta_attr methods of the table class of the implementation.
+
+=item set_single_table_meta
+
+=item set_file_meta
+
+Sets an attribute in a table's meta information. The method signature is
+C<< set_file_meta ($dbh, $table, $attr, $value) >>. This method is called
+by the injected db handle method C<< ${drv_prefix}set_meta >>.
+
+While set_file_meta allows C<$table> to be a list of tables and C<$attr>
+to be a hash of several attributes to set, set_single_table_meta allows
+only one table name and only one attribute name/value pair.
+
+The wildcard characters for the table name are the same as for
+get_file_meta.
+
+The table meta information is updated using the get_table_meta and
+set_table_meta_attr methods of the table class of the implementation.
+
+=item clear_file_meta
+
+Clears all meta information cached about a table. The method signature is
+C<< clear_file_meta ($dbh, $table) >>. This method is called
+by the injected db handle method C<< ${drv_prefix}clear_meta >>.
+
+=back
+
+=head2 DBD::File::st
+
+Contains the methods to deal with prepared statement handles:
+
+=over 4
+
+=item FETCH
+
+Fetches statement handle attributes. Supported attributes (for full overview
+see L<DBI/Statement Handle Attributes>) are C<NAME>, C<TYPE>, C<PRECISION>
+and C<NULLABLE>, in case SQL::Statement is used as the SQL execution
+engine and a statement has been successfully prepared. When
+SQL::Statement has additional information about a table, that
+information is returned. Otherwise, the
+
+This method usually requires extending in a derived implementation.
+See L<DBD::CSV> or L<DBD::DBM> for some examples.
+
+=back
+
+=head2 DBD::File::Statement
+
+Derives from DBI::SQL::Nano::Statement to provide the following method:
+
+=over 4
+
+=item open_table
+
+Implements the open_table method required by L<SQL::Statement> and
+L<DBI::SQL::Nano>. All the work for opening the file(s) belonging to the
+table is handled and parameterized in DBD::File::Table. Unless you intend
+to add anything to the following implementation, an empty DBD::XXX::Statement
+package satisfies DBD::File.
+
+ sub open_table ($$$$$)
+ {
+ my ($self, $data, $table, $createMode, $lockMode) = @_;
+
+ my $class = ref $self;
+ $class =~ s/::Statement/::Table/;
+
+ my $flags = {
+ createMode => $createMode,
+ lockMode => $lockMode,
+ };
+ $self->{command} eq "DROP" and $flags->{dropMode} = 1;
+
+ return $class->new ($data, { table => $table }, $flags);
+ } # open_table
+
+=back
+
+=head2 DBD::File::Table
+
+Derives from DBI::SQL::Nano::Table and provides physical file access for
+the table data which are stored in the files.
+
+=over 4
+
+=item file2table
+
+This method tries to map a filename to the associated table
+name. It is called with a partially filled meta structure for the
+resulting table containing at least the following attributes:
+C<< f_ext >>, C<< f_dir >>, C<< f_lockfile >> and C<< sql_identifier_case >>.
+
+If a file/table map can be found then this method sets the C<< f_fqfn
+>>, C<< f_fqbn >>, C<< f_fqln >> and C<< table_name >> attributes in
+the meta structure. If a map cannot be found the table name will be
+undef.
+
+=item bootstrap_table_meta
+
+Initializes a table meta structure. Can be safely overridden in a
+derived class, as long as the C<< SUPER >> method is called at the end
+of the overridden method.
+
+It copies the following attributes from the database into the table meta data
+C<< f_dir >>, C<< f_ext >>, C<< f_encoding >>, C<< f_lock >>, C<< f_schema >>,
+C<< f_lockfile >> and C<< sql_identifier_case >> and makes them sticky to the
+table.
+
+This method should be called before you attempt to map between file
+name and table name to ensure the correct directory, extension etc. are
+used.
+
+=item init_table_meta
+
+Initializes more attributes of the table meta data - usually more
+expensive ones (e.g. those which require class instantiations) - when
+the file name and the table name could be mapped.
+
+=item get_table_meta
+
+Returns the table meta data. If there are none for the required
+table, a new one is initialized. When it fails, nothing is
+returned. On success, the name of the table and the meta data
+structure is returned.
+
+=item get_table_meta_attr
+
+Returns a single attribute from the table meta data. If the attribute
+name appears in C<%compat_map>, the attribute name is updated from
+there.
+
+=item set_table_meta_attr
+
+Sets a single attribute in the table meta data. If the attribute
+name appears in C<%compat_map>, the attribute name is updated from
+there.
+
+=item table_meta_attr_changed
+
+Called when an attribute of the meta data is modified.
+
+If the modified attribute requires to reset a calculated attribute, the
+calculated attribute is reset (deleted from meta data structure) and
+the I<initialized> flag is removed, too. The decision is made based on
+C<%register_reset_on_modify>.
+
+=item register_reset_on_modify
+
+Allows C<set_table_meta_attr> to reset meta attributes when special
+attributes are modified. For DBD::File, modifying one of C<f_file>, C<f_dir>,
+C<f_ext> or C<f_lockfile> will reset C<f_fqfn>. DBD::DBM extends the
+list for C<dbm_type> and C<dbm_mldbm> to reset the value of C<dbm_tietype>.
+
+If your DBD has calculated values in the meta data area, then call
+C<register_reset_on_modify>:
+
+ my %reset_on_modify = ( "xxx_foo" => "xxx_bar" );
+ __PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+
+=item register_compat_map
+
+Allows C<get_table_meta_attr> and C<set_table_meta_attr> to update the
+attribute name to the current favored one:
+
+ # from DBD::DBM
+ my %compat_map = ( "dbm_ext" => "f_ext" );
+ __PACKAGE__->register_compat_map( \%compat_map );
+
+=item open_file
+
+Called to open the table's data file.
+
+Depending on the attributes set in the table's meta data, the
+following steps are performed. Unless C<< f_dontopen >> is set to a
+true value, C<< f_fqfn >> must contain the full qualified file name
+for the table to work on (file2table ensures this). The encoding in
+C<< f_encoding >> is applied if set and the file is opened. If
+C<< f_fqln >> (fully qualified lock name) is set, this file is opened,
+too. Depending on the value in C<< f_lock >>, the appropriate lock is
+set on the opened data file or lock file.
+
+After this is done, a derived class might add more steps in an overridden
+C<< open_file >> method.
+
+=item new
+
+Instantiates the table. This is done in 3 steps:
+
+ 1. get the table meta data
+ 2. open the data file
+ 3. bless the table data structure using inherited constructor new
+
+It is not recommended to override the constructor of the table class.
+Find a reasonable place to add your extensions in one of the above four
+methods.
+
+=item drop
+
+Implements the abstract table method for the C<< DROP >>
+command. Discards table meta data after all files belonging to the
+table are closed and unlinked.
+
+Overriding this method might be reasonable in very rare cases.
+
+=item seek
+
+Implements the abstract table method used when accessing the table from the
+engine. C<< seek >> is called every time the engine uses dumb algorithms
+for iterating over the table content.
+
+=item truncate
+
+Implements the abstract table method used when dumb table algorithms
+for C<< UPDATE >> or C<< DELETE >> need to truncate the table storage
+after the last written row.
+
+=back
+
+You should consult the documentation of C<< SQL::Eval::Table >> (see
+L<SQL::Eval>) to get more information about the abstract methods of the
+table's base class you have to override and a description of the table
+meta information expected by the SQL engines.
+
+=head1 AUTHOR
+
+The module DBD::File is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+The original author is Jochen Wiedmann.
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=cut
diff --git a/lib/DBD/File/HowTo.pod b/lib/DBD/File/HowTo.pod
new file mode 100644
index 0000000..3d45257
--- /dev/null
+++ b/lib/DBD/File/HowTo.pod
@@ -0,0 +1,270 @@
+=head1 NAME
+
+DBD::File::HowTo - Guide to create DBD::File based driver
+
+=head1 SYNOPSIS
+
+ perldoc DBD::File::HowTo
+ perldoc DBI
+ perldoc DBI::DBD
+ perldoc DBD::File::Developers
+ perldoc DBI::DBD::SqlEngine::Developers
+ perldoc DBI::DBD::SqlEngine
+ perldoc SQL::Eval
+ perldoc DBI::DBD::SqlEngine::HowTo
+ perldoc SQL::Statement::Embed
+ perldoc DBD::File
+ perldoc DBD::File::HowTo
+ perldoc DBD::File::Developers
+
+=head1 DESCRIPTION
+
+This document provides a step-by-step guide, how to create a new
+C<DBD::File> based DBD. It expects that you carefully read the L<DBI>
+documentation and that you're familiar with L<DBI::DBD> and had read and
+understood L<DBD::ExampleP>.
+
+This document addresses experienced developers who are really sure that
+they need to invest time when writing a new DBI Driver. Writing a DBI
+Driver is neither a weekend project nor an easy job for hobby coders
+after work. Expect one or two man-months of time for the first start.
+
+Those who are still reading, should be able to sing the rules of
+L<DBI::DBD/CREATING A NEW DRIVER>.
+
+Of course, DBD::File is a DBI::DBD::SqlEngine and you surely read
+L<DBI::DBD::SqlEngine::HowTo> before continuing here.
+
+=head1 CREATING DRIVER CLASSES
+
+Do you have an entry in DBI's DBD registry? For this guide, a prefix of
+C<foo_> is assumed.
+
+=head2 Sample Skeleton
+
+ package DBD::Foo;
+
+ use strict;
+ use warnings;
+ use vars qw(@ISA $VERSION);
+ use base qw(DBD::File);
+
+ use DBI ();
+
+ $VERSION = "0.001";
+
+ package DBD::Foo::dr;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBD::File::dr);
+ $imp_data_size = 0;
+
+ package DBD::Foo::db;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBD::File::db);
+ $imp_data_size = 0;
+
+ package DBD::Foo::st;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBD::File::st);
+ $imp_data_size = 0;
+
+ package DBD::Foo::Statement;
+
+ use vars qw(@ISA);
+
+ @ISA = qw(DBD::File::Statement);
+
+ package DBD::Foo::Table;
+
+ use vars qw(@ISA);
+
+ @ISA = qw(DBD::File::Table);
+
+ 1;
+
+Tiny, eh? And all you have now is a DBD named foo which is able to
+deal with temporary tables, as long as you use L<SQL::Statement>. In
+L<DBI::SQL::Nano> environments, this DBD can do nothing.
+
+=head2 Start over
+
+Based on L<DBI::DBD::SqlEngine::HowTo>, we're now having a driver which
+could do basic things. Of course, it should now derive from DBD::File
+instead of DBI::DBD::SqlEngine, shouldn't it?
+
+DBD::File extends DBI::DBD::SqlEngine to deal with any kind of files.
+In principle, the only extensions required are to the table class:
+
+ package DBD::Foo::Table;
+
+ sub bootstrap_table_meta
+ {
+ my ( $self, $dbh, $meta, $table ) = @_;
+
+ # initialize all $meta attributes which might be relevant for
+ # file2table
+
+ return $self->SUPER::bootstrap_table_meta($dbh, $meta, $table);
+ }
+
+ sub init_table_meta
+ {
+ my ( $self, $dbh, $meta, $table ) = @_;
+
+ # called after $meta contains the results from file2table
+ # initialize all missing $meta attributes
+
+ $self->SUPER::init_table_meta( $dbh, $meta, $table );
+ }
+
+In case C<DBD::File::Table::open_file> doesn't open the files as the driver
+needs that, override it!
+
+ sub open_file
+ {
+ my ( $self, $meta, $attrs, $flags ) = @_;
+ # ensure that $meta->{f_dontopen} is set
+ $self->SUPER::open_file( $meta, $attrs, $flags );
+ # now do what ever needs to be done
+ }
+
+Combined with the methods implemented using the L<SQL::Statement::Embed>
+guide, the table is fully working and you could try a start over.
+
+=head2 User comfort
+
+C<DBD::File> since C<0.39> consolidates all persistent meta data of a table
+into a single structure stored in C<< $dbh->{f_meta} >>. While DBD::File
+provides only readonly access to this structure, modifications are still
+allowed.
+
+Primarily DBD::File provides access via setters C<get_file_meta>,
+C<set_file_meta> and C<clear_file_meta>. Those methods are easily
+accessible by the users via the C<< $dbh->func () >> interface provided
+by DBI. Well, many users don't feel comfortable when calling
+
+ # don't require extension for tables cars
+ $dbh->func ("cars", "f_ext", ".csv", "set_file_meta");
+
+DBD::File will inject a method into your driver to increase the user
+comfort to allow:
+
+ # don't require extension for tables cars
+ $dbh->foo_set_meta ("cars", "f_ext", ".csv");
+
+Better, but here and there users like to do:
+
+ # don't require extension for tables cars
+ $dbh->{foo_tables}->{cars}->{f_ext} = ".csv";
+
+This interface is provided when derived DBD's define following in
+C<init_valid_attributes> (please compare carefully with the example in
+DBI::DBD::SqlEngine::HowTo):
+
+ sub init_valid_attributes
+ {
+ my $dbh = $_[0];
+
+ $dbh->SUPER::init_valid_attributes ();
+
+ $dbh->{foo_valid_attrs} = {
+ foo_version => 1, # contains version of this driver
+ foo_valid_attrs => 1, # contains the valid attributes of foo drivers
+ foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
+ foo_bar => 1, # contains the bar attribute
+ foo_baz => 1, # contains the baz attribute
+ foo_manager => 1, # contains the manager of the driver instance
+ foo_manager_type => 1, # contains the manager class of the driver instance
+ foo_meta => 1, # contains the public interface to modify table meta attributes
+ };
+ $dbh->{foo_readonly_attrs} = {
+ foo_version => 1, # ensure no-one modifies the driver version
+ foo_valid_attrs => 1, # do not permit to add more valid attributes ...
+ foo_readonly_attrs => 1, # ... or make the immutable mutable
+ foo_manager => 1, # manager is set internally only
+ foo_meta => 1, # ensure public interface to modify table meta attributes are immutable
+ };
+
+ $dbh->{foo_meta} = "foo_tables";
+
+ return $dbh;
+ }
+
+This provides a tied hash in C<< $dbh->{foo_tables} >> and a tied hash for
+each table's meta data in C<< $dbh->{foo_tables}->{$table_name} >>.
+Modifications on the table meta attributes are done using the table
+methods:
+
+ sub get_table_meta_attr { ... }
+ sub set_table_meta_attr { ... }
+
+Both methods can adjust the attribute name for compatibility reasons, e.g.
+when former versions of the DBD allowed different names to be used for the
+same flag:
+
+ my %compat_map = (
+ abc => 'foo_abc',
+ xyz => 'foo_xyz',
+ );
+ __PACKAGE__->register_compat_map( \%compat_map );
+
+If any user modification on a meta attribute needs reinitialization of
+the meta structure (in case of C<DBD::File> these are the attributes
+C<f_file>, C<f_dir>, C<f_ext> and C<f_lockfile>), inform DBD::File by
+doing
+
+ my %reset_on_modify = (
+ foo_xyz => "foo_bar",
+ foo_abc => "foo_bar",
+ );
+ __PACKAGE__->register_reset_on_modify( \%reset_on_modify );
+
+The next access to the table meta data will force DBD::File to re-do the
+entire meta initialization process.
+
+Any further action which needs to be taken can be handled in
+C<table_meta_attr_changed>:
+
+ sub table_meta_attr_changed
+ {
+ my ($class, $meta, $attrib, $value) = @_;
+ ...
+ $class->SUPER::table_meta_attr_changed ($meta, $attrib, $value);
+ }
+
+This is done before the new value is set in C<$meta>, so the attribute
+changed handler can act depending on the old value.
+
+=head2 Testing
+
+Now you should have your own DBD::File based driver. Was easy, wasn't it?
+But does it work well? Prove it by writing tests and remember to use
+dbd_edit_mm_attribs from L<DBI::DBD> to ensure testing even rare cases.
+
+=head1 AUTHOR
+
+This guide is written by Jens Rehsack. DBD::File is written by Jochen
+Wiedmann and Jeff Zucker.
+
+The module DBD::File is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=cut
diff --git a/lib/DBD/File/Roadmap.pod b/lib/DBD/File/Roadmap.pod
new file mode 100644
index 0000000..804d759
--- /dev/null
+++ b/lib/DBD/File/Roadmap.pod
@@ -0,0 +1,176 @@
+=head1 NAME
+
+DBD::File::Roadmap - Planned Enhancements for DBD::File and pure Perl DBD's
+
+Jens Rehsack - May 2010
+
+=head1 SYNOPSIS
+
+This document gives a high level overview of the future of the DBD::File DBI
+driver and groundwork for pure Perl DBI drivers.
+
+The planned enhancements cover features, testing, performance, reliability,
+extensibility and more.
+
+=head1 CHANGES AND ENHANCEMENTS
+
+=head2 Features
+
+There are some features missing we would like to add, but there is
+no time plan:
+
+=over 4
+
+=item LOCK TABLE
+
+The newly implemented internal common table meta storage area would allow
+us to implement LOCK TABLE support based on file system C<flock ()>
+support.
+
+=item Transaction support
+
+While DBD::AnyData recommends explicitly committing by importing and
+exporting tables, DBD::File might be enhanced in a future version to allow
+transparent transactions using the temporary tables of SQL::Statement as
+shadow (dirty) tables.
+
+Transaction support will heavily rely on lock table support.
+
+=item Data Dictionary Persistence
+
+SQL::Statement provides dictionary information when a "CREATE TABLE ..."
+statement is executed. This dictionary is preserved for some statement
+handle attribute fetches (as C<NULLABLE> or C<PRECISION>).
+
+It is planned to extend DBD::File to support data dictionaries to work
+on the tables in it. It is not planned to support one table in different
+dictionaries, but you can have several dictionaries in one directory.
+
+=item SQL Engine selection on connect
+
+Currently the SQL engine selected is chosen during the loading of the module
+L<DBI::SQL::Nano>. Ideally end users should be able to select the engine
+used in C<< DBI->connect () >> with a special DBD::File attribute.
+
+=back
+
+Other points of view to the planned features (and more features for the
+SQL::Statement engine) are shown in L<SQL::Statement::Roadmap>.
+
+=head2 Testing
+
+DBD::File and the dependent DBD::DBM requires a lot more automated tests
+covering API stability and compatibility with optional modules
+like SQL::Statement.
+
+=head2 Performance
+
+Several arguments for support of features like indexes on columns
+and cursors are made for DBD::CSV (which is a DBD::File based driver,
+too). Similar arguments could be made for DBD::DBM, DBD::AnyData,
+DBD::RAM or DBD::PO etc.
+
+To improve the performance of the underlying SQL engines, a clean
+reimplementation seems to be required. Currently both engines are
+prematurely optimized and therefore it is not trivial to provide
+further optimization without the risk of breaking existing features.
+
+Join the DBI developers IRC channel at L<irc://irc.perl.org/dbi> to
+participate or post to the DBI Developers Mailing List.
+
+=head2 Reliability
+
+DBD::File currently lacks the following points:
+
+=over 4
+
+=item duplicate table names
+
+It is currently possible to access a table quoted with a relative path
+(a) and additionally using an absolute path (b). If (a) and (b) are
+the same file that is not recognized (except for
+flock protection handled by the Operating System) and two independent
+tables are handled.
+
+=item invalid table names
+
+The current implementation does not prevent someone choosing a
+directory name as a physical file name for the table to open.
+
+=back
+
+=head2 Extensibility
+
+I (Jens Rehsack) have some (partially for example only) DBD's in mind:
+
+=over 4
+
+=item DBD::Sys
+
+Derive DBD::Sys from a common code base shared with DBD::File which handles
+all the emulation DBI needs (as getinfo, SQL engine handling, ...)
+
+=item DBD::Dir
+
+Provide a DBD::File derived to work with fixed table definitions through the
+file system to demonstrate how DBI / Pure Perl DBDs could handle databases
+with hierarchical structures.
+
+=item DBD::Join
+
+Provide a DBI driver which is able to manage multiple connections to other
+Databases (as DBD::Multiplex), but allow them to point to different data
+sources and allow joins between the tables of them:
+
+ # Example
+ # Let table 'lsof' being a table in DBD::Sys giving a list of open files using lsof utility
+    # Let table 'dir' be a table from DBD::Dir
+    $sth = $dbh->prepare( "select * from dir,lsof where path='/documents' and dir.entry = lsof.filename" );
+ $sth->execute(); # gives all open files in '/documents'
+ ...
+
+ # Let table 'filesys' a DBD::Sys table of known file systems on current host
+ # Let table 'applications' a table of your Configuration Management Database
+ # where current applications (relocatable, with mountpoints for filesystems)
+ # are stored
+    $sth = $dbh->prepare( "select * from applications,filesys where " .
+ "application.mountpoint = filesys.mountpoint and ".
+ "filesys.mounted is true" );
+ $sth->execute(); # gives all currently mounted applications on this host
+
+=back
+
+=head1 PRIORITIES
+
+Our priorities are focussed on current issues. Initially many new test
+cases for DBD::File and DBD::DBM should be added to the DBI test
+suite. After that some additional documentation on how to use the
+DBD::File API will be provided.
+
+Any additional priorities will come later and can be modified by (paying)
+users.
+
+=head1 RESOURCES AND CONTRIBUTIONS
+
+See L<http://dbi.perl.org/contributing> for I<how you can help>.
+
+If your company has benefited from DBI, please consider if
+it could make a donation to The Perl Foundation "DBI Development"
+fund at L<http://dbi.perl.org/donate> to secure future development.
+
+Alternatively, if your company would benefit from a specific new
+DBI feature, please consider sponsoring its development through
+the options listed in the section "Commercial Support from the Author"
+on L<http://dbi.perl.org/support/>.
+
+Using such targeted financing allows you to contribute to DBI
+development and rapidly get something specific and directly valuable
+to you in return.
+
+My company also offers annual support contracts for the DBI, which
+provide another way to support the DBI and get something specific
+in return. Contact me for details.
+
+Thank you.
+
+=cut
diff --git a/lib/DBD/Gofer.pm b/lib/DBD/Gofer.pm
new file mode 100644
index 0000000..afd8201
--- /dev/null
+++ b/lib/DBD/Gofer.pm
@@ -0,0 +1,1292 @@
+{
+ package DBD::Gofer;
+
+ use strict;
+
+ require DBI;
+ require DBI::Gofer::Request;
+ require DBI::Gofer::Response;
+ require Carp;
+
+ our $VERSION = sprintf("0.%06d", q$Revision: 15326 $ =~ /(\d+)/o);
+
+# $Id: Gofer.pm 15326 2012-06-06 16:32:38Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+
+
+ # attributes we'll allow local STORE
+ our %xxh_local_store_attrib = map { $_=>1 } qw(
+ Active
+ CachedKids
+ Callbacks
+ DbTypeSubclass
+ ErrCount Executed
+ FetchHashKeyName
+ HandleError HandleSetErr
+ InactiveDestroy
+ AutoInactiveDestroy
+ PrintError PrintWarn
+ Profile
+ RaiseError
+ RootClass
+ ShowErrorStatement
+ Taint TaintIn TaintOut
+ TraceLevel
+ Warn
+ dbi_quote_identifier_cache
+ dbi_connect_closure
+ dbi_go_execute_unique
+ );
+ our %xxh_local_store_attrib_if_same_value = map { $_=>1 } qw(
+ Username
+ dbi_connect_method
+ );
+
+ our $drh = undef; # holds driver handle once initialized
+ our $methods_already_installed;
+
+ sub driver{
+ return $drh if $drh;
+
+ DBI->setup_driver('DBD::Gofer');
+
+ unless ($methods_already_installed++) {
+ my $opts = { O=> 0x0004 }; # IMA_KEEP_ERR
+ DBD::Gofer::db->install_method('go_dbh_method', $opts);
+ DBD::Gofer::st->install_method('go_sth_method', $opts);
+ DBD::Gofer::st->install_method('go_clone_sth', $opts);
+ DBD::Gofer::db->install_method('go_cache', $opts);
+ DBD::Gofer::st->install_method('go_cache', $opts);
+ }
+
+ my($class, $attr) = @_;
+ $class .= "::dr";
+ ($drh) = DBI::_new_drh($class, {
+ 'Name' => 'Gofer',
+ 'Version' => $VERSION,
+ 'Attribution' => 'DBD Gofer by Tim Bunce',
+ });
+
+ $drh;
+ }
+
+
+ sub CLONE {
+ undef $drh;
+ }
+
+
+ sub go_cache {
+ my $h = shift;
+ $h->{go_cache} = shift if @_;
+ # return handle's override go_cache, if it has one
+ return $h->{go_cache} if defined $h->{go_cache};
+ # or else the transports default go_cache
+ return $h->{go_transport}->{go_cache};
+ }
+
+
+ sub set_err_from_response { # set error/warn/info and propagate warnings
+ my $h = shift;
+ my $response = shift;
+ if (my $warnings = $response->warnings) {
+ warn $_ for @$warnings;
+ }
+ my ($err, $errstr, $state) = $response->err_errstr_state;
+ # Only set_err() if there's an error else leave the current values
+ # (The current values will normally be set undef by the DBI dispatcher
+ # except for methods marked KEEPERR such as ping.)
+ $h->set_err($err, $errstr, $state) if defined $err;
+ return undef;
+ }
+
+
+ sub install_methods_proxy {
+ my ($installed_methods) = @_;
+ while ( my ($full_method, $attr) = each %$installed_methods ) {
+ # need to install both a DBI dispatch stub and a proxy stub
+ # (the dispatch stub may be already here due to local driver use)
+
+ DBI->_install_method($full_method, "", $attr||{})
+ unless defined &{$full_method};
+
+ # now install proxy stubs on the driver side
+ $full_method =~ m/^DBI::(\w\w)::(\w+)$/
+ or die "Invalid method name '$full_method' for install_method";
+ my ($type, $method) = ($1, $2);
+ my $driver_method = "DBD::Gofer::${type}::${method}";
+ next if defined &{$driver_method};
+ my $sub;
+ if ($type eq 'db') {
+ $sub = sub { return shift->go_dbh_method(undef, $method, @_) };
+ }
+ else {
+ $sub = sub { shift->set_err($DBI::stderr, "Can't call \$${type}h->$method when using DBD::Gofer"); return; };
+ }
+ no strict 'refs';
+ *$driver_method = $sub;
+ }
+ }
+}
+
+
+{ package DBD::Gofer::dr; # ====== DRIVER ======
+
+ $imp_data_size = 0;
+ use strict;
+
+ sub connect_cached {
+ my ($drh, $dsn, $user, $auth, $attr)= @_;
+ $attr ||= {};
+ return $drh->SUPER::connect_cached($dsn, $user, $auth, {
+ (%$attr),
+ go_connect_method => $attr->{go_connect_method} || 'connect_cached',
+ });
+ }
+
+
+ sub connect {
+ my($drh, $dsn, $user, $auth, $attr)= @_;
+ my $orig_dsn = $dsn;
+
+ # first remove dsn= and everything after it
+ my $remote_dsn = ($dsn =~ s/;?\bdsn=(.*)$// && $1)
+ or return $drh->set_err($DBI::stderr, "No dsn= argument in '$orig_dsn'");
+
+ if ($attr->{go_bypass}) { # don't use DBD::Gofer for this connection
+ # useful for testing with DBI_AUTOPROXY, e.g., t/03handle.t
+ return DBI->connect($remote_dsn, $user, $auth, $attr);
+ }
+
+ my %go_attr;
+ # extract any go_ attributes from the connect() attr arg
+ for my $k (grep { /^go_/ } keys %$attr) {
+ $go_attr{$k} = delete $attr->{$k};
+ }
+ # then override those with any attributes embedded in our dsn (not remote_dsn)
+ for my $kv (grep /=/, split /;/, $dsn, -1) {
+ my ($k, $v) = split /=/, $kv, 2;
+ $go_attr{ "go_$k" } = $v;
+ }
+
+ if (not ref $go_attr{go_policy}) { # if not a policy object already
+ my $policy_class = $go_attr{go_policy} || 'classic';
+ $policy_class = "DBD::Gofer::Policy::$policy_class"
+ unless $policy_class =~ /::/;
+ _load_class($policy_class)
+ or return $drh->set_err($DBI::stderr, "Can't load $policy_class: $@");
+ # replace policy name in %go_attr with policy object
+ $go_attr{go_policy} = eval { $policy_class->new(\%go_attr) }
+ or return $drh->set_err($DBI::stderr, "Can't instanciate $policy_class: $@");
+ }
+ # policy object is left in $go_attr{go_policy} so transport can see it
+ my $go_policy = $go_attr{go_policy};
+
+ if ($go_attr{go_cache} and not ref $go_attr{go_cache}) { # if not a cache object already
+ my $cache_class = $go_attr{go_cache};
+ $cache_class = "DBI::Util::CacheMemory" if $cache_class eq '1';
+ _load_class($cache_class)
+ or return $drh->set_err($DBI::stderr, "Can't load $cache_class $@");
+ $go_attr{go_cache} = eval { $cache_class->new() }
+ or $drh->set_err(0, "Can't instanciate $cache_class: $@"); # warning
+ }
+
+ # delete any other attributes that don't apply to transport
+ my $go_connect_method = delete $go_attr{go_connect_method};
+
+ my $transport_class = delete $go_attr{go_transport}
+ or return $drh->set_err($DBI::stderr, "No transport= argument in '$orig_dsn'");
+ $transport_class = "DBD::Gofer::Transport::$transport_class"
+ unless $transport_class =~ /::/;
+ _load_class($transport_class)
+ or return $drh->set_err($DBI::stderr, "Can't load $transport_class: $@");
+ my $go_transport = eval { $transport_class->new(\%go_attr) }
+ or return $drh->set_err($DBI::stderr, "Can't instanciate $transport_class: $@");
+
+ my $request_class = "DBI::Gofer::Request";
+ my $go_request = eval {
+ my $go_attr = { %$attr };
+ # XXX user/pass of fwd server vs db server ? also impact of autoproxy
+ if ($user) {
+ $go_attr->{Username} = $user;
+ $go_attr->{Password} = $auth;
+ }
+ # delete any attributes we can't serialize (or don't want to)
+ delete @{$go_attr}{qw(Profile HandleError HandleSetErr Callbacks)};
+ # delete any attributes that should only apply to the client-side
+ delete @{$go_attr}{qw(RootClass DbTypeSubclass)};
+
+ $go_connect_method ||= $go_policy->connect_method($remote_dsn, $go_attr) || 'connect';
+ $request_class->new({
+ dbh_connect_call => [ $go_connect_method, $remote_dsn, $user, $auth, $go_attr ],
+ })
+ } or return $drh->set_err($DBI::stderr, "Can't instanciate $request_class: $@");
+
+ my ($dbh, $dbh_inner) = DBI::_new_dbh($drh, {
+ 'Name' => $dsn,
+ 'USER' => $user,
+ go_transport => $go_transport,
+ go_request => $go_request,
+ go_policy => $go_policy,
+ });
+
+ # mark as inactive temporarily for STORE. Active not set until connected() called.
+ $dbh->STORE(Active => 0);
+
+ # should we ping to check the connection
+ # and fetch dbh attributes
+ my $skip_connect_check = $go_policy->skip_connect_check($attr, $dbh);
+ if (not $skip_connect_check) {
+ if (not $dbh->go_dbh_method(undef, 'ping')) {
+ return undef if $dbh->err; # error already recorded, typically
+ return $dbh->set_err($DBI::stderr, "ping failed");
+ }
+ }
+
+ return $dbh;
+ }
+
+ sub _load_class { # return true or false+$@
+ my $class = shift;
+ (my $pm = $class) =~ s{::}{/}g;
+ $pm .= ".pm";
+ return 1 if eval { require $pm };
+ delete $INC{$pm}; # shouldn't be needed (perl bug?) and assigning undef isn't enough
+ undef; # error in $@
+ }
+
+}
+
+
+{ package DBD::Gofer::db; # ====== DATABASE ======
+ $imp_data_size = 0;
+ use strict;
+ use Carp qw(carp croak);
+
+ my %dbh_local_store_attrib = %DBD::Gofer::xxh_local_store_attrib;
+
+ sub connected {
+ shift->STORE(Active => 1);
+ }
+
+ sub go_dbh_method {
+ my $dbh = shift;
+ my $meta = shift;
+ # @_ now contains ($method_name, @args)
+
+ my $request = $dbh->{go_request};
+ $request->init_request([ wantarray, @_ ], $dbh);
+ ++$dbh->{go_request_count};
+
+ my $go_policy = $dbh->{go_policy};
+ my $dbh_attribute_update = $go_policy->dbh_attribute_update();
+ $request->dbh_attributes( $go_policy->dbh_attribute_list() )
+ if $dbh_attribute_update eq 'every'
+ or $dbh->{go_request_count}==1;
+
+ $request->dbh_last_insert_id_args($meta->{go_last_insert_id_args})
+ if $meta->{go_last_insert_id_args};
+
+ my $transport = $dbh->{go_transport}
+ or return $dbh->set_err($DBI::stderr, "Not connected (no transport)");
+
+ local $transport->{go_cache} = $dbh->{go_cache}
+ if defined $dbh->{go_cache};
+
+ my ($response, $retransmit_sub) = $transport->transmit_request($request);
+ $response ||= $transport->receive_response($request, $retransmit_sub);
+ $dbh->{go_response} = $response
+ or die "No response object returned by $transport";
+
+ die "response '$response' returned by $transport is not a response object"
+ unless UNIVERSAL::isa($response,"DBI::Gofer::Response");
+
+ if (my $dbh_attributes = $response->dbh_attributes) {
+
+ # XXX installed_methods piggybacks on dbh_attributes for now
+ if (my $installed_methods = delete $dbh_attributes->{dbi_installed_methods}) {
+ DBD::Gofer::install_methods_proxy($installed_methods)
+ if $dbh->{go_request_count}==1;
+ }
+
+ # XXX we don't STORE here, we just stuff the value into the attribute cache
+ $dbh->{$_} = $dbh_attributes->{$_}
+ for keys %$dbh_attributes;
+ }
+
+ my $rv = $response->rv;
+ if (my $resultset_list = $response->sth_resultsets) {
+ # dbh method call returned one or more resultsets
+ # (was probably a metadata method like table_info)
+ #
+ # setup an sth but don't execute/forward it
+ my $sth = $dbh->prepare(undef, { go_skip_prepare_check => 1 });
+ # set the sth response to our dbh response
+ (tied %$sth)->{go_response} = $response;
+ # setup the sth with the results in our response
+ $sth->more_results;
+ # and return that new sth as if it came from original request
+ $rv = [ $sth ];
+ }
+ elsif (!$rv) { # should only occur for major transport-level error
+ #carp("no rv in response { @{[ %$response ]} }");
+ $rv = [ ];
+ }
+
+ DBD::Gofer::set_err_from_response($dbh, $response);
+
+ return (wantarray) ? @$rv : $rv->[0];
+ }
+
+
+ # Methods that should be forwarded but can be cached
+ for my $method (qw(
+ tables table_info column_info primary_key_info foreign_key_info statistics_info
+ data_sources type_info_all get_info
+ parse_trace_flags parse_trace_flag
+ func
+ )) {
+ my $policy_name = "cache_$method";
+ my $super_name = "SUPER::$method";
+ my $sub = sub {
+ my $dbh = shift;
+ my $rv;
+
+ # if we know the remote side doesn't override the DBI's default method
+ # then we might as well just call the DBI's default method on the client
+ # (which may, in turn, call other methods that are forwarded, like get_info)
+ if ($dbh->{dbi_default_methods}{$method} && $dbh->{go_policy}->skip_default_methods()) {
+ $dbh->trace_msg(" !! $method: using local default as remote method is also default\n");
+ return $dbh->$super_name(@_);
+ }
+
+ my $cache;
+ my $cache_key;
+ if (my $cache_it = $dbh->{go_policy}->$policy_name(undef, $dbh, @_)) {
+ $cache = $dbh->{go_meta_cache} ||= {}; # keep separate from go_cache
+ $cache_key = sprintf "%s_wa%d(%s)", $policy_name, wantarray||0,
+ join(",\t", map { # XXX basic but sufficient for now
+ !ref($_) ? DBI::neat($_,1e6)
+ : ref($_) eq 'ARRAY' ? DBI::neat_list($_,1e6,",\001")
+ : ref($_) eq 'HASH' ? do { my @k = sort keys %$_; DBI::neat_list([@k,@{$_}{@k}],1e6,",\002") }
+ : do { warn "unhandled argument type ($_)"; $_ }
+ } @_);
+ if ($rv = $cache->{$cache_key}) {
+ $dbh->trace_msg("$method(@_) returning previously cached value ($cache_key)\n",4);
+ my @cache_rv = @$rv;
+ # if it's an sth we have to clone it
+ $cache_rv[0] = $cache_rv[0]->go_clone_sth if UNIVERSAL::isa($cache_rv[0],'DBI::st');
+ return (wantarray) ? @cache_rv : $cache_rv[0];
+ }
+ }
+
+ $rv = [ (wantarray)
+ ? ($dbh->go_dbh_method(undef, $method, @_))
+ : scalar $dbh->go_dbh_method(undef, $method, @_)
+ ];
+
+ if ($cache) {
+ $dbh->trace_msg("$method(@_) caching return value ($cache_key)\n",4);
+ my @cache_rv = @$rv;
+ # if it's an sth we have to clone it
+ #$cache_rv[0] = $cache_rv[0]->go_clone_sth
+ # if UNIVERSAL::isa($cache_rv[0],'DBI::st');
+ $cache->{$cache_key} = \@cache_rv
+ unless UNIVERSAL::isa($cache_rv[0],'DBI::st'); # XXX cloning sth not yet done
+ }
+
+ return (wantarray) ? @$rv : $rv->[0];
+ };
+ no strict 'refs';
+ *$method = $sub;
+ }
+
+
+ # Methods that can use the DBI defaults for some situations/drivers
+ for my $method (qw(
+ quote quote_identifier
+ )) { # XXX keep DBD::Gofer::Policy::Base in sync
+ my $policy_name = "locally_$method";
+ my $super_name = "SUPER::$method";
+ my $sub = sub {
+ my $dbh = shift;
+
+ # if we know the remote side doesn't override the DBI's default method
+ # then we might as well just call the DBI's default method on the client
+ # (which may, in turn, call other methods that are forwarded, like get_info)
+ if ($dbh->{dbi_default_methods}{$method} && $dbh->{go_policy}->skip_default_methods()) {
+ $dbh->trace_msg(" !! $method: using local default as remote method is also default\n");
+ return $dbh->$super_name(@_);
+ }
+
+ # false: use remote gofer
+ # 1: use local DBI default method
+ # code ref: use the code ref
+ my $locally = $dbh->{go_policy}->$policy_name($dbh, @_);
+ if ($locally) {
+ return $locally->($dbh, @_) if ref $locally eq 'CODE';
+ return $dbh->$super_name(@_);
+ }
+ return $dbh->go_dbh_method(undef, $method, @_); # propagate context
+ };
+ no strict 'refs';
+ *$method = $sub;
+ }
+
+
+ # Methods that should always fail
+ for my $method (qw(
+ begin_work commit rollback
+ )) {
+ no strict 'refs';
+ *$method = sub { return shift->set_err($DBI::stderr, "$method not available with DBD::Gofer") }
+ }
+
+
+ sub do {
+ my ($dbh, $sql, $attr, @args) = @_;
+ delete $dbh->{Statement}; # avoid "Modification of non-creatable hash value attempted"
+ $dbh->{Statement} = $sql; # for profiling and ShowErrorStatement
+ my $meta = { go_last_insert_id_args => $attr->{go_last_insert_id_args} };
+ return $dbh->go_dbh_method($meta, 'do', $sql, $attr, @args);
+ }
+
+ sub ping {
+ my $dbh = shift;
+ return $dbh->set_err(0, "can't ping while not connected") # warning
+ unless $dbh->SUPER::FETCH('Active');
+ my $skip_ping = $dbh->{go_policy}->skip_ping();
+ return ($skip_ping) ? 1 : $dbh->go_dbh_method(undef, 'ping', @_);
+ }
+
+ sub last_insert_id {
+ my $dbh = shift;
+ my $response = $dbh->{go_response} or return undef;
+ return $response->last_insert_id;
+ }
+
+ sub FETCH {
+ my ($dbh, $attrib) = @_;
+
+ # FETCH is effectively already cached because the DBI checks the
+ # attribute cache in the handle before calling FETCH
+ # and this FETCH copies the value into the attribute cache
+
+ # forward driver-private attributes (except ours)
+ if ($attrib =~ m/^[a-z]/ && $attrib !~ /^go_/) {
+ my $value = $dbh->go_dbh_method(undef, 'FETCH', $attrib);
+ $dbh->{$attrib} = $value; # XXX forces caching by DBI
+ return $dbh->{$attrib} = $value;
+ }
+
+ # else pass up to DBI to handle
+ return $dbh->SUPER::FETCH($attrib);
+ }
+
+ sub STORE {
+ my ($dbh, $attrib, $value) = @_;
+ if ($attrib eq 'AutoCommit') {
+ croak "Can't enable transactions when using DBD::Gofer" if !$value;
+ return $dbh->SUPER::STORE($attrib => ($value) ? -901 : -900);
+ }
+ return $dbh->SUPER::STORE($attrib => $value)
+ # we handle this attribute locally
+ if $dbh_local_store_attrib{$attrib}
+ # or it's a private_ (application) attribute
+ or $attrib =~ /^private_/
+ # or not yet connected (ie being called by DBI->connect)
+ or not $dbh->FETCH('Active');
+
+ return $dbh->SUPER::STORE($attrib => $value)
+ if $DBD::Gofer::xxh_local_store_attrib_if_same_value{$attrib}
+ && do { # values are the same
+ my $crnt = $dbh->FETCH($attrib);
+ local $^W;
+ (defined($value) ^ defined($crnt))
+ ? 0 # definedness differs
+ : $value eq $crnt;
+ };
+
+ # dbh attributes are set at connect-time - see connect()
+ carp("Can't alter \$dbh->{$attrib} after handle created with DBD::Gofer") if $dbh->FETCH('Warn');
+ return $dbh->set_err($DBI::stderr, "Can't alter \$dbh->{$attrib} after handle created with DBD::Gofer");
+ }
+
+ sub disconnect {
+ my $dbh = shift;
+ $dbh->{go_transport} = undef;
+ $dbh->STORE(Active => 0);
+ }
+
+ # Create a new sth holding everything needed to (re)issue the prepare
+ # on the server. The actual round-trip is deferred until execute
+ # unless the policy requires a prepare-time check.
+ sub prepare {
+ my ($dbh, $statement, $attr)= @_;
+
+ return $dbh->set_err($DBI::stderr, "Can't prepare when disconnected")
+ unless $dbh->FETCH('Active');
+
+ $attr = { %$attr } if $attr; # copy so we can edit
+
+ # go_* attributes are consumed here; anything left is passed to the
+ # real prepare call on the server side
+ my $policy = delete($attr->{go_policy}) || $dbh->{go_policy};
+ my $lii_args = delete $attr->{go_last_insert_id_args};
+ my $go_prepare = delete($attr->{go_prepare_method})
+ || $dbh->{go_prepare_method}
+ || $policy->prepare_method($dbh, $statement, $attr)
+ || 'prepare'; # e.g. for code not using placeholders
+ my $go_cache = delete $attr->{go_cache};
+ # set to undef if there are no attributes left for the actual prepare call
+ $attr = undef if $attr and not %$attr;
+
+ my ($sth, $sth_inner) = DBI::_new_sth($dbh, {
+ Statement => $statement,
+ go_prepare_call => [ 0, $go_prepare, $statement, $attr ],
+ # go_method_calls => [], # autovivs if needed
+ go_request => $dbh->{go_request},
+ go_transport => $dbh->{go_transport},
+ go_policy => $policy,
+ go_last_insert_id_args => $lii_args,
+ go_cache => $go_cache,
+ });
+ $sth->STORE(Active => 0);
+
+ # if the policy requires it, do a round-trip now so prepare-time
+ # errors and sth attributes are available immediately
+ my $skip_prepare_check = $policy->skip_prepare_check($attr, $dbh, $statement, $attr, $sth);
+ if (not $skip_prepare_check) {
+ $sth->go_sth_method() or return undef;
+ }
+
+ return $sth;
+ }
+
+ # Like DBI's prepare_cached, but also makes the server side use
+ # prepare_cached (unless overridden via go_prepare_method).
+ sub prepare_cached {
+ my ($dbh, $sql, $attr, $if_active)= @_;
+ $attr ||= {};
+ return $dbh->SUPER::prepare_cached($sql, {
+ %$attr,
+ go_prepare_method => $attr->{go_prepare_method} || 'prepare_cached',
+ }, $if_active);
+ }
+
+ *go_cache = \&DBD::Gofer::go_cache;
+}
+
+
+{ package DBD::Gofer::st; # ====== STATEMENT ======
+ $imp_data_size = 0;
+ use strict;
+
+ # attributes the sth STORE handler applies locally rather than proxying
+ my %sth_local_store_attrib = (%DBD::Gofer::xxh_local_store_attrib, NUM_OF_FIELDS => 1);
+
+ # Build and transmit a single Gofer request for this sth (queued binds
+ # plus the execute etc), then unpack the response into the sth/dbh.
+ # Returns the response rv (may be undef on error).
+ sub go_sth_method {
+ my ($sth, $meta) = @_;
+
+ if (my $ParamValues = $sth->{ParamValues}) {
+ my $ParamAttr = $sth->{ParamAttr};
+ # XXX the sort here is a hack to work around a DBD::Sybase bug
+ # but only works properly for params 1..9
+ # (reverse because of the unshift)
+ my @params = reverse sort keys %$ParamValues;
+ if (@params > 9 && ($sth->{Database}{go_dsn}||'') =~ /dbi:Sybase/) {
+ # if more than 9 then we need to do a proper numeric sort
+ # also warn to alert user of this issue
+ warn "Sybase param binding order hack in use";
+ @params = sort { $b <=> $a } @params;
+ }
+ for my $p (@params) {
+ # unshift to put binds before execute call
+ unshift @{ $sth->{go_method_calls} },
+ [ 'bind_param', $p, $ParamValues->{$p}, $ParamAttr->{$p} ];
+ }
+ }
+
+ my $dbh = $sth->{Database} or die "panic";
+ ++$dbh->{go_request_count};
+
+ # (re)initialize the request with the prepare call and the queued
+ # method calls (binds, execute, ...)
+ my $request = $sth->{go_request};
+ $request->init_request($sth->{go_prepare_call}, $sth);
+ $request->sth_method_calls(delete $sth->{go_method_calls})
+ if $sth->{go_method_calls};
+ $request->sth_result_attr({}); # (currently) also indicates this is an sth request
+
+ $request->dbh_last_insert_id_args($meta->{go_last_insert_id_args})
+ if $meta->{go_last_insert_id_args};
+
+ # ask the server for dbh attributes on every request or just the
+ # first, depending on the dbh_attribute_update policy
+ my $go_policy = $sth->{go_policy};
+ my $dbh_attribute_update = $go_policy->dbh_attribute_update();
+ $request->dbh_attributes( $go_policy->dbh_attribute_list() )
+ if $dbh_attribute_update eq 'every'
+ or $dbh->{go_request_count}==1;
+
+ my $transport = $sth->{go_transport}
+ or return $sth->set_err($DBI::stderr, "Not connected (no transport)");
+
+ # per-sth cache override (e.g. from prepare(..., { go_cache => ... }))
+ local $transport->{go_cache} = $sth->{go_cache}
+ if defined $sth->{go_cache};
+
+ # transmit_request may return the response directly (e.g. cache hit)
+ my ($response, $retransmit_sub) = $transport->transmit_request($request);
+ $response ||= $transport->receive_response($request, $retransmit_sub);
+ $sth->{go_response} = $response
+ or die "No response object returned by $transport";
+ $dbh->{go_response} = $response; # mainly for last_insert_id
+
+ if (my $dbh_attributes = $response->dbh_attributes) {
+ # XXX we don't STORE here, we just stuff the value into the attribute cache
+ $dbh->{$_} = $dbh_attributes->{$_}
+ for keys %$dbh_attributes;
+ # record the values returned, so we know
+ # which values we have fetched (see dbh->FETCH method)
+ $dbh->{go_dbh_attributes_fetched} = $dbh_attributes;
+ }
+
+ my $rv = $response->rv; # may be undef on error
+ if ($response->sth_resultsets) {
+ # setup first resultset - including sth attributes
+ $sth->more_results;
+ }
+ else {
+ $sth->STORE(Active => 0);
+ $sth->{go_rows} = $rv;
+ }
+ # set error/warn/info (after more_results as that'll clear err)
+ DBD::Gofer::set_err_from_response($sth, $response);
+
+ return $rv;
+ }
+
+
+ # Record the bind locally; the actual bind_param calls are queued and
+ # replayed on the server by go_sth_method just before execute.
+ sub bind_param {
+ my ($sth, $param, $value, $attr) = @_;
+ $sth->{ParamValues}{$param} = $value;
+ $sth->{ParamAttr}{$param} = $attr
+ if defined $attr; # attr is sticky if not explicitly set
+ return 1;
+ }
+
+
+ # Bind any positional args, queue the execute call, then send the
+ # whole batch (prepare + binds + execute) in one server round-trip.
+ sub execute {
+ my $sth = shift;
+ $sth->bind_param($_, $_[$_-1]) for (1..@_);
+ push @{ $sth->{go_method_calls} }, [ 'execute' ];
+ my $meta = { go_last_insert_id_args => $sth->{go_last_insert_id_args} };
+ return $sth->go_sth_method($meta);
+ }
+
+
+ # Advance to the next resultset held in the response (also used to set
+ # up the first resultset after execute). Returns the sth, or
+ # undef/empty when there are no more resultsets.
+ sub more_results {
+ my $sth = shift;
+
+ $sth->finish;
+
+ my $response = $sth->{go_response} or do {
+ # e.g., we haven't sent a request yet (ie prepare then more_results)
+ $sth->trace_msg(" No response object present", 3);
+ return;
+ };
+
+ my $resultset_list = $response->sth_resultsets
+ or return $sth->set_err($DBI::stderr, "No sth_resultsets");
+
+ my $meta = shift @$resultset_list
+ or return undef; # no more result sets
+ #warn "more_results: ".Data::Dumper::Dumper($meta);
+
+ # pull out the special non-attributes first
+ my ($rowset, $err, $errstr, $state)
+ = delete @{$meta}{qw(rowset err errstr state)};
+
+ # copy meta attributes into attribute cache
+ my $NUM_OF_FIELDS = delete $meta->{NUM_OF_FIELDS};
+ $sth->STORE('NUM_OF_FIELDS', $NUM_OF_FIELDS);
+ # XXX need to use STORE for some?
+ $sth->{$_} = $meta->{$_} for keys %$meta;
+
+ if (($NUM_OF_FIELDS||0) > 0) {
+ # go_rows is -1 (unknown) when no rowset was returned
+ $sth->{go_rows} = ($rowset) ? @$rowset : -1;
+ $sth->{go_current_rowset} = $rowset;
+ # deferred error, reported when the app tries to fetch
+ $sth->{go_current_rowset_err} = [ $err, $errstr, $state ]
+ if defined $err;
+ $sth->STORE(Active => 1) if $rowset;
+ }
+
+ return $sth;
+ }
+
+
+ # NOTE(review): this sub deliberately dies ("not fully implemented
+ # yet") before the return, so the trailing return is unreachable.
+ sub go_clone_sth {
+ my ($sth1) = @_;
+ # clone an (un-fetched-from) sth - effectively undoes the initial more_results
+ # not 100% so just for use in caching returned sth e.g. table_info
+ my $sth2 = $sth1->{Database}->prepare($sth1->{Statement}, { go_skip_prepare_check => 1 });
+ $sth2->STORE($_, $sth1->{$_}) for qw(NUM_OF_FIELDS Active);
+ my $sth2_inner = tied %$sth2;
+ $sth2_inner->{$_} = $sth1->{$_} for qw(NUM_OF_PARAMS FetchHashKeyName);
+ die "not fully implemented yet";
+ return $sth2;
+ }
+
+
+ # Fetch the next row from the locally-held rowset (all rows arrived
+ # with the response, so no further server round-trips are needed).
+ sub fetchrow_arrayref {
+ my ($sth) = @_;
+ my $resultset = $sth->{go_current_rowset} || do {
+ # should only happen if fetch called after execute failed
+ my $rowset_err = $sth->{go_current_rowset_err}
+ || [ 1, 'no result set (did execute fail)' ];
+ return $sth->set_err( @$rowset_err );
+ };
+ return $sth->_set_fbav(shift @$resultset) if @$resultset;
+ $sth->finish; # no more data so finish
+ return undef;
+ }
+ *fetch = \&fetchrow_arrayref; # alias
+
+
+ # Fast-path fetchall: when no slice or max_rows is requested, hand
+ # back the locally-held rowset directly; otherwise fall back to DBI's
+ # generic implementation (which calls fetch repeatedly).
+ sub fetchall_arrayref {
+ my ($sth, $slice, $max_rows) = @_;
+ my $resultset = $sth->{go_current_rowset} || do {
+ # should only happen if fetch called after execute failed
+ my $rowset_err = $sth->{go_current_rowset_err}
+ || [ 1, 'no result set (did execute fail)' ];
+ return $sth->set_err( @$rowset_err );
+ };
+ # NOTE(review): $mode is computed but currently unused
+ my $mode = ref($slice) || 'ARRAY';
+ return $sth->SUPER::fetchall_arrayref($slice, $max_rows)
+ if ref($slice) or defined $max_rows;
+ $sth->finish; # no more data after this so finish
+ return $resultset;
+ }
+
+
+ # Row count from the last execute (cached from the response).
+ sub rows {
+ return shift->{go_rows};
+ }
+
+
+ # STORE handler for the Gofer statement handle. Locally-handled and
+ # private_* attributes pass through; any other store is applied
+ # locally but cannot affect the proxied handle, so we warn and
+ # record an error.
+ sub STORE {
+ my ($sth, $attrib, $value) = @_;
+
+ return $sth->SUPER::STORE($attrib => $value)
+ if $sth_local_store_attrib{$attrib} # handle locally
+ # or it's a private_ (application) attribute
+ or $attrib =~ /^private_/;
+
+ # otherwise warn but do it anyway
+ # this will probably need refining later
+ my $msg = "Altering \$sth->{$attrib} won't affect proxied handle";
+ Carp::carp($msg) if $sth->FETCH('Warn');
+
+ # XXX could perhaps do
+ # push @{ $sth->{go_method_calls} }, [ 'STORE', $attrib, $value ]
+ # if not $sth->FETCH('Executed');
+ # but how to handle repeat executions? How do we know when an
+ # attribute is being set to affect the current resultset or the
+ # next execution?
+ # Could just always use go_method_calls I guess.
+
+ # do the store locally anyway, just in case
+ $sth->SUPER::STORE($attrib => $value);
+
+ return $sth->set_err($DBI::stderr, $msg);
+ }
+
+ # sub bind_param_array
+ # we use DBI's default, which sets $sth->{ParamArrays}{$param} = $value
+ # and calls bind_param($param, undef, $attr) if $attr.
+
+ # Queue the bound arrays and an execute_array call; the whole batch
+ # is serialized and run on the server in a single round-trip.
+ sub execute_array {
+ my $sth = shift;
+ my $attr = shift;
+ $sth->bind_param_array($_, $_[$_-1]) for (1..@_);
+ push @{ $sth->{go_method_calls} }, [ 'execute_array', $attr ];
+ return $sth->go_sth_method($attr);
+ }
+
+ *go_cache = \&DBD::Gofer::go_cache;
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer - A stateless-proxy driver for communicating with a remote DBI
+
+=head1 SYNOPSIS
+
+ use DBI;
+
+ $original_dsn = "dbi:..."; # your original DBI Data Source Name
+
+ $dbh = DBI->connect("dbi:Gofer:transport=$transport;...;dsn=$original_dsn",
+ $user, $passwd, \%attributes);
+
+ ... use $dbh as if it was connected to $original_dsn ...
+
+
+The C<transport=$transport> part specifies the name of the module to use to
+transport the requests to the remote DBI. If $transport doesn't contain any
+double colons then it's prefixed with C<DBD::Gofer::Transport::>.
+
+The C<dsn=$original_dsn> part I<must be the last element> of the DSN because
+everything after C<dsn=> is assumed to be the DSN that the remote DBI should
+use.
+
+The C<...> represents attributes that influence the operation of the Gofer
+driver or transport. These are described below or in the documentation of the
+transport module being used.
+
+=encoding ISO8859-1
+
+=head1 DESCRIPTION
+
+DBD::Gofer is a DBI database driver that forwards requests to another DBI
+driver, usually in a separate process, often on a separate machine. It tries to
+be as transparent as possible so it appears that you are using the remote
+driver directly.
+
+DBD::Gofer is very similar to DBD::Proxy. The major difference is that with
+DBD::Gofer no state is maintained on the remote end. That means every
+request contains all the information needed to create the required state. (So,
+for example, every request includes the DSN to connect to.) Each request can be
+sent to any available server. The server executes the request and returns a
+single response that includes all the data.
+
+This is very similar to the way http works as a stateless protocol for the web.
+Each request from your web browser can be handled by a different web server process.
+
+=head2 Use Cases
+
+This may seem like pointless overhead but there are situations where this is a
+very good thing. Let's consider a specific case.
+
+Imagine using DBD::Gofer with an http transport. Your application calls
+connect(), prepare("select * from table where foo=?"), bind_param(), and execute().
+At this point DBD::Gofer builds a request containing all the information
+about the method calls. It then uses the httpd transport to send that request
+to an apache web server.
+
+This 'dbi execute' web server executes the request (using DBI::Gofer::Execute
+and related modules) and builds a response that contains all the rows of data,
+if the statement returned any, along with all the attributes that describe the
+results, such as $sth->{NAME}. This response is sent back to DBD::Gofer which
+unpacks it and presents it to the application as if it had executed the
+statement itself.
+
+=head2 Advantages
+
+Okay, but you still don't see the point? Well let's consider what we've gained:
+
+=head3 Connection Pooling and Throttling
+
+The 'dbi execute' web server leverages all the functionality of web
+infrastructure in terms of load balancing, high-availability, firewalls, access
+management, proxying, caching.
+
+At its most basic level you get a configurable pool of persistent database connections.
+
+=head3 Simple Scaling
+
+Got thousands of processes all trying to connect to the database? You can use
+DBD::Gofer to connect them to your smaller pool of 'dbi execute' web servers instead.
+
+=head3 Caching
+
+Client-side caching is as simple as adding "C<cache=1>" to the DSN.
+This feature alone can be worth using DBD::Gofer for.
+
+=head3 Fewer Network Round-trips
+
+DBD::Gofer sends as few requests as possible (dependent on the policy being used).
+
+=head3 Thin Clients / Unsupported Platforms
+
+You no longer need drivers for your database on every system. DBD::Gofer is pure perl.
+
+=head1 CONSTRAINTS
+
+There are some natural constraints imposed by the DBD::Gofer 'stateless' approach.
+But not many:
+
+=head2 You can't change database handle attributes after connect()
+
+You can't change database handle attributes after you've connected.
+Use the connect() call to specify all the attribute settings you want.
+
+This is because it's critical that when a request is complete the database
+handle is left in the same state it was when first connected.
+
+An exception is made for attributes with names starting "C<private_>":
+They can be set after connect() but the change is only applied locally.
+
+=head2 You can't change statement handle attributes after prepare()
+
+You can't change statement handle attributes after prepare.
+
+An exception is made for attributes with names starting "C<private_>":
+They can be set after prepare() but the change is only applied locally.
+
+=head2 You can't use transactions
+
+AutoCommit only. Transactions aren't supported.
+
+(In theory transactions could be supported when using a transport that
+maintains a connection, like C<stream> does. If you're interested in this
+please get in touch via dbi-dev@perl.org)
+
+=head2 You can't call driver-private sth methods
+
+But that's rarely needed anyway.
+
+=head1 GENERAL CAVEATS
+
+A few important things to keep in mind when using DBD::Gofer:
+
+=head2 Temporary tables, locks, and other per-connection persistent state
+
+You shouldn't expect any per-session state to persist between requests.
+This includes locks and temporary tables.
+
+Because the server-side may execute your requests via a different
+database connection, you can't rely on any per-connection persistent state,
+such as temporary tables, being available from one request to the next.
+
+This is an easy trap to fall into. A good way to check for this is to test your
+code with a Gofer policy package that sets the C<connect_method> policy to
+'connect' to force a new connection for each request. The C<pedantic> policy does this.
+
+=head2 Driver-private Database Handle Attributes
+
+Some driver-private dbh attributes may not be available if the driver has not
+implemented the private_attribute_info() method (added in DBI 1.54).
+
+=head2 Driver-private Statement Handle Attributes
+
+Driver-private sth attributes can be set in the prepare() call. TODO
+
+Some driver-private sth attributes may not be available if the driver has not
+implemented the private_attribute_info() method (added in DBI 1.54).
+
+=head2 Multiple Resultsets
+
+Multiple resultsets are supported only if the driver supports the more_results() method
+(an exception is made for DBD::Sybase).
+
+=head2 Statement activity that also updates dbh attributes
+
+Some drivers may update one or more dbh attributes after performing activity on
+a child sth. For example, DBD::mysql provides $dbh->{mysql_insertid} in addition to
+$sth->{mysql_insertid}. Currently mysql_insertid is supported via a hack but a
+more general mechanism is needed for other drivers to use.
+
+=head2 Methods that report an error always return undef
+
+With DBD::Gofer, a method that sets an error always returns undef or an empty list.
+That shouldn't be a problem in practice because the DBI doesn't define any
+methods that return meaningful values while also reporting an error.
+
+=head2 Subclassing only applies to client-side
+
+The RootClass and DbTypeSubclass attributes are not passed to the Gofer server.
+
+=head1 CAVEATS FOR SPECIFIC METHODS
+
+=head2 last_insert_id
+
+To enable use of last_insert_id you need to indicate to DBD::Gofer that you'd
+like to use it. You do that by adding a C<go_last_insert_id_args> attribute to
+the do() or prepare() method calls. For example:
+
+ $dbh->do($sql, { go_last_insert_id_args => [...] });
+
+or
+
+ $sth = $dbh->prepare($sql, { go_last_insert_id_args => [...] });
+
+The array reference should contain the args that you want passed to the
+last_insert_id() method.
+
+=head2 execute_for_fetch
+
+The array methods bind_param_array() and execute_array() are supported.
+When execute_array() is called the data is serialized and executed in a single
+round-trip to the Gofer server. This makes it very fast, but requires enough
+memory to store all the serialized data.
+
+The execute_for_fetch() method currently isn't optimised, it uses the DBI
+fallback behaviour of executing each tuple individually.
+(It could be implemented as a wrapper for execute_array() - patches welcome.)
+
+=head1 TRANSPORTS
+
+DBD::Gofer doesn't concern itself with transporting requests and responses to and fro.
+For that it uses special Gofer transport modules.
+
+Gofer transport modules usually come in pairs: one for the 'client' DBD::Gofer
+driver to use and one for the remote 'server' end. They have very similar names:
+
+ DBD::Gofer::Transport::<foo>
+ DBI::Gofer::Transport::<foo>
+
+Sometimes the transports on the DBD and DBI sides may have different names. For
+example DBD::Gofer::Transport::http is typically used with DBI::Gofer::Transport::mod_perl
+(DBD::Gofer::Transport::http and DBI::Gofer::Transport::mod_perl modules are
+part of the GoferTransport-http distribution).
+
+=head2 Bundled Transports
+
+Several transport modules are provided with DBD::Gofer:
+
+=head3 null
+
+The null transport is the simplest of them all. It doesn't actually transport the request anywhere.
+It just serializes (freezes) the request into a string, then thaws it back into
+a data structure before passing it to DBI::Gofer::Execute to execute. The same
+freeze and thaw is applied to the results.
+
+The null transport is the best way to test if your application will work with Gofer.
+Just set the DBI_AUTOPROXY environment variable to "C<dbi:Gofer:transport=null;policy=pedantic>"
+(see L</Using DBI_AUTOPROXY> below) and run your application, or ideally its test suite, as usual.
+
+It doesn't take any parameters.
+
+=head3 pipeone
+
+The pipeone transport launches a subprocess for each request. It passes in the
+request and reads the response.
+
+The fact that a new subprocess is started for each request ensures that the
+server side is truly stateless. While this does make the transport I<very> slow,
+it is useful as a way to test that your application doesn't depend on
+per-connection state, such as temporary tables, persisting between requests.
+
+It's also useful both as a proof of concept and as a base class for the stream
+driver.
+
+=head3 stream
+
+The stream driver also launches a subprocess and writes requests and reads
+responses, like the pipeone transport. In this case, however, the subprocess
+is expected to handle more than one request. (Though it will be automatically
+restarted if it exits.)
+
+This is the first transport that is truly useful because it can launch the
+subprocess on a remote machine using C<ssh>. This means you can now use DBD::Gofer
+to easily access any database that's accessible from any system you can log in to.
+You also get all the benefits of ssh, including encryption and optional compression.
+
+See L</Using DBI_AUTOPROXY> below for an example.
+
+=head2 Other Transports
+
+Implementing a Gofer transport is I<very> simple, and more transports are very welcome.
+Just take a look at any existing transports that are similar to your needs.
+
+=head3 http
+
+See the GoferTransport-http distribution on CPAN: http://search.cpan.org/dist/GoferTransport-http/
+
+=head3 Gearman
+
+I know Ask Bjørn Hansen has implemented a transport for the C<gearman> distributed
+job system, though it's not on CPAN at the time of writing this.
+
+=head1 CONNECTING
+
+Simply prefix your existing DSN with "C<dbi:Gofer:transport=$transport;dsn=>"
+where $transport is the name of the Gofer transport you want to use (see L</TRANSPORTS>).
+The C<transport> and C<dsn> attributes must be specified and the C<dsn> attributes must be last.
+
+Other attributes can be specified in the DSN to configure DBD::Gofer and/or the
+Gofer transport module being used. The main attributes after C<transport>, are
+C<url> and C<policy>. These and other attributes are described below.
+
+=head2 Using DBI_AUTOPROXY
+
+The simplest way to try out DBD::Gofer is to set the DBI_AUTOPROXY environment variable.
+In this case you don't include the C<dsn=> part. For example:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=null"
+
+or, for a more useful example, try:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=stream;url=ssh:user@example.com"
+
+=head2 Connection Attributes
+
+These attributes can be specified in the DSN. They can also be passed in the
+\%attr parameter of the DBI connect method by adding a "C<go_>" prefix to the name.
+
+=head3 transport
+
+Specifies the Gofer transport class to use. Required. See L</TRANSPORTS> above.
+
+If the value does not include C<::> then "C<DBD::Gofer::Transport::>" is prefixed.
+
+The transport object can be accessed via $h->{go_transport}.
+
+=head3 dsn
+
+Specifies the DSN for the remote side to connect to. Required, and must be last.
+
+=head3 url
+
+Used to tell the transport where to connect to. The exact form of the value depends on the transport used.
+
+=head3 policy
+
+Specifies the policy to use. See L</CONFIGURING BEHAVIOUR POLICY>.
+
+If the value does not include C<::> then "C<DBD::Gofer::Policy>" is prefixed.
+
+The policy object can be accessed via $h->{go_policy}.
+
+=head3 timeout
+
+Specifies a timeout, in seconds, to use when waiting for responses from the server side.
+
+=head3 retry_limit
+
+Specifies the number of times a failed request will be retried. Default is 0.
+
+=head3 retry_hook
+
+Specifies a code reference to be called to decide if a failed request should be retried.
+The code reference is called like this:
+
+ $transport = $h->{go_transport};
+ $retry = $transport->go_retry_hook->($request, $response, $transport);
+
+If it returns true then the request will be retried, up to the C<retry_limit>.
+If it returns a false but defined value then the request will not be retried.
+If it returns undef then the default behaviour will be used, as if C<retry_hook>
+had not been specified.
+
+The default behaviour is to retry requests where $request->is_idempotent is true,
+or the error message matches C</induced by DBI_GOFER_RANDOM/>.
+
+=head3 cache
+
+Specifies that client-side caching should be performed. The value is the name
+of a cache class to use.
+
+Any class implementing get($key) and set($key, $value) methods can be used.
+That includes a great many powerful caching classes on CPAN, including the
+Cache and Cache::Cache distributions.
+
+You can use "C<cache=1>" as a shortcut for "C<cache=DBI::Util::CacheMemory>".
+See L<DBI::Util::CacheMemory> for a description of this simple fast default cache.
+
+The cache object can be accessed via $h->go_cache. For example:
+
+ $dbh->go_cache->clear; # free up memory being used by the cache
+
+The cache keys are the frozen (serialized) requests, and the values are the
+frozen responses.
+
+The default behaviour is to only use the cache for requests where
+$request->is_idempotent is true (i.e., the dbh has the ReadOnly attribute set
+or the SQL statement is obviously a SELECT without a FOR UPDATE clause.)
+
+For even more control you can use the C<go_cache> attribute to pass in an
+instantiated cache object. Individual methods, including prepare(), can also
+specify alternative caches via the C<go_cache> attribute. For example, to
+specify no caching for a particular query, you could use
+
+ $sth = $dbh->prepare( $sql, { go_cache => 0 } );
+
+This can be used to implement different caching policies for different statements.
+
+It's interesting to note that DBD::Gofer can be used to add client-side caching
+to any (gofer compatible) application, with no code changes and no need for a
+gofer server. Just set the DBI_AUTOPROXY environment variable like this:
+
+ DBI_AUTOPROXY='dbi:Gofer:transport=null;cache=1'
+
+=head1 CONFIGURING BEHAVIOUR POLICY
+
+DBD::Gofer supports a 'policy' mechanism that allows you to fine-tune the number of round-trips to the Gofer server.
+The policies are grouped into classes (which may be subclassed) and referenced by the name of the class.
+
+The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
+packages and describes all the available policies.
+
+Three policy packages are supplied with DBD::Gofer:
+
+L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
+makes more round-trips to the Gofer server.
+
+L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.
+
+L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.
+
+Generally the default C<classic> policy is fine. When first testing an existing
+application with Gofer it is a good idea to start with the C<pedantic> policy
+first and then switch to C<classic> or a custom policy, for final testing.
+
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 ACKNOWLEDGEMENTS
+
+The development of DBD::Gofer and related modules was sponsored by
+Shopzilla.com (L<http://Shopzilla.com>), where I currently work.
+
+=head1 SEE ALSO
+
+L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.
+
+L<DBI::Gofer::Transport::Base>, L<DBD::Gofer::Policy::Base>.
+
+L<DBI>
+
+=head1 Caveats for specific drivers
+
+This section aims to record issues to be aware of when using Gofer with specific drivers.
+It usually only documents issues that are not natural consequences of the limitations
+of the Gofer approach - as documented above.
+
+=head1 TODO
+
+This is just a random brain dump... (There's more in the source of the Changes file, not the pod)
+
+Document policy mechanism
+
+Add mechanism for transports to list config params and for Gofer to apply any that match (and warn if any left over?)
+
+Driver-private sth attributes - set via prepare() - change DBI spec
+
+add hooks into transport base class for checking & updating a result set cache
+ ie via a standard cache interface such as:
+ http://search.cpan.org/~robm/Cache-FastMmap/FastMmap.pm
+ http://search.cpan.org/~bradfitz/Cache-Memcached/lib/Cache/Memcached.pm
+ http://search.cpan.org/~dclinton/Cache-Cache/
+ http://search.cpan.org/~cleishman/Cache/
+Also caching instructions could be passed through the httpd transport layer
+in such a way that appropriate http cache headers are added to the results
+so that web caches (squid etc) could be used to implement the caching.
+(MUST require the use of GET rather than POST requests.)
+
+Rework handling of installed_methods to not piggyback on dbh_attributes?
+
+Perhaps support transactions for transports where it's possible (ie null and stream)?
+Would make stream transport (ie ssh) more useful to more people.
+
+Make sth_result_attr more like dbh_attributes (using '*' etc)
+
+Add @val = FETCH_many(@names) to DBI in C and use in Gofer/Execute?
+
+Implement _new_sth in C.
+
+=cut
diff --git a/lib/DBD/Gofer/Policy/Base.pm b/lib/DBD/Gofer/Policy/Base.pm
new file mode 100644
index 0000000..1725b03
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/Base.pm
@@ -0,0 +1,162 @@
+package DBD::Gofer::Policy::Base;
+
+# $Id: Base.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+use Carp;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+our $AUTOLOAD;
+
+# Default value for every policy item; a policy subclass overrides a
+# subset of these via its own create_policy_subs call.
+my %policy_defaults = (
+ # force connect method (unless overridden by go_connect_method=>'...' attribute)
+ # if false: call same method on client as on server
+ connect_method => 'connect',
+ # force prepare method (unless overridden by go_prepare_method=>'...' attribute)
+ # if false: call same method on client as on server
+ prepare_method => 'prepare',
+ skip_connect_check => 0,
+ skip_default_methods => 0,
+ skip_prepare_check => 0,
+ skip_ping => 0,
+ dbh_attribute_update => 'every',
+ dbh_attribute_list => ['*'],
+ locally_quote => 0,
+ locally_quote_identifier => 0,
+ cache_parse_trace_flags => 1,
+ cache_parse_trace_flag => 1,
+ cache_data_sources => 1,
+ cache_type_info_all => 1,
+ cache_tables => 0,
+ cache_table_info => 0,
+ cache_column_info => 0,
+ cache_primary_key_info => 0,
+ cache_foreign_key_info => 0,
+ cache_statistics_info => 0,
+ cache_get_info => 0,
+ cache_func => 0,
+);
+
+# NOTE(review): $base_policy_file appears unused in this file - verify
+# whether it is referenced elsewhere before removing.
+my $base_policy_file = $INC{"DBD/Gofer/Policy/Base.pm"};
+
+# install an accessor method for every default policy item
+__PACKAGE__->create_policy_subs(\%policy_defaults);
+
+# Generate one accessor method per policy item in the given class.
+# Each generated method returns the policy's default value unless the
+# method-call attribute hash (first argument) overrides it with a
+# go_<policy_name> key.
+sub create_policy_subs {
+ my ($class, $policy_defaults) = @_;
+
+ while ( my ($policy_name, $policy_default) = each %$policy_defaults) {
+ my $policy_attr_name = "go_$policy_name";
+ my $sub = sub {
+ # $policy->foo($attr, ...)
+ #carp "$policy_name($_[1],...)";
+ # return the policy default value unless an attribute overrides it
+ return (ref $_[1] && exists $_[1]->{$policy_attr_name})
+ ? $_[1]->{$policy_attr_name}
+ : $policy_default;
+ };
+ no strict 'refs';
+ *{$class . '::' . $policy_name} = $sub;
+ }
+}
+
+# Catch calls to unknown policy names: warn, then install a stub that
+# returns undef so repeated calls don't warn again.
+sub AUTOLOAD {
+ carp "Unknown policy name $AUTOLOAD used";
+ # only warn once
+ no strict 'refs';
+ *$AUTOLOAD = sub { undef };
+ return undef;
+}
+
+# Construct an (empty) policy object. NOTE(review): $args is accepted
+# but currently unused - policy values come from the generated methods.
+sub new {
+ my ($class, $args) = @_;
+ my $policy = {};
+ bless $policy, $class;
+}
+
+# Explicit no-op DESTROY so object destruction isn't routed through
+# the AUTOLOAD above.
+sub DESTROY { };
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::Base - Base class for DBD::Gofer policies
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=...", ...)
+
+=head1 DESCRIPTION
+
+DBD::Gofer can be configured via a 'policy' mechanism that allows you to
+fine-tune the number of round-trips to the Gofer server. The policies are
+grouped into classes (which may be subclassed) and referenced by the name of
+the class.
+
+The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
+classes and describes all the individual policy items.
+
+The Base policy is not used directly. You should use a policy class derived from it.
+
+=head1 POLICY CLASSES
+
+Three policy classes are supplied with DBD::Gofer:
+
+L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
+makes more round-trips to the Gofer server.
+
+L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.
+
+L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.
+
+Generally the default C<classic> policy is fine. When first testing an existing
+application with Gofer it is a good idea to start with the C<pedantic> policy
+first and then switch to C<classic> or a custom policy, for final testing.
+
+=head1 POLICY ITEMS
+
+These are temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+See the source code to this module for more details.
+
+=head1 POLICY CUSTOMIZATION
+
+XXX This area of DBD::Gofer is subject to change.
+
+There are three ways to customize policies:
+
+Policy classes are designed to influence the overall behaviour of DBD::Gofer
+with existing, unaltered programs, so they work in a reasonably optimal way
+without requiring code changes. You can implement new policy classes as
+subclasses of existing policies.
+
+In many cases individual policy items can be overridden on a case-by-case basis
+within your application code. You do this by passing a corresponding
+C<< go_<policy_name> >> attribute into DBI methods by your application code.
+This lets you fine-tune the behaviour for special cases.
+
+The policy items are implemented as methods. In many cases the methods are
+passed parameters relating to the DBD::Gofer code being executed. This means
+the policy can implement dynamic behaviour that varies depending on the
+particular circumstances, such as the particular statement being executed.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Policy/classic.pm b/lib/DBD/Gofer/Policy/classic.pm
new file mode 100644
index 0000000..8f828f0
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/classic.pm
@@ -0,0 +1,79 @@
+package DBD::Gofer::Policy::classic;
+
+# $Id: classic.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+use base qw(DBD::Gofer::Policy::Base);
+
+__PACKAGE__->create_policy_subs({
+
+ # always use connect_cached on server
+ connect_method => 'connect_cached',
+
+    # use the same methods on the server as are called on the client
+ prepare_method => '',
+
+ # don't skip the connect check since that also sets dbh attributes
+ # although this makes connect more expensive, that's partly offset
+ # by skip_ping=>1 below, which makes connect_cached very fast.
+ skip_connect_check => 0,
+
+ # most code doesn't rely on sth attributes being set after prepare
+ skip_prepare_check => 1,
+
+ # we're happy to use local method if that's the same as the remote
+ skip_default_methods => 1,
+
+ # ping is not important for DBD::Gofer and most transports
+ skip_ping => 1,
+
+ # only update dbh attributes on first contact with server
+ dbh_attribute_update => 'first',
+
+ # we'd like to set locally_* but can't because drivers differ
+
+ # get_info results usually don't change
+ cache_get_info => 1,
+});
+
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::classic - The 'classic' policy for DBD::Gofer
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=classic", ...)
+
+The C<classic> policy is the default DBD::Gofer policy, so need not be included in the DSN.
+
+=head1 DESCRIPTION
+
+Temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Policy/pedantic.pm b/lib/DBD/Gofer/Policy/pedantic.pm
new file mode 100644
index 0000000..6829bea
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/pedantic.pm
@@ -0,0 +1,53 @@
+package DBD::Gofer::Policy::pedantic;
+
+# $Id: pedantic.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+use base qw(DBD::Gofer::Policy::Base);
+
+# the 'pedantic' policy is the same as the Base policy
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::pedantic - The 'pedantic' policy for DBD::Gofer
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=pedantic", ...)
+
+=head1 DESCRIPTION
+
+The C<pedantic> policy tries to be as transparent as possible. To do this it
+makes round-trips to the server for almost every DBI method call.
+
+This is the best policy to use when first testing existing code with Gofer.
+Once it's working well you should consider moving to the C<classic> policy or defining your own policy class.
+
+Temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Policy/rush.pm b/lib/DBD/Gofer/Policy/rush.pm
new file mode 100644
index 0000000..9cfd582
--- /dev/null
+++ b/lib/DBD/Gofer/Policy/rush.pm
@@ -0,0 +1,90 @@
+package DBD::Gofer::Policy::rush;
+
+# $Id: rush.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+use base qw(DBD::Gofer::Policy::Base);
+
+__PACKAGE__->create_policy_subs({
+
+ # always use connect_cached on server
+ connect_method => 'connect_cached',
+
+    # use the same methods on the server as are called on the client
+ # (because code not using placeholders would bloat the sth cache)
+ prepare_method => '',
+
+ # Skipping the connect check is fast, but it also skips
+ # fetching the remote dbh attributes!
+ # Make sure that your application doesn't need access to dbh attributes.
+ skip_connect_check => 1,
+
+ # most code doesn't rely on sth attributes being set after prepare
+ skip_prepare_check => 1,
+
+ # we're happy to use local method if that's the same as the remote
+ skip_default_methods => 1,
+
+ # ping is almost meaningless for DBD::Gofer and most transports anyway
+ skip_ping => 1,
+
+ # don't update dbh attributes at all
+ # XXX actually we currently need dbh_attribute_update for skip_default_methods to work
+ # and skip_default_methods is more valuable to us than the cost of dbh_attribute_update
+ dbh_attribute_update => 'none', # actually means 'first' currently
+ #dbh_attribute_list => undef,
+
+ # we'd like to set locally_* but can't because drivers differ
+
+ # in a rush assume metadata doesn't change
+ cache_tables => 1,
+ cache_table_info => 1,
+ cache_column_info => 1,
+ cache_primary_key_info => 1,
+ cache_foreign_key_info => 1,
+ cache_statistics_info => 1,
+ cache_get_info => 1,
+});
+
+
+1;
+
+=head1 NAME
+
+DBD::Gofer::Policy::rush - The 'rush' policy for DBD::Gofer
+
+=head1 SYNOPSIS
+
+ $dbh = DBI->connect("dbi:Gofer:transport=...;policy=rush", ...)
+
+=head1 DESCRIPTION
+
+The C<rush> policy tries to make as few round-trips as possible.
+It's the opposite end of the policy spectrum to the C<pedantic> policy.
+
+Temporary docs: See the source code for list of policies and their defaults.
+
+In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBD/Gofer/Transport/Base.pm b/lib/DBD/Gofer/Transport/Base.pm
new file mode 100644
index 0000000..fe0d078
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/Base.pm
@@ -0,0 +1,410 @@
+package DBD::Gofer::Transport::Base;
+
+# $Id: Base.pm 14120 2010-06-07 19:52:19Z hmbrand $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use base qw(DBI::Gofer::Transport::Base);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 14120 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ trace
+ go_dsn
+ go_url
+ go_policy
+ go_timeout
+ go_retry_hook
+ go_retry_limit
+ go_cache
+ cache_hit
+ cache_miss
+ cache_store
+));
+__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
+ meta
+));
+
+
+sub new {
+ my ($class, $args) = @_;
+ $args->{$_} = 0 for (qw(cache_hit cache_miss cache_store));
+ $args->{keep_meta_frozen} ||= 1 if $args->{go_cache};
+ #warn "args @{[ %$args ]}\n";
+ return $class->SUPER::new($args);
+}
+
+
+sub _init_trace { $ENV{DBD_GOFER_TRACE} || 0 }
+
+
+sub new_response {
+ my $self = shift;
+ return DBI::Gofer::Response->new(@_);
+}
+
+
+sub transmit_request {
+ my ($self, $request) = @_;
+ my $trace = $self->trace;
+ my $response;
+
+ my ($go_cache, $request_cache_key);
+ if ($go_cache = $self->{go_cache}) {
+ $request_cache_key
+ = $request->{meta}{request_cache_key}
+ = $self->get_cache_key_for_request($request);
+ if ($request_cache_key) {
+ my $frozen_response = eval { $go_cache->get($request_cache_key) };
+ if ($frozen_response) {
+ $self->_dump("cached response found for ".ref($request), $request)
+ if $trace;
+ $response = $self->thaw_response($frozen_response);
+ $self->trace_msg("transmit_request is returning a response from cache $go_cache\n")
+ if $trace;
+ ++$self->{cache_hit};
+ return $response;
+ }
+ warn $@ if $@;
+ ++$self->{cache_miss};
+ $self->trace_msg("transmit_request cache miss\n")
+ if $trace;
+ }
+ }
+
+ my $to = $self->go_timeout;
+ my $transmit_sub = sub {
+ $self->trace_msg("transmit_request\n") if $trace;
+ local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to;
+
+ my $response = eval {
+ local $SIG{PIPE} = sub {
+ my $extra = ($! eq "Broken pipe") ? "" : " ($!)";
+ die "Unable to send request: Broken pipe$extra\n";
+ };
+ alarm($to) if $to;
+ $self->transmit_request_by_transport($request);
+ };
+ alarm(0) if $to;
+
+ if ($@) {
+ return $self->transport_timedout("transmit_request", $to)
+ if $@ eq "TIMEOUT\n";
+ return $self->new_response({ err => 1, errstr => $@ });
+ }
+
+ return $response;
+ };
+
+ $response = $self->_transmit_request_with_retries($request, $transmit_sub);
+
+ if ($response) {
+ my $frozen_response = delete $response->{meta}{frozen};
+ $self->_store_response_in_cache($frozen_response, $request_cache_key)
+ if $request_cache_key;
+ }
+
+ $self->trace_msg("transmit_request is returning a response itself\n")
+ if $trace && $response;
+
+ return $response unless wantarray;
+ return ($response, $transmit_sub);
+}
+
+
+sub _transmit_request_with_retries {
+ my ($self, $request, $transmit_sub) = @_;
+ my $response;
+ do {
+ $response = $transmit_sub->();
+ } while ( $response && $self->response_needs_retransmit($request, $response) );
+ return $response;
+}
+
+
+sub receive_response {
+ my ($self, $request, $retransmit_sub) = @_;
+ my $to = $self->go_timeout;
+
+ my $receive_sub = sub {
+ $self->trace_msg("receive_response\n");
+ local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to;
+
+ my $response = eval {
+ alarm($to) if $to;
+ $self->receive_response_by_transport($request);
+ };
+ alarm(0) if $to;
+
+ if ($@) {
+ return $self->transport_timedout("receive_response", $to)
+ if $@ eq "TIMEOUT\n";
+ return $self->new_response({ err => 1, errstr => $@ });
+ }
+ return $response;
+ };
+
+ my $response;
+ do {
+ $response = $receive_sub->();
+ if ($self->response_needs_retransmit($request, $response)) {
+ $response = $self->_transmit_request_with_retries($request, $retransmit_sub);
+ $response ||= $receive_sub->();
+ }
+ } while ( $self->response_needs_retransmit($request, $response) );
+
+ if ($response) {
+ my $frozen_response = delete $response->{meta}{frozen};
+ my $request_cache_key = $request->{meta}{request_cache_key};
+ $self->_store_response_in_cache($frozen_response, $request_cache_key)
+ if $request_cache_key && $self->{go_cache};
+ }
+
+ return $response;
+}
+
+
+sub response_retry_preference {
+ my ($self, $request, $response) = @_;
+
+ # give the user a chance to express a preference (or undef for default)
+ if (my $go_retry_hook = $self->go_retry_hook) {
+ my $retry = $go_retry_hook->($request, $response, $self);
+ $self->trace_msg(sprintf "go_retry_hook returned %s\n",
+ (defined $retry) ? $retry : 'undef');
+ return $retry if defined $retry;
+ }
+
+ # This is the main decision point. We don't retry requests that got
+ # as far as executing because the error is probably from the database
+ # (not transport) so retrying is unlikely to help. But note that any
+    # severe transport error occurring after execute is likely to return
+ # a new response object that doesn't have the execute flag set. Beware!
+ return 0 if $response->executed_flag_set;
+
+ return 1 if ($response->errstr || '') =~ m/induced by DBI_GOFER_RANDOM/;
+
+ return 1 if $request->is_idempotent; # i.e. is SELECT or ReadOnly was set
+
+ return undef; # we couldn't make up our mind
+}
+
+
+sub response_needs_retransmit {
+ my ($self, $request, $response) = @_;
+
+ my $err = $response->err
+ or return 0; # nothing went wrong
+
+ my $retry = $self->response_retry_preference($request, $response);
+
+ if (!$retry) { # false or undef
+ $self->trace_msg("response_needs_retransmit: response not suitable for retry\n");
+ return 0;
+ }
+
+ # we'd like to retry but have we retried too much already?
+
+ my $retry_limit = $self->go_retry_limit;
+ if (!$retry_limit) {
+ $self->trace_msg("response_needs_retransmit: retries disabled (retry_limit not set)\n");
+ return 0;
+ }
+
+ my $request_meta = $request->meta;
+ my $retry_count = $request_meta->{retry_count} || 0;
+ if ($retry_count >= $retry_limit) {
+ $self->trace_msg("response_needs_retransmit: $retry_count is too many retries\n");
+ # XXX should be possible to disable altering the err
+ $response->errstr(sprintf "%s (after %d retries by gofer)", $response->errstr, $retry_count);
+ return 0;
+ }
+
+ # will retry now, do the admin
+ ++$retry_count;
+ $self->trace_msg("response_needs_retransmit: retry $retry_count\n");
+
+ # hook so response_retry_preference can defer some code execution
+ # until we've checked retry_count and retry_limit.
+ if (ref $retry eq 'CODE') {
+ $retry->($retry_count, $retry_limit)
+ and warn "should return false"; # protect future use
+ }
+
+ ++$request_meta->{retry_count}; # update count for this request object
+ ++$self->meta->{request_retry_count}; # update cumulative transport stats
+
+ return 1;
+}
+
+
+sub transport_timedout {
+ my ($self, $method, $timeout) = @_;
+ $timeout ||= $self->go_timeout;
+ return $self->new_response({ err => 1, errstr => "DBD::Gofer $method timed-out after $timeout seconds" });
+}
+
+
+# return undef if we don't want to cache this request
+# subclasses may use more specialized rules
+sub get_cache_key_for_request {
+ my ($self, $request) = @_;
+
+ # we only want to cache idempotent requests
+ # is_idempotent() is true if GOf_REQUEST_IDEMPOTENT or GOf_REQUEST_READONLY set
+ return undef if not $request->is_idempotent;
+
+ # XXX would be nice to avoid the extra freeze here
+ my $key = $self->freeze_request($request, undef, 1);
+
+ #use Digest::MD5; warn "get_cache_key_for_request: ".Digest::MD5::md5_base64($key)."\n";
+
+ return $key;
+}
+
+
+sub _store_response_in_cache {
+ my ($self, $frozen_response, $request_cache_key) = @_;
+ my $go_cache = $self->{go_cache}
+ or return;
+
+ # new() ensures that enabling go_cache also enables keep_meta_frozen
+ warn "No meta frozen in response" if !$frozen_response;
+ warn "No request_cache_key" if !$request_cache_key;
+
+ if ($frozen_response && $request_cache_key) {
+ $self->trace_msg("receive_response added response to cache $go_cache\n");
+ eval { $go_cache->set($request_cache_key, $frozen_response) };
+ warn $@ if $@;
+ ++$self->{cache_store};
+ }
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::Base - base class for DBD::Gofer client transports
+
+=head1 SYNOPSIS
+
+ my $remote_dsn = "..."
+ DBI->connect("dbi:Gofer:transport=...;url=...;timeout=...;retry_limit=...;dsn=$remote_dsn",...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY='dbi:Gofer:transport=...;url=...'
+
+which will force I<all> DBI connections to be made via that Gofer server.
+
+=head1 DESCRIPTION
+
+This is the base class for all DBD::Gofer client transports.
+
+=head1 ATTRIBUTES
+
+Gofer transport attributes can be specified either in the attributes parameter
+of the connect() method call, or in the DSN string. When used in the DSN
+string, attribute names don't have the C<go_> prefix.
+
+=head2 go_dsn
+
+The full DBI DSN that the Gofer server should connect to on your behalf.
+
+When used in the DSN it must be the last element in the DSN string.
+
+=head2 go_timeout
+
+A time limit for sending a request and receiving a response. Some drivers may
+implement sending and receiving as separate steps, in which case (currently)
+the timeout applies to each separately.
+
+If a request needs to be resent then the timeout is restarted for each sending
+of a request and receiving of a response.
+
+=head2 go_retry_limit
+
+The maximum number of times a request may be retried. The default is 2.
+
+=head2 go_retry_hook
+
+This subroutine reference is called, if defined, for each response received where $response->err is true.
+
+The subroutine is passed three parameters: the request object, the response object, and the transport object.
+
+If it returns an undefined value then the default retry behaviour is used. See L</RETRY ON ERROR> below.
+
+If it returns a defined but false value then the request is not resent.
+
+If it returns a true value then the request is resent, so long as the number of retries does not exceed C<go_retry_limit>.
+
+=head1 RETRY ON ERROR
+
+The default retry on error behaviour is:
+
+ - Retry if the error was due to DBI_GOFER_RANDOM. See L<DBI::Gofer::Execute>.
+
+ - Retry if $request->is_idempotent returns true. See L<DBI::Gofer::Request>.
+
+A retry won't be allowed if the number of previous retries has reached C<go_retry_limit>.
+
+=head1 TRACING
+
+Tracing of gofer requests and responses can be enabled by setting the
+C<DBD_GOFER_TRACE> environment variable. A value of 1 gives a reasonably
+compact summary of each request and response. A value of 2 or more gives a
+detailed, and voluminous, dump.
+
+The trace is written using DBI->trace_msg() and so is written to the default
+DBI trace output, which is usually STDERR.
+
+=head1 METHODS
+
+I<This section is currently far from complete.>
+
+=head2 response_retry_preference
+
+ $retry = $transport->response_retry_preference($request, $response);
+
+The response_retry_preference is called by DBD::Gofer when considering if a
+request should be retried after an error.
+
+Returns true (would like to retry), false (must not retry), undef (no preference).
+
+If a true value is returned in the form of a CODE ref then, if DBD::Gofer does
+decide to retry the request, it calls the code ref passing $retry_count, $retry_limit.
+Can be used for logging and/or to implement exponential backoff behaviour.
+Currently the called code must return using C<return;> to allow for future extensions.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007-2008, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer>, L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.
+
+and some example transports:
+
+L<DBD::Gofer::Transport::stream>
+
+L<DBD::Gofer::Transport::http>
+
+L<DBI::Gofer::Transport::mod_perl>
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/corostream.pm b/lib/DBD/Gofer/Transport/corostream.pm
new file mode 100644
index 0000000..6e79278
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/corostream.pm
@@ -0,0 +1,144 @@
+package DBD::Gofer::Transport::corostream;
+
+use strict;
+use warnings;
+
+use Carp;
+
+use Coro::Select; # a slow but coro-aware replacement for CORE::select (global effect!)
+
+use Coro;
+use Coro::Handle;
+
+use base qw(DBD::Gofer::Transport::stream);
+
+# XXX ensure DBI_PUREPERL for parent doesn't pass to child
+sub start_pipe_command {
+ local $ENV{DBI_PUREPERL} = $ENV{DBI_PUREPERL_COROCHILD}; # typically undef
+ my $connection = shift->SUPER::start_pipe_command(@_);
+ return $connection;
+}
+
+
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::corostream - Async DBD::Gofer stream transport using Coro and AnyEvent
+
+=head1 SYNOPSIS
+
+ DBI_AUTOPROXY="dbi:Gofer:transport=corostream" perl some-perl-script-using-dbi.pl
+
+or
+
+ $dsn = ...; # the DSN for the driver and database you want to use
+ $dbh = DBI->connect("dbi:Gofer:transport=corostream;dsn=$dsn", ...);
+
+=head1 DESCRIPTION
+
+The I<BIG WIN> from using L<Coro> is that it enables the use of existing
+DBI frameworks like L<DBIx::Class>.
+
+=head1 KNOWN ISSUES AND LIMITATIONS
+
+ - Uses Coro::Select so alters CORE::select globally
+ Parent class probably needs refactoring to enable a more encapsulated approach.
+
+ - Doesn't prevent multiple concurrent requests
+ Probably just needs a per-connection semaphore
+
+ - Coro has many caveats. Caveat emptor.
+
+=head1 STATUS
+
+THIS IS CURRENTLY JUST A PROOF-OF-CONCEPT IMPLEMENTATION FOR EXPERIMENTATION.
+
+Please note that I have no plans to develop this code further myself.
+I'd very much welcome contributions. Interested? Let me know!
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2010, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::stream>
+
+L<DBD::Gofer>
+
+=head1 APPENDIX
+
+Example code:
+
+ #!perl
+
+ use strict;
+ use warnings;
+ use Time::HiRes qw(time);
+
+ BEGIN { $ENV{PERL_ANYEVENT_STRICT} = 1; $ENV{PERL_ANYEVENT_VERBOSE} = 1; }
+
+ use AnyEvent;
+
+ BEGIN { $ENV{DBI_TRACE} = 0; $ENV{DBI_GOFER_TRACE} = 0; $ENV{DBD_GOFER_TRACE} = 0; };
+
+ use DBI;
+
+ $ENV{DBI_AUTOPROXY} = 'dbi:Gofer:transport=corostream';
+
+ my $ticker = AnyEvent->timer( after => 0, interval => 0.1, cb => sub {
+ warn sprintf "-tick- %.2f\n", time
+ } );
+
+ warn "connecting...\n";
+ my $dbh = DBI->connect("dbi:NullP:");
+ warn "...connected\n";
+
+ for (1..3) {
+ warn "entering DBI...\n";
+ $dbh->do("sleep 0.3"); # pseudo-sql understood by the DBD::NullP driver
+ warn "...returned\n";
+ }
+
+ warn "done.";
+
+Example output:
+
+ $ perl corogofer.pl
+ connecting...
+ -tick- 1293631437.14
+ -tick- 1293631437.14
+ ...connected
+ entering DBI...
+ -tick- 1293631437.25
+ -tick- 1293631437.35
+ -tick- 1293631437.45
+ -tick- 1293631437.55
+ ...returned
+ entering DBI...
+ -tick- 1293631437.66
+ -tick- 1293631437.76
+ -tick- 1293631437.86
+ ...returned
+ entering DBI...
+ -tick- 1293631437.96
+ -tick- 1293631438.06
+ -tick- 1293631438.16
+ ...returned
+ done. at corogofer.pl line 39.
+
+You can see that the timer callback is firing while the code 'waits' inside the
+do() method for the response from the database. Normally that would block.
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/null.pm b/lib/DBD/Gofer/Transport/null.pm
new file mode 100644
index 0000000..4b8d86c
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/null.pm
@@ -0,0 +1,111 @@
+package DBD::Gofer::Transport::null;
+
+# $Id: null.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use base qw(DBD::Gofer::Transport::Base);
+
+use DBI::Gofer::Execute;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ pending_response
+ transmit_count
+));
+
+my $executor = DBI::Gofer::Execute->new();
+
+
+sub transmit_request_by_transport {
+ my ($self, $request) = @_;
+ $self->transmit_count( ($self->transmit_count()||0) + 1 ); # just for tests
+
+ my $frozen_request = $self->freeze_request($request);
+
+ # ...
+ # the request is magically transported over to ... ourselves
+ # ...
+
+ my $response = $executor->execute_request( $self->thaw_request($frozen_request, undef, 1) );
+
+ # put response 'on the shelf' ready for receive_response()
+ $self->pending_response( $response );
+
+ return undef;
+}
+
+
+sub receive_response_by_transport {
+ my $self = shift;
+
+ my $response = $self->pending_response;
+
+ my $frozen_response = $self->freeze_response($response, undef, 1);
+
+ # ...
+ # the response is magically transported back to ... ourselves
+ # ...
+
+ return $self->thaw_response($frozen_response);
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::null - DBD::Gofer client transport for testing
+
+=head1 SYNOPSIS
+
+  my $original_dsn = "...";
+ DBI->connect("dbi:Gofer:transport=null;dsn=$original_dsn",...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=null"
+
+=head1 DESCRIPTION
+
+Connect via DBD::Gofer but execute the requests within the same process.
+
+This is a quick and simple way to test applications for compatibility with the
+(few) restrictions that DBD::Gofer imposes.
+
+It also provides a simple, portable way for the DBI test suite to be used to
+test DBD::Gofer on all platforms with no setup.
+
+Also, by measuring the difference in performance between normal connections and
+connections via C<dbi:Gofer:transport=null> the basic cost of using DBD::Gofer
+can be measured. Furthermore, the additional cost of more advanced transports can be
+isolated by comparing their performance with the null transport.
+
+The C<t/85gofer.t> script in the DBI distribution includes a comparative benchmark.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::Base>
+
+L<DBD::Gofer>
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/pipeone.pm b/lib/DBD/Gofer/Transport/pipeone.pm
new file mode 100644
index 0000000..3df2bf3
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/pipeone.pm
@@ -0,0 +1,253 @@
+package DBD::Gofer::Transport::pipeone;
+
+# $Id: pipeone.pm 10087 2007-10-16 12:42:37Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use Carp;
+use Fcntl;
+use IO::Select;
+use IPC::Open3 qw(open3);
+use Symbol qw(gensym);
+
+use base qw(DBD::Gofer::Transport::Base);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ connection_info
+ go_perl
+));
+
+
+sub new {
+ my ($self, $args) = @_;
+ $args->{go_perl} ||= do {
+ ($INC{"blib.pm"}) ? [ $^X, '-Mblib' ] : [ $^X ];
+ };
+ if (not ref $args->{go_perl}) {
+ # user can override the perl to be used, either with an array ref
+ # containing the command name and args to use, or with a string
+ # (ie via the DSN) in which case, to enable args to be passed,
+ # we split on two or more consecutive spaces (otherwise the path
+ # to perl couldn't contain a space itself).
+ $args->{go_perl} = [ split /\s{2,}/, $args->{go_perl} ];
+ }
+ return $self->SUPER::new($args);
+}
+
+
+# nonblock($fh) puts filehandle into nonblocking mode
+sub nonblock {
+ my $fh = shift;
+ my $flags = fcntl($fh, F_GETFL, 0)
+ or croak "Can't get flags for filehandle $fh: $!";
+ fcntl($fh, F_SETFL, $flags | O_NONBLOCK)
+ or croak "Can't make filehandle $fh nonblocking: $!";
+}
+
+
+sub start_pipe_command {
+ my ($self, $cmd) = @_;
+ $cmd = [ $cmd ] unless ref $cmd eq 'ARRAY';
+
+ # if it's important that the subprocess uses the same
+ # (versions of) modules as us then the caller should
+ # set PERL5LIB itself.
+
+ # limit various forms of insanity, for now
+ local $ENV{DBI_TRACE}; # use DBI_GOFER_TRACE instead
+ local $ENV{DBI_AUTOPROXY};
+ local $ENV{DBI_PROFILE};
+
+ my ($wfh, $rfh, $efh) = (gensym, gensym, gensym);
+ my $pid = open3($wfh, $rfh, $efh, @$cmd)
+ or die "error starting @$cmd: $!\n";
+ if ($self->trace) {
+ $self->trace_msg(sprintf("Started pid $pid: @$cmd {fd: w%d r%d e%d, ppid=$$}\n", fileno $wfh, fileno $rfh, fileno $efh),0);
+ }
+ nonblock($rfh);
+ nonblock($efh);
+ my $ios = IO::Select->new($rfh, $efh);
+
+ return {
+ cmd=>$cmd,
+ pid=>$pid,
+ wfh=>$wfh, rfh=>$rfh, efh=>$efh,
+ ios=>$ios,
+ };
+}
+
+
+sub cmd_as_string {
+ my $self = shift;
+ # XXX meant to return a properly shell-escaped string suitable for system
+ # but its only for debugging so that can wait
+ my $connection_info = $self->connection_info;
+ return join " ", map { (m/^[-:\w]*$/) ? $_ : "'$_'" } @{$connection_info->{cmd}};
+}
+
+
+sub transmit_request_by_transport {
+ my ($self, $request) = @_;
+
+ my $frozen_request = $self->freeze_request($request);
+
+ my $cmd = [ @{$self->go_perl}, qw(-MDBI::Gofer::Transport::pipeone -e run_one_stdio)];
+ my $info = $self->start_pipe_command($cmd);
+
+ my $wfh = delete $info->{wfh};
+ # send frozen request
+ local $\;
+ print $wfh $frozen_request
+ or warn "error writing to @$cmd: $!\n";
+ # indicate that there's no more
+ close $wfh
+ or die "error closing pipe to @$cmd: $!\n";
+
+ $self->connection_info( $info );
+ return;
+}
+
+
+sub read_response_from_fh {
+ my ($self, $fh_actions) = @_;
+ my $trace = $self->trace;
+
+ my $info = $self->connection_info || die;
+ my ($ios) = @{$info}{qw(ios)};
+ my $errors = 0;
+ my $complete;
+
+ die "No handles to read response from" unless $ios->count;
+
+ while ($ios->count) {
+ my @readable = $ios->can_read();
+ for my $fh (@readable) {
+ local $_;
+ my $actions = $fh_actions->{$fh} || die "panic: no action for $fh";
+ my $rv = sysread($fh, $_='', 1024*31); # to fit in 32KB slab
+ unless ($rv) { # error (undef) or end of file (0)
+ my $action;
+ unless (defined $rv) { # was an error
+ $self->trace_msg("error on handle $fh: $!\n") if $trace >= 4;
+ $action = $actions->{error} || $actions->{eof};
+ ++$errors;
+                # XXX an error may be a permanent condition of the handle
+ # if so we'll loop here - not good
+ }
+ else {
+ $action = $actions->{eof};
+ $self->trace_msg("eof on handle $fh\n") if $trace >= 4;
+ }
+ if ($action->($fh)) {
+ $self->trace_msg("removing $fh from handle set\n") if $trace >= 4;
+ $ios->remove($fh);
+ }
+ next;
+ }
+ # action returns true if the response is now complete
+            # (but we still process all currently readable handles first)
+ $actions->{read}->($fh) && ++$complete;
+ }
+ last if $complete;
+ }
+ return $errors;
+}
+
+
+sub receive_response_by_transport {
+ my $self = shift;
+
+ my $info = $self->connection_info || die;
+ my ($pid, $rfh, $efh, $ios, $cmd) = @{$info}{qw(pid rfh efh ios cmd)};
+
+ my $frozen_response;
+ my $stderr_msg;
+
+ $self->read_response_from_fh( {
+ $efh => {
+ error => sub { warn "error reading response stderr: $!"; 1 },
+ eof => sub { warn "eof on stderr" if 0; 1 },
+ read => sub { $stderr_msg .= $_; 0 },
+ },
+ $rfh => {
+ error => sub { warn "error reading response: $!"; 1 },
+ eof => sub { warn "eof on stdout" if 0; 1 },
+ read => sub { $frozen_response .= $_; 0 },
+ },
+ });
+
+ waitpid $info->{pid}, 0
+ or warn "waitpid: $!"; # XXX do something more useful?
+
+ die ref($self)." command (@$cmd) failed: $stderr_msg"
+ if not $frozen_response; # no output on stdout at all
+
+ # XXX need to be able to detect and deal with corruption
+ my $response = $self->thaw_response($frozen_response);
+
+ if ($stderr_msg) {
+ # add stderr messages as warnings (for PrintWarn)
+ $response->add_err(0, $stderr_msg, undef, $self->trace)
+ # but ignore warning from old version of blib
+ unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/;
+ }
+
+ return $response;
+}
+
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::pipeone - DBD::Gofer client transport for testing
+
+=head1 SYNOPSIS
+
+ $original_dsn = "...";
+ DBI->connect("dbi:Gofer:transport=pipeone;dsn=$original_dsn",...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY="dbi:Gofer:transport=pipeone"
+
+=head1 DESCRIPTION
+
+Connect via DBD::Gofer and execute each request by starting a new subprocess.
+
+This is, as you might imagine, spectacularly inefficient!
+
+It's only intended for testing. Specifically it demonstrates that the server
+side is completely stateless.
+
+It also provides a base class for the much more useful L<DBD::Gofer::Transport::stream>
+transport.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::Base>
+
+L<DBD::Gofer>
+
+=cut
diff --git a/lib/DBD/Gofer/Transport/stream.pm b/lib/DBD/Gofer/Transport/stream.pm
new file mode 100644
index 0000000..61e211c
--- /dev/null
+++ b/lib/DBD/Gofer/Transport/stream.pm
@@ -0,0 +1,292 @@
+package DBD::Gofer::Transport::stream;
+
+# $Id: stream.pm 14598 2010-12-21 22:53:25Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use Carp;
+
+use base qw(DBD::Gofer::Transport::pipeone);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 14598 $ =~ /(\d+)/o);
+
+__PACKAGE__->mk_accessors(qw(
+ go_persist
+));
+
+my $persist_all = 5;
+my %persist;
+
+
+# Build the key used to cache persistent connections: the go_url (e.g.
+# "ssh:user@host") joined with the perl command used to start the remote
+# end, so only transports targeting the same place share a subprocess.
+sub _connection_key {
+    my ($self) = @_;
+    return join "~", $self->go_url||"", @{ $self->go_perl || [] };
+}
+
+
+# Return a connection (subprocess) for this transport: a cached persistent
+# one if caching is enabled and its subprocess is still alive (see
+# _connection_check), otherwise a freshly made one, which is then cached
+# under _connection_key.  Setting go_persist to 0 disables caching.
+sub _connection_get {
+    my ($self) = @_;
+
+    my $persist = $self->go_persist; # = 0 can force non-caching
+    $persist = $persist_all if not defined $persist;
+    # an empty key means "do not cache this connection"
+    my $key = ($persist) ? $self->_connection_key : '';
+    if ($persist{$key} && $self->_connection_check($persist{$key})) {
+        $self->trace_msg("reusing persistent connection $key\n",0) if $self->trace >= 1;
+        return $persist{$key};
+    }
+
+    my $connection = $self->_make_connection;
+
+    if ($key) {
+        %persist = () if keys %persist > $persist_all; # XXX quick hack to limit subprocesses
+        $persist{$key} = $connection;
+    }
+
+    return $connection;
+}
+
+
+# True if the given connection (default: the current connection_info)
+# still has a live subprocess.  Uses "kill 0", which tests process
+# existence/permission without actually delivering a signal.
+sub _connection_check {
+    my ($self, $connection) = @_;
+    $connection ||= $self->connection_info;
+    my $pid = $connection->{pid};
+    my $ok = (kill 0, $pid);
+    # NOTE(review): the trace interpolates $$ (this process's pid), not
+    # the $pid that was just checked - presumably the child pid was
+    # intended; confirm before relying on this trace output.
+    $self->trace_msg("_connection_check: $ok (pid $$)\n",0) if $self->trace;
+    return $ok;
+}
+
+
+# Shut down the current connection: close its write handle (the child
+# sees EOF on stdin and exits), remove it from the persistent-connection
+# cache, and forget it locally.  Called on send errors and timeouts.
+sub _connection_kill {
+    my ($self) = @_;
+    my $connection = $self->connection_info;
+    my ($pid, $wfh, $rfh, $efh) = @{$connection}{qw(pid wfh rfh efh)};
+    $self->trace_msg("_connection_kill: closing write handle\n",0) if $self->trace;
+    # closing the write file handle should be enough, generally
+    close $wfh;
+    # in future we may want to be more aggressive
+    #close $rfh; close $efh; kill 15, $pid
+    # but deleting from the persist cache...
+    delete $persist{ $self->_connection_key };
+    # ... and removing the connection_info should suffice
+    $self->connection_info( undef );
+    return;
+}
+
+
+# Start a new server subprocess running DBI::Gofer::Transport::stream.
+# Locally it runs go_perl directly; with an "ssh:user@host" url the same
+# command is launched remotely via ssh, wrapped in a bash snippet that
+# sources the usual login profile files to approximate a login
+# environment.  Returns the info hash from start_pipe_command
+# (pid, wfh, rfh, efh, cmd).
+sub _make_connection {
+    my ($self) = @_;
+
+    my $go_perl = $self->go_perl;
+    my $cmd = [ @$go_perl, qw(-MDBI::Gofer::Transport::stream -e run_stdio_hex)];
+
+    #push @$cmd, "DBI_TRACE=2=/tmp/goferstream.log", "sh", "-c";
+    if (my $url = $self->go_url) {
+        die "Only 'ssh:user\@host' style url supported by this transport"
+            unless $url =~ s/^ssh://;
+        my $ssh = $url;
+        my $setup_env = join "||", map { "source $_ 2>/dev/null" } qw(.bash_profile .bash_login .profile);
+        my $setup = $setup_env.q{; exec "$@"};
+        # don't use $^X on remote system by default as it's possibly wrong
+        $cmd->[0] = 'perl' if "@$go_perl" eq $^X;
+        # -x not only 'Disables X11 forwarding' but also makes connections *much* faster
+        unshift @$cmd, qw(ssh -xq), split(' ', $ssh), qw(bash -c), $setup;
+    }
+
+    $self->trace_msg("new connection: @$cmd\n",0) if $self->trace;
+
+    # XXX add a handshake - some message from DBI::Gofer::Transport::stream that's
+    # sent as soon as it starts that we can wait for to report success - and soak up
+    # and report useful warnings etc from ssh before we get it? Increases latency though.
+    my $connection = $self->start_pipe_command($cmd);
+    return $connection;
+}
+
+
+# Send one frozen request, hex-encoded on a single CRLF-terminated line,
+# down the pipe to the (possibly remote) server process.  Gets or makes
+# a connection on demand.  Returns undef to tell the caller the response
+# has not arrived yet, so it must call receive_response_by_transport.
+sub transmit_request_by_transport {
+    my ($self, $request) = @_;
+    my $trace = $self->trace;
+
+    my $connection = $self->connection_info || do {
+        my $con = $self->_connection_get;
+        $self->connection_info( $con );
+        $con;
+    };
+
+    # hex encoding keeps the wire format line-oriented and 8-bit clean
+    my $encoded_request = unpack("H*", $self->freeze_request($request));
+    $encoded_request .= "\015\012";
+
+    my $wfh = $connection->{wfh};
+    $self->trace_msg(sprintf("transmit_request_by_transport: to fh %s fd%d\n", $wfh, fileno($wfh)),0)
+        if $trace >= 4;
+
+    # send frozen request
+    local $\; # defend against any global output record separator
+    $wfh->print($encoded_request) # autoflush enabled
+        or do {
+            my $err = $!;
+            # XXX could/should make new connection and retry
+            $self->_connection_kill;
+            die "Error sending request: $err";
+        };
+    $self->trace_msg("Request sent: $encoded_request\n",0) if $trace >= 4;
+
+    return undef; # indicate no response yet (so caller calls receive_response_by_transport)
+}
+
+
+# Read one hex-encoded, CRLF-terminated response line from the server,
+# draining stderr along the way.  If nothing at all arrives on stdout the
+# subprocess has probably died, so whatever appeared on stderr is turned
+# into a useful die message.  Stderr text accompanying a good response is
+# attached to the response as a warning (for PrintWarn).
+sub receive_response_by_transport {
+    my $self = shift;
+    my $trace = $self->trace;
+
+    $self->trace_msg("receive_response_by_transport: awaiting response\n",0) if $trace >= 4;
+    my $connection = $self->connection_info || die;
+    my ($pid, $rfh, $efh, $cmd) = @{$connection}{qw(pid rfh efh cmd)};
+
+    my $errno = 0;
+    my $encoded_response;
+    my $stderr_msg;
+
+    # callbacks return true to stop reading that handle, false to continue;
+    # the rfh reader stops once the trailing CRLF of a full line is seen
+    $self->read_response_from_fh( {
+        $efh => {
+            error => sub { warn "error reading response stderr: $!"; $errno||=$!; 1 },
+            eof => sub { warn "eof reading efh" if $trace >= 4; 1 },
+            read => sub { $stderr_msg .= $_; 0 },
+        },
+        $rfh => {
+            error => sub { warn "error reading response: $!"; $errno||=$!; 1 },
+            eof => sub { warn "eof reading rfh" if $trace >= 4; 1 },
+            read => sub { $encoded_response .= $_; ($encoded_response=~s/\015\012$//) ? 1 : 0 },
+        },
+    });
+
+    # if we got no output on stdout at all then the command has
+    # probably exited, possibly with an error to stderr.
+    # Turn this situation into a reasonably useful DBI error.
+    if (not $encoded_response) {
+        my @msg;
+        push @msg, "error while reading response: $errno" if $errno;
+        if ($stderr_msg) {
+            chomp $stderr_msg;
+            push @msg, sprintf "error reported by \"%s\" (pid %d%s): %s",
+                $self->cmd_as_string,
+                $pid, ((kill 0, $pid) ? "" : ", exited"),
+                $stderr_msg;
+        }
+        die join(", ", "No response received", @msg)."\n";
+    }
+
+    $self->trace_msg("Response received: $encoded_response\n",0)
+        if $trace >= 4;
+
+    $self->trace_msg("Gofer stream stderr message: $stderr_msg\n",0)
+        if $stderr_msg && $trace;
+
+    # reverse the hex encoding applied by transmit's counterpart
+    my $frozen_response = pack("H*", $encoded_response);
+
+    # XXX need to be able to detect and deal with corruption
+    my $response = $self->thaw_response($frozen_response);
+
+    if ($stderr_msg) {
+        # add stderr messages as warnings (for PrintWarn)
+        $response->add_err(0, $stderr_msg, undef, $trace)
+            # but ignore warning from old version of blib
+            unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/;
+    }
+
+    return $response;
+}
+
+# On a gofer timeout, kill the (possibly wedged) connection before the
+# base class records the timeout, so the next request gets a fresh
+# subprocess instead of reusing a stuck one.
+sub transport_timedout {
+    my $self = shift;
+    $self->_connection_kill;
+    return $self->SUPER::transport_timedout(@_);
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBD::Gofer::Transport::stream - DBD::Gofer transport for stdio streaming
+
+=head1 SYNOPSIS
+
+ DBI->connect('dbi:Gofer:transport=stream;url=ssh:username@host.example.com;dsn=dbi:...',...)
+
+or, enable by setting the DBI_AUTOPROXY environment variable:
+
+ export DBI_AUTOPROXY='dbi:Gofer:transport=stream;url=ssh:username@host.example.com'
+
+=head1 DESCRIPTION
+
+Without the C<url=> parameter it launches a subprocess as
+
+ perl -MDBI::Gofer::Transport::stream -e run_stdio_hex
+
+and feeds requests into it and reads responses from it. But that's not very useful.
+
+With a C<url=ssh:username@host.example.com> parameter it uses ssh to launch the subprocess
+on a remote system. That's much more useful!
+
+It gives you secure remote access to DBI databases on any system you can login to.
+Using ssh also gives you optional compression and many other features (see the
+ssh manual for how to configure that and many other options via ~/.ssh/config file).
+
+The actual command invoked is something like:
+
+    ssh -xq username@host.example.com bash -c $setup $run
+
+where $run is the command shown above, and $setup is
+
+ . .bash_profile 2>/dev/null || . .bash_login 2>/dev/null || . .profile 2>/dev/null; exec "$@"
+
+which is trying (in a limited and fairly unportable way) to setup the environment
+(PATH, PERL5LIB etc) as it would be if you had logged in to that system.
+
+The "C<perl>" used in the command will default to the value of $^X when not using ssh.
+On most systems that's the full path to the perl that's currently executing.
+
+
+=head1 PERSISTENCE
+
+Currently gofer stream connections persist (remain connected) after all
+database handles have been disconnected. This makes later connections in the
+same process very fast.
+
+Currently up to 5 different gofer stream connections (based on url) can
+persist. If more than 5 are in the cache when a new connection is made then
+the cache is cleared before adding the new connection. Simple but effective.
+
+=head1 TO DO
+
+Document go_perl attribute
+
+Automatically reconnect (within reason) if there's a transport error.
+
+Decide on default for persistent connection - on or off? limits? ttl?
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=head1 SEE ALSO
+
+L<DBD::Gofer::Transport::Base>
+
+L<DBD::Gofer>
+
+=cut
diff --git a/lib/DBD/NullP.pm b/lib/DBD/NullP.pm
new file mode 100644
index 0000000..b1f8a71
--- /dev/null
+++ b/lib/DBD/NullP.pm
@@ -0,0 +1,166 @@
+# DBD::NullP is a minimal "null" DBI driver stub: statements mostly
+# succeed and do nothing, which makes it useful for testing and as the
+# driver behind DBI's internal default handles.
+{
+    package DBD::NullP;
+
+    require DBI;
+    require Carp;
+
+    @EXPORT = qw(); # Do NOT @EXPORT anything.
+    $VERSION = sprintf("12.%06d", q$Revision: 14714 $ =~ /(\d+)/o);
+
+# $Id: NullP.pm 14714 2011-02-22 17:27:07Z timbo $
+#
+# Copyright (c) 1994-2007 Tim Bunce
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+    $drh = undef; # holds driver handle once initialised
+
+    # Create (once) and return the singleton driver handle.
+    sub driver{
+        return $drh if $drh;
+        my($class, $attr) = @_;
+        $class .= "::dr";
+        ($drh) = DBI::_new_drh($class, {
+            'Name' => 'NullP',
+            'Version' => $VERSION,
+            'Attribution' => 'DBD Example Null Perl stub by Tim Bunce',
+        }, [ qw'example implementors private data']);
+        $drh;
+    }
+
+    # Discard the driver handle in a newly cloned thread.
+    sub CLONE {
+        undef $drh;
+    }
+}
+
+
+{ package DBD::NullP::dr; # ====== DRIVER ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Default connect: delegate to DBI then just mark the handle active -
+    # there is no real database to talk to.
+    sub connect { # normally overridden, but a handy default
+        my $dbh = shift->SUPER::connect(@_)
+            or return;
+        $dbh->STORE(Active => 1);
+        $dbh;
+    }
+
+
+    # nothing to clean up for this stub driver
+    sub DESTROY { undef }
+}
+
+
+{ package DBD::NullP::db; # ====== DATABASE ======
+    $imp_data_size = 0;
+    use strict;
+    use Carp qw(croak);
+
+    # Create a statement handle that records only the statement text;
+    # all the (non-)work happens later in execute().
+    sub prepare {
+        my ($dbh, $statement)= @_;
+
+        my ($outer, $sth) = DBI::_new_sth($dbh, {
+            'Statement' => $statement,
+        });
+
+        return $outer;
+    }
+
+    sub FETCH {
+        my ($dbh, $attrib) = @_;
+        # In reality this would interrogate the database engine to
+        # either return dynamic values that cannot be precomputed
+        # or fetch and cache attribute values too expensive to prefetch.
+        return $dbh->SUPER::FETCH($attrib);
+    }
+
+    sub STORE {
+        my ($dbh, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        if ($attrib eq 'AutoCommit') {
+            Carp::croak("Can't disable AutoCommit") unless $value;
+            # convert AutoCommit values to magic ones to let DBI
+            # know that the driver has 'handled' the AutoCommit attribute
+            $value = ($value) ? -901 : -900;
+        }
+        return $dbh->SUPER::STORE($attrib, $value);
+    }
+
+    # no connection to test, so always report "alive"
+    sub ping { 1 }
+
+    # nothing to tear down - just flip the Active flag
+    sub disconnect {
+        shift->STORE(Active => 0);
+    }
+
+}
+
+
+{ package DBD::NullP::st; # ====== STATEMENT ======
+    $imp_data_size = 0;
+    use strict;
+
+    # Record a bound parameter value (and optionally its attributes) in
+    # the standard ParamValues / ParamAttr statement-handle hashes.
+    sub bind_param {
+        my ($sth, $param, $value, $attr) = @_;
+        $sth->{ParamValues}{$param} = $value;
+        $sth->{ParamAttr}{$param} = $attr
+            if defined $attr; # attr is sticky if not explicitly set
+        return 1;
+    }
+
+    # Recognises three toy statement forms: SELECT (returns the bound
+    # parameter values, one per row, in a single column "fieldname"),
+    # SLEEP <secs> and ERROR <num> <msg>.  Anything else is accepted
+    # and ignored.  Positional arguments are bound via bind_param first.
+    sub execute {
+        my $sth = shift;
+        $sth->bind_param($_, $_[$_-1]) for (1..@_);
+        if ($sth->{Statement} =~ m/^ \s* SELECT \s+/xmsi) {
+            $sth->STORE(NUM_OF_FIELDS => 1);
+            $sth->{NAME} = [ "fieldname" ];
+            # just for the sake of returning something, we return the params
+            my $params = $sth->{ParamValues} || {};
+            $sth->{dbd_nullp_data} = [ @{$params}{ sort keys %$params } ];
+            $sth->STORE(Active => 1);
+        }
+        # force a sleep - handy for testing
+        elsif ($sth->{Statement} =~ m/^ \s* SLEEP \s+ (\S+) /xmsi) {
+            my $secs = $1;
+            if (eval { require Time::HiRes; defined &Time::HiRes::sleep }) {
+                Time::HiRes::sleep($secs);
+            }
+            else {
+                sleep $secs;
+            }
+        }
+        # force an error - handy for testing
+        elsif ($sth->{Statement} =~ m/^ \s* ERROR \s+ (\d+) \s* (.*) /xmsi) {
+            return $sth->set_err($1, $2);
+        }
+        # anything else is silently ignored, successfully
+        1;
+    }
+
+    # Return the next queued row (set up by a SELECT in execute), or
+    # finish and return undef when the queue is empty.
+    sub fetchrow_arrayref {
+        my $sth = shift;
+        my $data = $sth->{dbd_nullp_data};
+        if (!$data || !@$data) {
+            $sth->finish;     # no more data so finish
+            return undef;
+        }
+        return $sth->_set_fbav(shift @$data);
+    }
+    *fetch = \&fetchrow_arrayref; # alias
+
+    sub FETCH {
+        my ($sth, $attrib) = @_;
+        # would normally validate and only fetch known attributes
+        # else pass up to DBI to handle
+        return $sth->SUPER::FETCH($attrib);
+    }
+
+    sub STORE {
+        my ($sth, $attrib, $value) = @_;
+        # would normally validate and only store known attributes
+        # else pass up to DBI to handle
+        return $sth->SUPER::STORE($attrib, $value);
+    }
+
+}
+
+1;
diff --git a/lib/DBD/Proxy.pm b/lib/DBD/Proxy.pm
new file mode 100644
index 0000000..6c9e14d
--- /dev/null
+++ b/lib/DBD/Proxy.pm
@@ -0,0 +1,997 @@
+# -*- perl -*-
+#
+#
+# DBD::Proxy - DBI Proxy driver
+#
+#
+# Copyright (c) 1997,1998 Jochen Wiedmann
+#
+# The DBD::Proxy module is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself. In particular permission
+# is granted to Tim Bunce for distributing this as a part of the DBI.
+#
+#
+# Author: Jochen Wiedmann
+# Am Eisteich 9
+# 72555 Metzingen
+# Germany
+#
+# Email: joe@ispsoft.de
+# Phone: +49 7123 14881
+#
+
+use strict;
+use Carp;
+
+require DBI;
+DBI->require_version(1.0201);
+
+use RPC::PlClient 0.2000; # XXX change to 0.2017 once it's released
+
+# Thin subclass of RPC::PlClient whose Call() logs each remote method
+# invocation (method name and arguments) when debug is enabled, then
+# delegates to the parent class unchanged.
+{ package DBD::Proxy::RPC::PlClient;
+    @DBD::Proxy::RPC::PlClient::ISA = qw(RPC::PlClient);
+    sub Call {
+        my $self = shift;
+        if ($self->{debug}) {
+            my ($rpcmeth, $obj, $method, @args) = @_;
+            local $^W; # silence undefs
+            Carp::carp("Server $rpcmeth $method(@args)");
+        }
+        return $self->SUPER::Call(@_);
+    }
+}
+
+
+package DBD::Proxy;
+
+use vars qw($VERSION $drh %ATTR);
+
+$VERSION = "0.2004";
+
+$drh = undef; # holds driver handle once initialised
+
+# Attribute dispositions shared by the db and st classes (each merges
+# this into its own %ATTR):
+#   'local'  - handled entirely by the local handle
+#   'cached' - stored both remotely and locally, fetched from local
+%ATTR = ( # common to db & st, see also %ATTR in DBD::Proxy::db & ::st
+    'Warn' => 'local',
+    'Active' => 'local',
+    'Kids' => 'local',
+    'CachedKids' => 'local',
+    'PrintError' => 'local',
+    'RaiseError' => 'local',
+    'HandleError' => 'local',
+    'TraceLevel' => 'cached',
+    'CompatMode' => 'local',
+);
+
+# Create (once) and return the singleton driver handle.
+sub driver ($$) {
+    if (!$drh) {
+        my($class, $attr) = @_;
+
+        $class .= "::dr";
+
+        $drh = DBI::_new_drh($class, {
+            'Name' => 'Proxy',
+            'Version' => $VERSION,
+            'Attribution' => 'DBD::Proxy by Jochen Wiedmann',
+        });
+        $drh->STORE(CompatMode => 1); # disable DBI dispatcher attribute cache (for FETCH)
+    }
+    $drh;
+}
+
+# Discard the driver handle in a newly cloned thread.
+sub CLONE {
+    undef $drh;
+}
+
+# Record an error on handle $h.  Server-side errors may carry an
+# " [err=...,state=...]" suffix, which is stripped and used as the err
+# code and SQLSTATE; otherwise err defaults to 1 and the state to five
+# spaces (the "success" SQLSTATE placeholder).
+sub proxy_set_err {
+    my ($h,$errmsg) = @_;
+    my ($err, $state) = ($errmsg =~ s/ \[err=(.*?),state=(.*?)\]//)
+        ? ($1, $2) : (1, ' ' x 5);
+    return $h->set_err($err, $errmsg, $state);
+}
+
+package DBD::Proxy::dr; # ====== DRIVER ======
+
+$DBD::Proxy::dr::imp_data_size = 0;
+
+# Connect to a DBI::ProxyServer.  The DSN is a semicolon-separated list
+# of var=val pairs (hostname, port, cipher/key, debug, timeout, ...);
+# everything after "dsn=" is taken verbatim as the remote DSN.  Builds
+# an RPC::PlClient connection, negotiates the protocol version with the
+# server, and returns a database handle wrapping the remote one.
+sub connect ($$;$$) {
+    my($drh, $dsn, $user, $auth, $attr)= @_;
+    my($dsnOrig) = $dsn;
+
+    # parse the var=val pairs out of the DSN into %attr
+    my %attr = %$attr;
+    my ($var, $val);
+    while (length($dsn)) {
+        if ($dsn =~ /^dsn=(.*)/) {
+            $attr{'dsn'} = $1;
+            last;
+        }
+        if ($dsn =~ /^(.*?);(.*)/) {
+            $var = $1;
+            $dsn = $2;
+        } else {
+            $var = $dsn;
+            $dsn = '';
+        }
+        if ($var =~ /^(.*?)=(.*)/) {
+            $var = $1;
+            $val = $2;
+            $attr{$var} = $val;
+        }
+    }
+
+    # validate the required connection parameters
+    my $err = '';
+    if (!defined($attr{'hostname'})) { $err .= " Missing hostname."; }
+    if (!defined($attr{'port'})) { $err .= " Missing port."; }
+    if (!defined($attr{'dsn'})) { $err .= " Missing remote dsn."; }
+
+    # Create a cipher object, if requested
+    my $cipherRef = undef;
+    if ($attr{'cipher'}) {
+        $cipherRef = eval { $attr{'cipher'}->new(pack('H*',
+                                                        $attr{'key'})) };
+        if ($@) { $err .= " Cannot create cipher object: $@."; }
+    }
+    # optional separate cipher used after login (userkey/usercipher)
+    my $userCipherRef = undef;
+    if ($attr{'userkey'}) {
+        my $cipher = $attr{'usercipher'} || $attr{'cipher'};
+        $userCipherRef = eval { $cipher->new(pack('H*', $attr{'userkey'})) };
+        if ($@) { $err .= " Cannot create usercipher object: $@."; }
+    }
+
+    return DBD::Proxy::proxy_set_err($drh, $err) if $err; # Returns undef
+
+    my %client_opts = (
+        'peeraddr' => $attr{'hostname'},
+        'peerport' => $attr{'port'},
+        'socket_proto' => 'tcp',
+        'application' => $attr{dsn},
+        'user' => $user || '',
+        'password' => $auth || '',
+        'version' => $DBD::Proxy::VERSION,
+        'cipher' => $cipherRef,
+        'debug' => $attr{debug} || 0,
+        'timeout' => $attr{timeout} || undef,
+        'logfile' => $attr{logfile} || undef
+    );
+    # Options starting with 'proxy_rpc_' are forwarded to the RPC layer after
+    # stripping the prefix.
+    while (my($var,$val) = each %attr) {
+        if ($var =~ s/^proxy_rpc_//) {
+            $client_opts{$var} = $val;
+        }
+    }
+    # Create an RPC::PlClient object.
+    my($client, $msg) = eval { DBD::Proxy::RPC::PlClient->new(%client_opts) };
+
+    return DBD::Proxy::proxy_set_err($drh, "Cannot log in to DBI::ProxyServer: $@")
+        if $@; # Returns undef
+    return DBD::Proxy::proxy_set_err($drh, "Constructor didn't return a handle: $msg")
+        unless ($msg =~ /^((?:\w+|\:\:)+)=(\w+)/); # Returns undef
+
+    $msg = RPC::PlClient::Object->new($1, $client, $msg);
+
+    # Probe the server version to decide the highest protocol we may use.
+    my $max_proto_ver;
+    my ($server_ver_str) = eval { $client->Call('Version') };
+    if ( $@ ) {
+        # Server denies call, assume legacy protocol.
+        $max_proto_ver = 1;
+    } else {
+        # Parse proxy server version.
+        my ($server_ver_num) = $server_ver_str =~ /^DBI::ProxyServer\s+([\d\.]+)/;
+        $max_proto_ver = $server_ver_num >= 0.3 ? 2 : 1;
+    }
+    # proxy_lazy_prepare=0 requires protocol 2 (synchronous prepare)
+    my $req_proto_ver;
+    if ( exists $attr{proxy_lazy_prepare} ) {
+        $req_proto_ver = ($attr{proxy_lazy_prepare} == 0) ? 2 : 1;
+        return DBD::Proxy::proxy_set_err($drh,
+                "DBI::ProxyServer does not support synchronous statement preparation.")
+            if $max_proto_ver < $req_proto_ver;
+    }
+
+    # Switch to user specific encryption mode, if desired
+    if ($userCipherRef) {
+        $client->{'cipher'} = $userCipherRef;
+    }
+
+    # create a 'blank' dbh
+    my $this = DBI::_new_dbh($drh, {
+            'Name' => $dsnOrig,
+            'proxy_dbh' => $msg,
+            'proxy_client' => $client,
+            'RowCacheSize' => $attr{'RowCacheSize'} || 20,
+            'proxy_proto_ver' => $req_proto_ver || 1
+    });
+
+    # copy all proxy_* attributes onto the new handle
+    foreach $var (keys %attr) {
+        if ($var =~ /proxy_/) {
+            $this->{$var} = $attr{$var};
+        }
+    }
+    $this->SUPER::STORE('Active' => 1);
+
+    $this;
+}
+
+
+sub DESTROY { undef }
+
+
+package DBD::Proxy::db; # ====== DATABASE ======
+
+$DBD::Proxy::db::imp_data_size = 0;
+
+# XXX probably many more methods need to be added here
+# in order to trigger our AUTOLOAD to redirect them to the server.
+# (Unless the sub is declared it's bypassed by perl method lookup.)
+# See notes in ToDo about method metadata
+# The question is whether to add all the methods in %DBI::DBI_methods
+# to the corresponding classes (::db, ::st etc)
+# Also need to consider methods that, if proxied, would change the server state
+# in a way that might not be visible on the client, ie begin_work -> AutoCommit.
+
+# Forward declarations: these stubs make perl's method lookup fall
+# through to our AUTOLOAD, which generates remote-forwarding
+# implementations on first call.
+sub commit;
+sub connected;
+sub rollback;
+sub ping;
+
+
+use vars qw(%ATTR $AUTOLOAD);
+
+# inherited: STORE / FETCH against this class.
+# local: STORE / FETCH against parent class.
+# cached: STORE to remote and local objects, FETCH from local.
+# remote: STORE / FETCH against remote object only (default).
+#
+# Note: Attribute names starting with 'proxy_' always treated as 'inherited'.
+#
+%ATTR = ( # see also %ATTR in DBD::Proxy::st
+    %DBD::Proxy::ATTR,
+    RowCacheSize => 'inherited',
+    #AutoCommit => 'cached',
+    'FetchHashKeyName' => 'cached',
+    Statement => 'local',
+    Driver => 'local',
+    dbi_connect_closure => 'local',
+    Username => 'local',
+);
+
+# Generate, on first use, a method that forwards the call (preserving
+# list/scalar context) to the corresponding remote handle and converts
+# any remote exception into a DBI error via proxy_set_err.  The generated
+# sub is compiled into the calling class, then re-dispatched with goto.
+sub AUTOLOAD {
+    my $method = $AUTOLOAD;
+    $method =~ s/(.*::(.*)):://;
+    my $class = $1;
+    my $type = $2;
+    #warn "AUTOLOAD of $method (class=$class, type=$type)";
+    my %expand = (
+        'method' => $method,
+        'class' => $class,
+        'type' => $type,
+        'call' => "$method(\@_)",
+        # XXX was trying to be smart but was tripping up over the DBI's own
+        # smartness. Disabled, but left here in case there are issues.
+        #'call' => (UNIVERSAL::can("DBI::_::$type", $method)) ? "$method(\@_)" : "func(\@_, '$method')",
+    );
+
+    my $method_code = q{
+        package ~class~;
+        sub ~method~ {
+            my $h = shift;
+            local $@;
+            my @result = wantarray
+                ? eval { $h->{'proxy_~type~h'}->~call~ }
+                : eval { scalar $h->{'proxy_~type~h'}->~call~ };
+            return DBD::Proxy::proxy_set_err($h, $@) if $@;
+            return wantarray ? @result : $result[0];
+        }
+    };
+    $method_code =~ s/\~(\w+)\~/$expand{$1}/eg;
+    local $SIG{__DIE__} = 'DEFAULT';
+    # the appended "2" makes the eval'd string end in a true value, so a
+    # failed compile can be distinguished reliably via $@
+    my $err = do { local $@; eval $method_code.2; $@ };
+    die $err if $err;
+    goto &$AUTOLOAD;
+}
+
+# Ensure an active handle is disconnected when it goes out of scope.
+sub DESTROY {
+    my $dbh = shift;
+    local $@ if $@; # protect $@
+    $dbh->disconnect if $dbh->SUPER::FETCH('Active');
+}
+
+# Disconnect the remote database handle (best-effort), drop the TCP
+# socket to the proxy server, and mark the local handle inactive.
+sub disconnect ($) {
+    my ($dbh) = @_;
+
+    # Sadly the Proxy too-often disagrees with the backend database
+    # on the subject of 'Active'.  In the short term, I'd like the
+    # Proxy to ease up and let me decide when it's proper to go over
+    # the wire.  This ultimately applies to finish() as well.
+    #return unless $dbh->SUPER::FETCH('Active');
+
+    # Drop database connection at remote end
+    my $rdbh = $dbh->{'proxy_dbh'};
+    if ( $rdbh ) {
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        eval { $rdbh->disconnect() } ;
+        DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    }
+
+    # Close TCP connect to remote
+    # XXX possibly best left till DESTROY? Add a config attribute to choose?
+    #$dbh->{proxy_client}->Disconnect(); # Disconnect method requires newer PlRPC module
+    $dbh->{proxy_client}->{socket} = undef; # hack
+
+    $dbh->SUPER::STORE('Active' => 0);
+    1;
+}
+
+
+# Store an attribute according to its disposition in %ATTR: proxy_* and
+# 'inherited' stay local; 'remote'/'cached' go over the wire ('cached'
+# also updates the local copy); everything else goes to the superclass.
+sub STORE ($$$) {
+    my($dbh, $attr, $val) = @_;
+    my $type = $ATTR{$attr} || 'remote';
+
+    if ($attr eq 'TraceLevel') {
+        # NOTE(review): this warn fires unconditionally on every
+        # TraceLevel store - looks like leftover debugging; confirm.
+        warn("TraceLevel $val");
+        my $pc = $dbh->{proxy_client} || die;
+        $pc->{logfile} ||= 1; # XXX hack
+        $pc->{debug} = ($val && $val >= 4);
+        $pc->Debug("$pc debug enabled") if $pc->{debug};
+    }
+
+    if ($attr =~ /^proxy_/ || $type eq 'inherited') {
+        $dbh->{$attr} = $val;
+        return 1;
+    }
+
+    if ($type eq 'remote' || $type eq 'cached') {
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $dbh->{'proxy_dbh'}->STORE($attr => $val) };
+        return DBD::Proxy::proxy_set_err($dbh, $@) if $@; # returns undef
+        $dbh->SUPER::STORE($attr => $val) if $type eq 'cached';
+        return $result;
+    }
+    return $dbh->SUPER::STORE($attr => $val);
+}
+
+# Fetch an attribute according to its %ATTR disposition: proxy_*,
+# 'inherited' and 'cached' come from the local handle; 'remote' goes
+# over the wire; anything else falls through to the superclass.
+sub FETCH ($$) {
+    my($dbh, $attr) = @_;
+    # we only get here for cached attribute values if the handle is in CompatMode
+    # otherwise the DBI dispatcher handles the FETCH itself from the attribute cache.
+    my $type = $ATTR{$attr} || 'remote';
+
+    if ($attr =~ /^proxy_/ || $type eq 'inherited' || $type eq 'cached') {
+        return $dbh->{$attr};
+    }
+
+    return $dbh->SUPER::FETCH($attr) unless $type eq 'remote';
+
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my $result = eval { $dbh->{'proxy_dbh'}->FETCH($attr) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    return $result;
+}
+
+# Create a statement handle.  Under protocol 1 ("lazy prepare") the
+# remote prepare is deferred to the first execute(); under protocol 2
+# the statement is prepared on the server immediately and the remote
+# handle stored in proxy_sth.
+sub prepare ($$;$) {
+    my($dbh, $stmt, $attr) = @_;
+    my $sth = DBI::_new_sth($dbh, {
+        'Statement' => $stmt,
+        'proxy_attr' => $attr,
+        'proxy_cache_only' => 0,
+        'proxy_params' => [],
+        }
+    );
+    my $proto_ver = $dbh->{'proxy_proto_ver'};
+    if ( $proto_ver > 1 ) {
+        $sth->{'proxy_attr_cache'} = {cache_filled => 0};
+        my $rdbh = $dbh->{'proxy_dbh'};
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $rsth = eval { $rdbh->prepare($sth->{'Statement'}, $sth->{'proxy_attr'}, undef, $proto_ver) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        return DBD::Proxy::proxy_set_err($sth, "Constructor didn't return a handle: $rsth")
+            unless ($rsth =~ /^((?:\w+|\:\:)+)=(\w+)/);
+
+        my $client = $dbh->{'proxy_client'};
+        $rsth = RPC::PlClient::Object->new($1, $client, $rsth);
+
+        $sth->{'proxy_sth'} = $rsth;
+        # If statement is a positioned update we do not want any readahead.
+        $sth->{'RowCacheSize'} = 1 if $stmt =~ /\bfor\s+update\b/i;
+        # Since resources are used by prepared remote handle, mark us active.
+        $sth->SUPER::STORE(Active => 1);
+    }
+    $sth;
+}
+
+# Quote a value for interpolation into SQL.  With proxy_quote='local'
+# (and no explicit data type) the DBI's own quoting is used; otherwise
+# the value is sent to the remote driver for driver-specific quoting.
+sub quote {
+    my $dbh = shift;
+    my $proxy_quote = $dbh->{proxy_quote} || 'remote';
+
+    return $dbh->SUPER::quote(@_)
+        if $proxy_quote eq 'local' && @_ == 1;
+
+    # For the common case of only a single argument
+    # (no $data_type) we could learn and cache the behaviour.
+    # Or we could probe the driver with a few test cases.
+    # Or we could add a way to ask the DBI::ProxyServer
+    # if $dbh->can('quote') == \&DBI::_::db::quote.
+    # Tim
+    #
+    # Sounds all *very* smart to me. I'd rather suggest to
+    # implement some of the typical quote possibilities
+    # and let the user set
+    # $dbh->{'proxy_quote'} = 'backslash_escaped';
+    # for example.
+    # Jochen
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my $result = eval { $dbh->{'proxy_dbh'}->quote(@_) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    return $result;
+}
+
+# Fetch the full table_info result set from the server in one call and
+# wrap it in a pre-executed, cache-only statement handle (proxy_cache_only
+# means fetch() never goes back to the server for more rows).
+sub table_info {
+    my $dbh = shift;
+    my $rdbh = $dbh->{'proxy_dbh'};
+    #warn "table_info(@_)";
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my($numFields, $names, $types, @rows) = eval { $rdbh->table_info(@_) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    my ($sth, $inner) = DBI::_new_sth($dbh, {
+        'Statement' => "SHOW TABLES",
+        'proxy_params' => [],
+        'proxy_data' => \@rows,
+        'proxy_attr_cache' => {
+            'NUM_OF_PARAMS' => 0,
+            'NUM_OF_FIELDS' => $numFields,
+            'NAME' => $names,
+            'TYPE' => $types,
+            'cache_filled' => 1
+        },
+        'proxy_cache_only' => 1,
+    });
+    $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields);
+    $inner->{NAME} = $names;
+    $inner->{TYPE} = $types;
+    $sth->SUPER::STORE('Active' => 1); # already execute()'d
+    $sth->{'proxy_rows'} = @rows;
+    return $sth;
+}
+
+# tables() builds on table_info(), so the DBI default implementation works.
+sub tables {
+    my $dbh = shift;
+    #warn "tables(@_)";
+    return $dbh->SUPER::tables(@_);
+}
+
+
+# Forward type_info_all to the remote driver, converting any remote
+# exception into a DBI error.
+sub type_info_all {
+    my $dbh = shift;
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    my $result = eval { $dbh->{'proxy_dbh'}->type_info_all(@_) };
+    return DBD::Proxy::proxy_set_err($dbh, $@) if $@;
+    return $result;
+}
+
+
+package DBD::Proxy::st; # ====== STATEMENT ======
+
+$DBD::Proxy::st::imp_data_size = 0;
+
+use vars qw(%ATTR);
+
+# inherited: STORE to current object. FETCH from current if exists, else call up
+#            to the (proxy) database object.
+# local: STORE / FETCH against parent class.
+# cache_only: STORE noop (read-only).  FETCH from private_* if exists, else call
+#             remote and cache the result.
+# remote: STORE / FETCH against remote object only (default).
+#
+# Note: Attribute names starting with 'proxy_' always treated as 'inherited'.
+#
+%ATTR = ( # see also %ATTR in DBD::Proxy::db
+    %DBD::Proxy::ATTR,
+    'Database' => 'local',
+    'RowsInCache' => 'local',
+    'RowCacheSize' => 'inherited',
+    'NULLABLE' => 'cache_only',
+    'NAME' => 'cache_only',
+    'TYPE' => 'cache_only',
+    'PRECISION' => 'cache_only',
+    'SCALE' => 'cache_only',
+    'NUM_OF_FIELDS' => 'cache_only',
+    'NUM_OF_PARAMS' => 'cache_only'
+);
+
+# unknown statement methods are remote-forwarded just like db methods
+*AUTOLOAD = \&DBD::Proxy::db::AUTOLOAD;
+
+# Execute the statement remotely.  Protocol 2 executes the already
+# prepared remote handle; protocol 1 ("lazy prepare") performs the
+# remote prepare+execute in one call on first execution.  The first
+# execute also returns metadata (field/param counts, names, types),
+# which is cached locally; trailing items in the reply are values for
+# output (inout) parameters.  Returns the row count ('0E0' for zero).
+sub execute ($@) {
+    my $sth = shift;
+    my $params = @_ ? \@_ : $sth->{'proxy_params'};
+
+    # new execute, so delete any cached rows from previous execute
+    undef $sth->{'proxy_data'};
+    undef $sth->{'proxy_rows'};
+
+    my $rsth = $sth->{proxy_sth};
+    my $dbh = $sth->FETCH('Database');
+    my $proto_ver = $dbh->{proxy_proto_ver};
+
+    my ($numRows, @outData);
+
+    local $SIG{__DIE__} = 'DEFAULT';
+    local $@;
+    if ( $proto_ver > 1 ) {
+        ($numRows, @outData) = eval { $rsth->execute($params, $proto_ver) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+
+        # Attributes passed back only on the first execute() of a statement.
+        unless ($sth->{proxy_attr_cache}->{cache_filled}) {
+            my ($numFields, $numParams, $names, $types) = splice(@outData, 0, 4);
+            $sth->{'proxy_attr_cache'} = {
+                'NUM_OF_FIELDS' => $numFields,
+                'NUM_OF_PARAMS' => $numParams,
+                'NAME' => $names,
+                'cache_filled' => 1
+            };
+            $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields);
+            $sth->SUPER::STORE('NUM_OF_PARAMS' => $numParams);
+        }
+
+    }
+    else {
+        if ($rsth) {
+            ($numRows, @outData) = eval { $rsth->execute($params, $proto_ver) };
+            return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+
+        }
+        else {
+            my $rdbh = $dbh->{'proxy_dbh'};
+
+            # Legacy prepare is actually prepare + first execute on the server.
+            ($rsth, @outData) =
+                eval { $rdbh->prepare($sth->{'Statement'},
+                        $sth->{'proxy_attr'}, $params, $proto_ver) };
+            return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+            return DBD::Proxy::proxy_set_err($sth, "Constructor didn't return a handle: $rsth")
+                unless ($rsth =~ /^((?:\w+|\:\:)+)=(\w+)/);
+
+            my $client = $dbh->{'proxy_client'};
+            $rsth = RPC::PlClient::Object->new($1, $client, $rsth);
+
+            my ($numFields, $numParams, $names, $types) = splice(@outData, 0, 4);
+            $sth->{'proxy_sth'} = $rsth;
+            $sth->{'proxy_attr_cache'} = {
+                'NUM_OF_FIELDS' => $numFields,
+                'NUM_OF_PARAMS' => $numParams,
+                'NAME' => $names
+            };
+            $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields);
+            $sth->SUPER::STORE('NUM_OF_PARAMS' => $numParams);
+            $numRows = shift @outData;
+        }
+    }
+    # Always condition active flag.
+    $sth->SUPER::STORE('Active' => 1) if $sth->FETCH('NUM_OF_FIELDS'); # is SELECT
+    $sth->{'proxy_rows'} = $numRows;
+    # Any remaining items are output params.
+    if (@outData) {
+        foreach my $p (@$params) {
+            if (ref($p->[0])) {
+                my $ref = shift @outData;
+                ${$p->[0]} = $$ref;
+            }
+        }
+    }
+
+    $sth->{'proxy_rows'} || '0E0';
+}
+
+# Return the next row, refilling the local row cache from the server in
+# batches of RowCacheSize (default 20) as needed.  A short batch means
+# the server has exhausted (and finished) the result set.
+sub fetch ($) {
+    my $sth = shift;
+
+    my $data = $sth->{'proxy_data'};
+
+    $sth->{'proxy_rows'} = 0 unless defined $sth->{'proxy_rows'};
+
+    if(!$data || !@$data) {
+        return undef unless $sth->SUPER::FETCH('Active');
+
+        my $rsth = $sth->{'proxy_sth'};
+        if (!$rsth) {
+            die "Attempt to fetch row without execute";
+        }
+        my $num_rows = $sth->FETCH('RowCacheSize') || 20;
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my @rows = eval { $rsth->fetch($num_rows) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        unless (@rows == $num_rows) {
+            undef $sth->{'proxy_data'};
+            # server side has already called finish
+            $sth->SUPER::STORE(Active => 0);
+        }
+        return undef unless @rows;
+        $sth->{'proxy_data'} = $data = [@rows];
+    }
+    my $row = shift @$data;
+
+    # cache-only handles (e.g. table_info) go inactive when drained
+    $sth->SUPER::STORE(Active => 0) if ( $sth->{proxy_cache_only} and !@$data );
+    $sth->{'proxy_rows'}++;
+    return $sth->_set_fbav($row);
+}
+*fetchrow_arrayref = \&fetch;
+
+# Row count of the last execute; -1 when unknown (per DBI convention).
+sub rows ($) {
+    my $rows = shift->{'proxy_rows'};
+    return (defined $rows) ? $rows : -1;
+}
+
+# Mark the statement inactive and, unless proxy_no_finish is set on the
+# statement or its database handle, finish the remote statement too.
+sub finish ($) {
+    my($sth) = @_;
+    return 1 unless $sth->SUPER::FETCH('Active');
+    my $rsth = $sth->{'proxy_sth'};
+    $sth->SUPER::STORE('Active' => 0);
+    return 0 unless $rsth; # Something's out of sync
+    my $no_finish = exists($sth->{'proxy_no_finish'})
+        ? $sth->{'proxy_no_finish'}
+        : $sth->FETCH('Database')->{'proxy_no_finish'};
+    unless ($no_finish) {
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $rsth->finish() };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        return $result;
+    }
+    1;
+}
+
+# Store a statement attribute according to its %ATTR disposition:
+# proxy_*/'inherited' stay local, 'cache_only' is read-only (noop),
+# 'remote'/'cached' go over the wire ('cached' also stores locally).
+sub STORE ($$$) {
+    my($sth, $attr, $val) = @_;
+    my $type = $ATTR{$attr} || 'remote';
+
+    if ($attr =~ /^proxy_/ || $type eq 'inherited') {
+        $sth->{$attr} = $val;
+        return 1;
+    }
+
+    if ($type eq 'cache_only') {
+        return 0;
+    }
+
+    if ($type eq 'remote' || $type eq 'cached') {
+        my $rsth = $sth->{'proxy_sth'} or return undef;
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $rsth->STORE($attr => $val) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if ($@);
+        return $result if $type eq 'remote'; # else fall through to cache locally
+    }
+    return $sth->SUPER::STORE($attr => $val);
+}
+
+# Fetch a statement attribute according to its %ATTR disposition:
+# proxy_* come from the handle; 'inherited' falls back to the database
+# handle; 'cache_only' is served from the attr cache when filled;
+# non-'local' goes over the wire; 'local' uses the superclass (with
+# RowsInCache computed from the local row cache).
+sub FETCH ($$) {
+    my($sth, $attr) = @_;
+
+    if ($attr =~ /^proxy_/) {
+        return $sth->{$attr};
+    }
+
+    my $type = $ATTR{$attr} || 'remote';
+    if ($type eq 'inherited') {
+        if (exists($sth->{$attr})) {
+            return $sth->{$attr};
+        }
+        return $sth->FETCH('Database')->{$attr};
+    }
+
+    if ($type eq 'cache_only' &&
+            exists($sth->{'proxy_attr_cache'}->{$attr})) {
+        return $sth->{'proxy_attr_cache'}->{$attr};
+    }
+
+    if ($type ne 'local') {
+        my $rsth = $sth->{'proxy_sth'} or return undef;
+        local $SIG{__DIE__} = 'DEFAULT';
+        local $@;
+        my $result = eval { $rsth->FETCH($attr) };
+        return DBD::Proxy::proxy_set_err($sth, $@) if $@;
+        return $result;
+    }
+    elsif ($attr eq 'RowsInCache') {
+        my $data = $sth->{'proxy_data'};
+        $data ? @$data : 0;
+    }
+    else {
+        $sth->SUPER::FETCH($attr);
+    }
+}
+
+sub bind_param ($$$@) {
+    my ($sth, $param, @value_and_attribs) = @_;
+    # Bound parameters are buffered locally (1-based parameter number
+    # maps to a 0-based slot) and shipped to the server at execute time.
+    $sth->{'proxy_params'}->[$param-1] = [@value_and_attribs];
+}
+*bind_param_inout = \&bind_param;
+
+sub DESTROY {
+    my ($sth) = @_;
+    # Ensure an active remote statement is finished before the local
+    # handle is garbage-collected.
+    return unless $sth->SUPER::FETCH('Active');
+    $sth->finish;
+}
+
+
+1;
+
+
+__END__
+
+=head1 NAME
+
+DBD::Proxy - A proxy driver for the DBI
+
+=head1 SYNOPSIS
+
+ use DBI;
+
+ $dbh = DBI->connect("dbi:Proxy:hostname=$host;port=$port;dsn=$db",
+ $user, $passwd);
+
+ # See the DBI module documentation for full details
+
+=head1 DESCRIPTION
+
+DBD::Proxy is a Perl module for connecting to a database via a remote
+DBI driver. See L<DBD::Gofer> for an alternative with different trade-offs.
+
+This is of course not needed for DBI drivers which already
+support connecting to a remote database, but there are engines which
+don't offer network connectivity.
+
+Another application is offering database access through a firewall, as
+the driver offers query based restrictions. For example you can
+restrict queries to exactly those that are used in a given CGI
+application.
+
+Speaking of CGI, another application is (or rather, will be) to reduce
+the database connect/disconnect overhead from CGI scripts by
+proxying the connect_cached method. The proxy server will hold the
+database connections open in a cache. The CGI script then trades the
+database connect/disconnect overhead for the DBD::Proxy
+connect/disconnect overhead which is typically much less.
+I<Note that the connect_cached method is new and still experimental.>
+
+
+=head1 CONNECTING TO THE DATABASE
+
+Before connecting to a remote database, you must ensure, that a Proxy
+server is running on the remote machine. There's no default port, so
+you have to ask your system administrator for the port number. See
+L<DBI::ProxyServer> for details.
+
+Say, your Proxy server is running on machine "alpha", port 3334, and
+you'd like to connect to an ODBC database called "mydb" as user "joe"
+with password "hello". When using DBD::ODBC directly, you'd do a
+
+ $dbh = DBI->connect("DBI:ODBC:mydb", "joe", "hello");
+
+With DBD::Proxy this becomes
+
+ $dsn = "DBI:Proxy:hostname=alpha;port=3334;dsn=DBI:ODBC:mydb";
+ $dbh = DBI->connect($dsn, "joe", "hello");
+
+You see, this is mainly the same. The DBD::Proxy module will create a
+connection to the Proxy server on "alpha" which in turn will connect
+to the ODBC database.
+
+Refer to the L<DBI> documentation on the C<connect> method for a way
+to automatically use DBD::Proxy without having to change your code.
+
+DBD::Proxy's DSN string has the format
+
+ $dsn = "DBI:Proxy:key1=val1; ... ;keyN=valN;dsn=valDSN";
+
+In other words, it is a collection of key/value pairs. The following
+keys are recognized:
+
+=over 4
+
+=item hostname
+
+=item port
+
+Hostname and port of the Proxy server; these keys must be present,
+no defaults. Example:
+
+ hostname=alpha;port=3334
+
+=item dsn
+
+The value of this attribute will be used as a dsn name by the Proxy
+server. Thus it must have the format C<DBI:driver:...>, in particular
+it will contain colons. The I<dsn> value may contain semicolons, hence
+this key *must* be the last and it's value will be the complete
+remaining part of the dsn. Example:
+
+ dsn=DBI:ODBC:mydb
+
+=item cipher
+
+=item key
+
+=item usercipher
+
+=item userkey
+
+By using these fields you can enable encryption. If you set,
+for example,
+
+ cipher=$class;key=$key
+
+(note the semicolon) then DBD::Proxy will create a new cipher object
+by executing
+
+ $cipherRef = $class->new(pack("H*", $key));
+
+and pass this object to the RPC::PlClient module when creating a
+client. See L<RPC::PlClient>. Example:
+
+ cipher=IDEA;key=97cd2375efa329aceef2098babdc9721
+
+The usercipher/userkey attributes allow you to use two phase encryption:
+The cipher/key encryption will be used in the login and authorisation
+phase. Once the client is authorised, he will change to usercipher/userkey
+encryption. Thus the cipher/key pair is a B<host> based secret, typically
+less secure than the usercipher/userkey secret and readable by anyone.
+The usercipher/userkey secret is B<your> private secret.
+
+Of course encryption requires an appropriately configured server. See
+L<DBI::ProxyServer/CONFIGURATION FILE>.
+
+=item debug
+
+Turn on debugging mode
+
+=item stderr
+
+This attribute will set the corresponding attribute of the RPC::PlClient
+object, thus logging will not use syslog(), but redirected to stderr.
+This is the default under Windows.
+
+ stderr=1
+
+=item logfile
+
+Similar to the stderr attribute, but output will be redirected to the
+given file.
+
+ logfile=/dev/null
+
+=item RowCacheSize
+
+The DBD::Proxy driver supports this attribute (which is DBI standard,
+as of DBI 1.02). It's used to reduce network round-trips by fetching
+multiple rows in one go. The current default value is 20, but this may
+change.
+
+
+=item proxy_no_finish
+
+This attribute can be used to reduce network traffic: If the
+application is calling $sth->finish() then the proxy tells the server
+to finish the remote statement handle. Of course this slows down things
+quite a lot, but is perfectly good for reducing memory usage with
+persistent connections.
+
+However, if you set the I<proxy_no_finish> attribute to a TRUE value,
+either in the database handle or in the statement handle, then finish()
+calls will be suppressed. This is what you want, for example, in small
+and fast CGI applications.
+
+=item proxy_quote
+
+This attribute can be used to reduce network traffic: By default calls
+to $dbh->quote() are passed to the remote driver. Of course this slows
+down things quite a lot, but is the safest default behaviour.
+
+However, if you set the I<proxy_quote> attribute to the value 'C<local>'
+either in the database handle or in the statement handle, and the call
+to quote has only one parameter, then the local default DBI quote
+method will be used (which will be faster but may be wrong).
+
+=back
+
+=head1 KNOWN ISSUES
+
+=head2 Unproxied method calls
+
+If a method isn't being proxied, try declaring a stub sub in the appropriate
+package (DBD::Proxy::db for a dbh method, and DBD::Proxy::st for an sth method).
+For example:
+
+ sub DBD::Proxy::db::selectall_arrayref;
+
+That will enable selectall_arrayref to be proxied.
+
+Currently many methods aren't explicitly proxied and so you get the DBI's
+default methods executed on the client.
+
+Some of those methods, like selectall_arrayref, may then call other methods
+that are proxied (selectall_arrayref calls fetchall_arrayref which calls fetch
+which is proxied). So things may appear to work but operate more slowly than
+they could.
+
+This may all change in a later version.
+
+=head2 Complex handle attributes
+
+Sometimes handles are having complex attributes like hash refs or
+array refs and not simple strings or integers. For example, with
+DBD::CSV, you would like to write something like
+
+ $dbh->{"csv_tables"}->{"passwd"} =
+        { "sep_char" => ":", "eol" => "\n" };
+
+The above example would advise the CSV driver to assume the file
+"passwd" to be in the format of the /etc/passwd file: Colons as
+separators and a line feed without carriage return as line
+terminator.
+
+Surprisingly this example doesn't work with the proxy driver. To understand
+the reasons, you should consider the following: The Perl compiler is
+executing the above example in two steps:
+
+=over
+
+=item 1
+
+The first step is fetching the value of the key "csv_tables" in the
+handle $dbh. The value returned is complex, a hash ref.
+
+=item 2
+
+The second step is storing some value (the right hand side of the
+assignment) as the key "passwd" in the hash ref from step 1.
+
+=back
+
+This becomes a little bit clearer, if we rewrite the above code:
+
+ $tables = $dbh->{"csv_tables"};
+    $tables->{"passwd"} = { "sep_char" => ":", "eol" => "\n" };
+
+While the examples work fine without the proxy, they fail due to a
+subtle difference in step 1: By DBI magic, the hash ref
+$dbh->{'csv_tables'} is returned from the server to the client.
+The client creates a local copy. This local copy is the result of
+step 1. In other words, step 2 modifies a local copy of the hash ref,
+but not the server's hash ref.
+
+The workaround is storing the modified local copy back to the server:
+
+ $tables = $dbh->{"csv_tables"};
+    $tables->{"passwd"} = { "sep_char" => ":", "eol" => "\n" };
+ $dbh->{"csv_tables"} = $tables;
+
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is Copyright (c) 1997, 1998
+
+ Jochen Wiedmann
+ Am Eisteich 9
+ 72555 Metzingen
+ Germany
+
+ Email: joe@ispsoft.de
+ Phone: +49 7123 14887
+
+The DBD::Proxy module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. In particular permission
+is granted to Tim Bunce for distributing this as a part of the DBI.
+
+
+=head1 SEE ALSO
+
+L<DBI>, L<RPC::PlClient>, L<Storable>
+
+=cut
diff --git a/lib/DBD/Sponge.pm b/lib/DBD/Sponge.pm
new file mode 100644
index 0000000..2413bc0
--- /dev/null
+++ b/lib/DBD/Sponge.pm
@@ -0,0 +1,305 @@
+{
+	package DBD::Sponge;
+
+	require DBI;
+	require Carp;
+
+	our @EXPORT = qw(); # Do NOT @EXPORT anything.
+	our $VERSION = sprintf("12.%06d", q$Revision: 10002 $ =~ /(\d+)/o);
+
+
+# $Id: Sponge.pm 10002 2007-09-26 21:03:25Z timbo $
+#
+# Copyright (c) 1994-2003 Tim Bunce Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+	$drh = undef;	# holds driver handle once initialised
+	my $methods_already_installed;
+
+	# Standard DBD entry point: create (once) and return the singleton
+	# driver handle.
+	sub driver{
+	    return $drh if $drh;
+
+	    # install_method() must run only once per process, even if the
+	    # driver handle is recreated (e.g. after CLONE in a new thread).
+	    DBD::Sponge::db->install_method("sponge_test_installed_method")
+		unless $methods_already_installed++;
+
+	    my($class, $attr) = @_;
+	    $class .= "::dr";
+	    ($drh) = DBI::_new_drh($class, {
+		'Name' => 'Sponge',
+		'Version' => $VERSION,
+		'Attribution' => "DBD::Sponge $VERSION (fake cursor driver) by Tim Bunce",
+		});
+	    $drh;
+	}
+
+	# Perl ithreads hook: handles must not be shared across threads,
+	# so discard the cached driver handle in the new thread.
+	sub CLONE {
+	    undef $drh;
+	}
+}
+
+
+{	package DBD::Sponge::dr; # ====== DRIVER ======
+	# No driver-private C data structure is needed for this pure-Perl driver.
+	$imp_data_size = 0;
+	# we use default (dummy) connect method
+}
+
+
+{	package DBD::Sponge::db; # ====== DATABASE ======
+	$imp_data_size = 0;
+	use strict;
+
+	# prepare() is where Sponge does all its work: the caller supplies
+	# the result data ('rows') and column metadata via \%attribs rather
+	# than real SQL. Returns the outer statement handle, or undef via
+	# set_err on bad arguments.
+	sub prepare {
+	    my($dbh, $statement, $attribs) = @_;
+	    my $rows = delete $attribs->{'rows'}
+		or return $dbh->set_err($DBI::stderr,"No rows attribute supplied to prepare");
+	    my ($outer, $sth) = DBI::_new_sth($dbh, {
+		'Statement'   => $statement,
+		'rows'        => $rows,
+		(map { exists $attribs->{$_} ? ($_=>$attribs->{$_}) : () }
+		    qw(execute_hook)
+		),
+	    });
+	    # Optionally copy error-handling attributes from a template handle.
+	    if (my $behave_like = $attribs->{behave_like}) {
+		$outer->{$_} = $behave_like->{$_}
+		    foreach (qw(RaiseError PrintError HandleError ShowErrorStatement));
+	    }
+
+	    if ($statement =~ /^\s*insert\b/) {	# very basic, just for testing execute_array()
+		$sth->{is_insert} = 1;
+		my $NUM_OF_PARAMS = $attribs->{NUM_OF_PARAMS}
+		    or return $dbh->set_err($DBI::stderr,"NUM_OF_PARAMS not specified for INSERT statement");
+		$sth->STORE('NUM_OF_PARAMS' => $attribs->{NUM_OF_PARAMS} );
+	    }
+	    else {	#assume select
+
+		# we need to set NUM_OF_FIELDS
+		# Derive the column count from the first available source:
+		# explicit attribute, NAME/TYPE metadata, or the first data row.
+		my $numFields;
+		if ($attribs->{'NUM_OF_FIELDS'}) {
+		    $numFields = $attribs->{'NUM_OF_FIELDS'};
+		} elsif ($attribs->{'NAME'}) {
+		    $numFields = @{$attribs->{NAME}};
+		} elsif ($attribs->{'TYPE'}) {
+		    $numFields = @{$attribs->{TYPE}};
+		} elsif (my $firstrow = $rows->[0]) {
+		    $numFields = scalar @$firstrow;
+		} else {
+		    return $dbh->set_err($DBI::stderr, 'Cannot determine NUM_OF_FIELDS');
+		}
+		$sth->STORE('NUM_OF_FIELDS' => $numFields);
+		# Fill in reasonable defaults for any metadata not supplied.
+		$sth->{NAME} = $attribs->{NAME}
+			|| [ map { "col$_" } 1..$numFields ];
+		$sth->{TYPE} = $attribs->{TYPE}
+			|| [ (DBI::SQL_VARCHAR()) x $numFields ];
+		$sth->{PRECISION} = $attribs->{PRECISION}
+			|| [ map { length($sth->{NAME}->[$_]) } 0..$numFields -1 ];
+		$sth->{SCALE} = $attribs->{SCALE}
+			|| [ (0) x $numFields ];
+		$sth->{NULLABLE} = $attribs->{NULLABLE}
+			|| [ (2) x $numFields ];
+	    }
+
+	    $outer;
+	}
+
+	# Minimal type_info_all: advertises a single VARCHAR type.
+	sub type_info_all {
+	    my ($dbh) = @_;
+	    my $ti = [
+		{	TYPE_NAME	=> 0,
+			DATA_TYPE	=> 1,
+			PRECISION	=> 2,
+			LITERAL_PREFIX	=> 3,
+			LITERAL_SUFFIX	=> 4,
+			CREATE_PARAMS	=> 5,
+			NULLABLE	=> 6,
+			CASE_SENSITIVE	=> 7,
+			SEARCHABLE	=> 8,
+			UNSIGNED_ATTRIBUTE=> 9,
+			MONEY		=> 10,
+			AUTO_INCREMENT	=> 11,
+			LOCAL_TYPE_NAME	=> 12,
+			MINIMUM_SCALE	=> 13,
+			MAXIMUM_SCALE	=> 14,
+		},
+		[ 'VARCHAR', DBI::SQL_VARCHAR(), undef, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ],
+	    ];
+	    return $ti;
+	}
+
+	sub FETCH {
+	    my ($dbh, $attrib) = @_;
+	    # In reality this would interrogate the database engine to
+	    # either return dynamic values that cannot be precomputed
+	    # or fetch and cache attribute values too expensive to prefetch.
+	    return 1 if $attrib eq 'AutoCommit';
+	    # else pass up to DBI to handle
+	    return $dbh->SUPER::FETCH($attrib);
+	}
+
+	sub STORE {
+	    my ($dbh, $attrib, $value) = @_;
+	    # would normally validate and only store known attributes
+	    # else pass up to DBI to handle
+	    if ($attrib eq 'AutoCommit') {
+		# AutoCommit is always on for this driver; turning it off
+		# is a hard error.
+		return 1 if $value; # is already set
+		Carp::croak("Can't disable AutoCommit");
+	    }
+	    return $dbh->SUPER::STORE($attrib, $value);
+	}
+
+	# Demonstration method registered via install_method() in driver();
+	# used by the DBI test suite.
+	sub sponge_test_installed_method {
+	    my ($dbh, @args) = @_;
+	    return $dbh->set_err(42, "not enough parameters") unless @args >= 2;
+	    return \@args;
+	}
+}
+
+
+{	package DBD::Sponge::st; # ====== STATEMENT ======
+	$imp_data_size = 0;
+	use strict;
+
+	sub execute {
+	    my $sth = shift;
+
+	    # hack to support ParamValues (when not using bind_param)
+	    $sth->{ParamValues} = (@_) ? { map { $_ => $_[$_-1] } 1..@_ } : undef;
+
+	    # Optional user callback supplied at prepare() time; a false
+	    # return value aborts the execute.
+	    if (my $hook = $sth->{execute_hook}) {
+		&$hook($sth, @_) or return;
+	    }
+
+	    if ($sth->{is_insert}) {
+		my $row;
+		$row = (@_) ? [ @_ ] : die "bind_param not supported yet" ;
+		my $NUM_OF_PARAMS = $sth->{NUM_OF_PARAMS};
+		return $sth->set_err($DBI::stderr, @$row." values bound (@$row) but $NUM_OF_PARAMS expected")
+		    if @$row != $NUM_OF_PARAMS;
+		{ local $^W; $sth->trace_msg("inserting (@$row)\n"); }
+		push @{ $sth->{rows} }, $row;
+	    }
+	    else {	# mark select sth as Active
+		$sth->STORE(Active => 1);
+	    }
+	    # else do nothing for select as data is already in $sth->{rows}
+	    return 1;
+	}
+
+	# Pull the next row off the in-memory data; deactivate the handle
+	# once the data is exhausted.
+	sub fetch {
+	    my ($sth) = @_;
+	    my $row = shift @{$sth->{'rows'}};
+	    unless ($row) {
+		$sth->STORE(Active => 0);
+		return undef;
+	    }
+	    return $sth->_set_fbav($row);
+	}
+	*fetchrow_arrayref = \&fetch;
+
+	sub FETCH {
+	    my ($sth, $attrib) = @_;
+	    # would normally validate and only fetch known attributes
+	    # else pass up to DBI to handle
+	    return $sth->SUPER::FETCH($attrib);
+	}
+
+	sub STORE {
+	    my ($sth, $attrib, $value) = @_;
+	    # would normally validate and only store known attributes
+	    # else pass up to DBI to handle
+	    return $sth->SUPER::STORE($attrib, $value);
+	}
+}
+
+1;
+
+__END__
+
+=pod
+
+=head1 NAME
+
+DBD::Sponge - Create a DBI statement handle from Perl data
+
+=head1 SYNOPSIS
+
+ my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 });
+ my $sth = $sponge->prepare($statement, {
+ rows => $data,
+ NAME => $names,
+ %attr
+ }
+ );
+
+=head1 DESCRIPTION
+
+DBD::Sponge is useful for making a Perl data structure accessible through a
+standard DBI statement handle. This may be useful to DBD module authors who
+need to transform data in this way.
+
+=head1 METHODS
+
+=head2 connect()
+
+ my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 });
+
+Here's a sample syntax for creating a database handle for the Sponge driver.
+No username and password are needed.
+
+=head2 prepare()
+
+ my $sth = $sponge->prepare($statement, {
+ rows => $data,
+ NAME => $names,
+ %attr
+ }
+ );
+
+=over 4
+
+=item *
+
+The C<$statement> here is an arbitrary statement or name you want
+to provide as identity of your data. If you're using DBI::Profile
+it will appear in the profile data.
+
+Generally it's expected that you are preparing a statement handle
+as if a C<select> statement happened.
+
+=item *
+
+C<$data> is a reference to the data you are providing, given as an array of arrays.
+
+=item *
+
+C<$names> is a reference to an array of column names for the C<$data> you are providing.
+The number and order should match the number and ordering of the C<$data> columns.
+
+=item *
+
+C<%attr> is a hash of other standard DBI attributes that you might pass to a prepare statement.
+
+Currently only NAME, TYPE, and PRECISION are supported.
+
+=back
+
+=head1 BUGS
+
+Using this module to prepare INSERT-like statements is not currently documented.
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is Copyright (c) 2003 Tim Bunce
+
+Documentation initially written by Mark Stosberg
+
+The DBD::Sponge module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. In particular permission
+is granted to Tim Bunce for distributing this as a part of the DBI.
+
+=head1 SEE ALSO
+
+L<DBI>
+
+=cut
diff --git a/lib/DBI/Const/GetInfo/ANSI.pm b/lib/DBI/Const/GetInfo/ANSI.pm
new file mode 100644
index 0000000..428ce37
--- /dev/null
+++ b/lib/DBI/Const/GetInfo/ANSI.pm
@@ -0,0 +1,236 @@
+# $Id: ANSI.pm 8696 2007-01-24 23:12:38Z timbo $
+#
+# Copyright (c) 2002 Tim Bunce Ireland
+#
+# Constant data describing ANSI CLI info types and return values for the
+# SQLGetInfo() method of ODBC.
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+package DBI::Const::GetInfo::ANSI;
+
+=head1 NAME
+
+DBI::Const::GetInfo::ANSI - ISO/IEC SQL/CLI Constants for GetInfo
+
+=head1 SYNOPSIS
+
+ The API for this module is private and subject to change.
+
+=head1 DESCRIPTION
+
+Information requested by GetInfo().
+
+See: A.1 C header file SQLCLI.H, Page 316, 317.
+
+The API for this module is private and subject to change.
+
+=head1 REFERENCES
+
+  ISO/IEC FCD 9075-3:200x Information technology - Database Languages -
+  SQL - Part 3: Call-Level Interface (SQL/CLI)
+
+  SC32 N00744 = WG3:VIE-005 = H2-2002-007
+
+  Date: 2002-01-15
+
+=cut
+
+# $VERSION is declared lexically and split across two lines, presumably
+# to keep it out of the CPAN indexer's sight -- TODO confirm intent.
+my
+$VERSION = sprintf("2.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
+
+
+# %InfoTypes maps ANSI SQL/CLI information-type names to their numeric
+# codes as passed to GetInfo().
+%InfoTypes =
+(
+  SQL_ALTER_TABLE                     =>    86
+, SQL_CATALOG_NAME                    => 10003
+, SQL_COLLATING_SEQUENCE              => 10004
+, SQL_CURSOR_COMMIT_BEHAVIOR          =>    23
+, SQL_CURSOR_SENSITIVITY              => 10001
+, SQL_DATA_SOURCE_NAME                =>     2
+, SQL_DATA_SOURCE_READ_ONLY           =>    25
+, SQL_DBMS_NAME                       =>    17
+, SQL_DBMS_VERSION                    =>    18
+, SQL_DEFAULT_TRANSACTION_ISOLATION   =>    26
+, SQL_DESCRIBE_PARAMETER              => 10002
+, SQL_FETCH_DIRECTION                 =>     8
+, SQL_GETDATA_EXTENSIONS              =>    81
+, SQL_IDENTIFIER_CASE                 =>    28
+, SQL_INTEGRITY                       =>    73
+, SQL_MAXIMUM_CATALOG_NAME_LENGTH     =>    34
+, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY     =>    97
+, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY     =>    99
+, SQL_MAXIMUM_COLUMNS_IN_SELECT       =>   100
+, SQL_MAXIMUM_COLUMNS_IN_TABLE        =>   101
+, SQL_MAXIMUM_COLUMN_NAME_LENGTH      =>    30
+, SQL_MAXIMUM_CONCURRENT_ACTIVITIES   =>     1
+, SQL_MAXIMUM_CURSOR_NAME_LENGTH      =>    31
+, SQL_MAXIMUM_DRIVER_CONNECTIONS      =>     0
+, SQL_MAXIMUM_IDENTIFIER_LENGTH       => 10005
+, SQL_MAXIMUM_SCHEMA_NAME_LENGTH      =>    32
+, SQL_MAXIMUM_STMT_OCTETS             => 20000
+, SQL_MAXIMUM_STMT_OCTETS_DATA        => 20001
+, SQL_MAXIMUM_STMT_OCTETS_SCHEMA      => 20002
+, SQL_MAXIMUM_TABLES_IN_SELECT        =>   106
+, SQL_MAXIMUM_TABLE_NAME_LENGTH       =>    35
+, SQL_MAXIMUM_USER_NAME_LENGTH        =>   107
+, SQL_NULL_COLLATION                  =>    85
+, SQL_ORDER_BY_COLUMNS_IN_SELECT      =>    90
+, SQL_OUTER_JOIN_CAPABILITIES         =>   115
+, SQL_SCROLL_CONCURRENCY              =>    43
+, SQL_SEARCH_PATTERN_ESCAPE           =>    14
+, SQL_SERVER_NAME                     =>    13
+, SQL_SPECIAL_CHARACTERS              =>    94
+, SQL_TRANSACTION_CAPABLE             =>    46
+, SQL_TRANSACTION_ISOLATION_OPTION    =>    72
+, SQL_USER_NAME                       =>    47
+);
+
+=head2 %ReturnTypes
+
+See: Codes and data types for implementation information (Table 28), Page 85, 86.
+
+Mapped to ODBC datatype names.
+
+=cut
+
+# %ReturnTypes maps each info type to the C datatype of the value
+# GetInfo() returns for it (trailing comments note the SQL maxlen).
+%ReturnTypes =                                            #          maxlen
+(
+  SQL_ALTER_TABLE                     => 'SQLUINTEGER bitmask'      # INTEGER
+, SQL_CATALOG_NAME                    => 'SQLCHAR'                  # CHARACTER (1)
+, SQL_COLLATING_SEQUENCE              => 'SQLCHAR'                  # CHARACTER (254)
+, SQL_CURSOR_COMMIT_BEHAVIOR          => 'SQLUSMALLINT'             # SMALLINT
+, SQL_CURSOR_SENSITIVITY              => 'SQLUINTEGER'              # INTEGER
+, SQL_DATA_SOURCE_NAME                => 'SQLCHAR'                  # CHARACTER (128)
+, SQL_DATA_SOURCE_READ_ONLY           => 'SQLCHAR'                  # CHARACTER (1)
+, SQL_DBMS_NAME                       => 'SQLCHAR'                  # CHARACTER (254)
+, SQL_DBMS_VERSION                    => 'SQLCHAR'                  # CHARACTER (254)
+, SQL_DEFAULT_TRANSACTION_ISOLATION   => 'SQLUINTEGER'              # INTEGER
+, SQL_DESCRIBE_PARAMETER              => 'SQLCHAR'                  # CHARACTER (1)
+, SQL_FETCH_DIRECTION                 => 'SQLUINTEGER bitmask'      # INTEGER
+, SQL_GETDATA_EXTENSIONS              => 'SQLUINTEGER bitmask'      # INTEGER
+, SQL_IDENTIFIER_CASE                 => 'SQLUSMALLINT'             # SMALLINT
+, SQL_INTEGRITY                       => 'SQLCHAR'                  # CHARACTER (1)
+, SQL_MAXIMUM_CATALOG_NAME_LENGTH     => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY     => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY     => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_COLUMNS_IN_SELECT       => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_COLUMNS_IN_TABLE        => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_COLUMN_NAME_LENGTH      => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_CONCURRENT_ACTIVITIES   => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_CURSOR_NAME_LENGTH      => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_DRIVER_CONNECTIONS      => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_IDENTIFIER_LENGTH       => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_SCHEMA_NAME_LENGTH      => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_STMT_OCTETS             => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_STMT_OCTETS_DATA        => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_STMT_OCTETS_SCHEMA      => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_TABLES_IN_SELECT        => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_TABLE_NAME_LENGTH       => 'SQLUSMALLINT'             # SMALLINT
+, SQL_MAXIMUM_USER_NAME_LENGTH        => 'SQLUSMALLINT'             # SMALLINT
+, SQL_NULL_COLLATION                  => 'SQLUSMALLINT'             # SMALLINT
+, SQL_ORDER_BY_COLUMNS_IN_SELECT      => 'SQLCHAR'                  # CHARACTER (1)
+, SQL_OUTER_JOIN_CAPABILITIES         => 'SQLUINTEGER bitmask'      # INTEGER
+, SQL_SCROLL_CONCURRENCY              => 'SQLUINTEGER bitmask'      # INTEGER
+, SQL_SEARCH_PATTERN_ESCAPE           => 'SQLCHAR'                  # CHARACTER (1)
+, SQL_SERVER_NAME                     => 'SQLCHAR'                  # CHARACTER (128)
+, SQL_SPECIAL_CHARACTERS              => 'SQLCHAR'                  # CHARACTER (254)
+, SQL_TRANSACTION_CAPABLE             => 'SQLUSMALLINT'             # SMALLINT
+, SQL_TRANSACTION_ISOLATION_OPTION    => 'SQLUINTEGER bitmask'      # INTEGER
+, SQL_USER_NAME                       => 'SQLCHAR'                  # CHARACTER (128)
+);
+
+=head2 %ReturnValues
+
+See: A.1 C header file SQLCLI.H, Page 317, 318.
+
+=cut
+
+# %ReturnValues maps an info type to a hash of symbolic names for the
+# values (or bitmask bits) GetInfo() may return for it.
+$ReturnValues{SQL_ALTER_TABLE} =
+{
+  SQL_AT_ADD_COLUMN                   => 0x00000001
+, SQL_AT_DROP_COLUMN                  => 0x00000002
+, SQL_AT_ALTER_COLUMN                 => 0x00000004
+, SQL_AT_ADD_CONSTRAINT               => 0x00000008
+, SQL_AT_DROP_CONSTRAINT              => 0x00000010
+};
+$ReturnValues{SQL_CURSOR_COMMIT_BEHAVIOR} =
+{
+  SQL_CB_DELETE                       => 0
+, SQL_CB_CLOSE                        => 1
+, SQL_CB_PRESERVE                     => 2
+};
+$ReturnValues{SQL_FETCH_DIRECTION} =
+{
+  SQL_FD_FETCH_NEXT                   => 0x00000001
+, SQL_FD_FETCH_FIRST                  => 0x00000002
+, SQL_FD_FETCH_LAST                   => 0x00000004
+, SQL_FD_FETCH_PRIOR                  => 0x00000008
+, SQL_FD_FETCH_ABSOLUTE               => 0x00000010
+, SQL_FD_FETCH_RELATIVE               => 0x00000020
+};
+$ReturnValues{SQL_GETDATA_EXTENSIONS} =
+{
+  SQL_GD_ANY_COLUMN                   => 0x00000001
+, SQL_GD_ANY_ORDER                    => 0x00000002
+};
+$ReturnValues{SQL_IDENTIFIER_CASE} =
+{
+  SQL_IC_UPPER                        => 1
+, SQL_IC_LOWER                        => 2
+, SQL_IC_SENSITIVE                    => 3
+, SQL_IC_MIXED                        => 4
+};
+$ReturnValues{SQL_NULL_COLLATION} =
+{
+  SQL_NC_HIGH                         => 1
+, SQL_NC_LOW                          => 2
+};
+$ReturnValues{SQL_OUTER_JOIN_CAPABILITIES} =
+{
+  SQL_OUTER_JOIN_LEFT                 => 0x00000001
+, SQL_OUTER_JOIN_RIGHT                => 0x00000002
+, SQL_OUTER_JOIN_FULL                 => 0x00000004
+, SQL_OUTER_JOIN_NESTED               => 0x00000008
+, SQL_OUTER_JOIN_NOT_ORDERED          => 0x00000010
+, SQL_OUTER_JOIN_INNER                => 0x00000020
+, SQL_OUTER_JOIN_ALL_COMPARISON_OPS   => 0x00000040
+};
+$ReturnValues{SQL_SCROLL_CONCURRENCY} =
+{
+  SQL_SCCO_READ_ONLY                  => 0x00000001
+, SQL_SCCO_LOCK                       => 0x00000002
+, SQL_SCCO_OPT_ROWVER                 => 0x00000004
+, SQL_SCCO_OPT_VALUES                 => 0x00000008
+};
+$ReturnValues{SQL_TRANSACTION_ACCESS_MODE} =
+{
+  SQL_TRANSACTION_READ_ONLY           => 0x00000001
+, SQL_TRANSACTION_READ_WRITE          => 0x00000002
+};
+$ReturnValues{SQL_TRANSACTION_CAPABLE} =
+{
+  SQL_TC_NONE                         => 0
+, SQL_TC_DML                          => 1
+, SQL_TC_ALL                          => 2
+, SQL_TC_DDL_COMMIT                   => 3
+, SQL_TC_DDL_IGNORE                   => 4
+};
+$ReturnValues{SQL_TRANSACTION_ISOLATION} =
+{
+  SQL_TRANSACTION_READ_UNCOMMITTED    => 0x00000001
+, SQL_TRANSACTION_READ_COMMITTED      => 0x00000002
+, SQL_TRANSACTION_REPEATABLE_READ     => 0x00000004
+, SQL_TRANSACTION_SERIALIZABLE        => 0x00000008
+};
+
+1;
+
+=head1 TODO
+
+Corrections, e.g.:
+
+ SQL_TRANSACTION_ISOLATION_OPTION vs. SQL_TRANSACTION_ISOLATION
+
+=cut
diff --git a/lib/DBI/Const/GetInfo/ODBC.pm b/lib/DBI/Const/GetInfo/ODBC.pm
new file mode 100644
index 0000000..0f71a06
--- /dev/null
+++ b/lib/DBI/Const/GetInfo/ODBC.pm
@@ -0,0 +1,1363 @@
+# $Id: ODBC.pm 11373 2008-06-02 19:01:33Z timbo $
+#
+# Copyright (c) 2002 Tim Bunce Ireland
+#
+# Constant data describing Microsoft ODBC info types and return values
+# for the SQLGetInfo() method of ODBC.
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+package DBI::Const::GetInfo::ODBC;
+
+=head1 NAME
+
+DBI::Const::GetInfo::ODBC - ODBC Constants for GetInfo
+
+=head1 SYNOPSIS
+
+ The API for this module is private and subject to change.
+
+=head1 DESCRIPTION
+
+Information requested by GetInfo().
+
+The API for this module is private and subject to change.
+
+=head1 REFERENCES
+
+ MDAC SDK 2.6
+ ODBC version number (0x0351)
+
+ sql.h
+ sqlext.h
+
+=cut
+
+my
+$VERSION = sprintf("2.%06d", q$Revision: 11373 $ =~ /(\d+)/o);
+
+
+%InfoTypes =
+(
+ SQL_ACCESSIBLE_PROCEDURES => 20
+, SQL_ACCESSIBLE_TABLES => 19
+, SQL_ACTIVE_CONNECTIONS => 0
+, SQL_ACTIVE_ENVIRONMENTS => 116
+, SQL_ACTIVE_STATEMENTS => 1
+, SQL_AGGREGATE_FUNCTIONS => 169
+, SQL_ALTER_DOMAIN => 117
+, SQL_ALTER_TABLE => 86
+, SQL_ASYNC_MODE => 10021
+, SQL_BATCH_ROW_COUNT => 120
+, SQL_BATCH_SUPPORT => 121
+, SQL_BOOKMARK_PERSISTENCE => 82
+, SQL_CATALOG_LOCATION => 114 # SQL_QUALIFIER_LOCATION
+, SQL_CATALOG_NAME => 10003
+, SQL_CATALOG_NAME_SEPARATOR => 41 # SQL_QUALIFIER_NAME_SEPARATOR
+, SQL_CATALOG_TERM => 42 # SQL_QUALIFIER_TERM
+, SQL_CATALOG_USAGE => 92 # SQL_QUALIFIER_USAGE
+, SQL_COLLATION_SEQ => 10004
+, SQL_COLUMN_ALIAS => 87
+, SQL_CONCAT_NULL_BEHAVIOR => 22
+, SQL_CONVERT_BIGINT => 53
+, SQL_CONVERT_BINARY => 54
+, SQL_CONVERT_BIT => 55
+, SQL_CONVERT_CHAR => 56
+, SQL_CONVERT_DATE => 57
+, SQL_CONVERT_DECIMAL => 58
+, SQL_CONVERT_DOUBLE => 59
+, SQL_CONVERT_FLOAT => 60
+, SQL_CONVERT_FUNCTIONS => 48
+, SQL_CONVERT_GUID => 173
+, SQL_CONVERT_INTEGER => 61
+, SQL_CONVERT_INTERVAL_DAY_TIME => 123
+, SQL_CONVERT_INTERVAL_YEAR_MONTH => 124
+, SQL_CONVERT_LONGVARBINARY => 71
+, SQL_CONVERT_LONGVARCHAR => 62
+, SQL_CONVERT_NUMERIC => 63
+, SQL_CONVERT_REAL => 64
+, SQL_CONVERT_SMALLINT => 65
+, SQL_CONVERT_TIME => 66
+, SQL_CONVERT_TIMESTAMP => 67
+, SQL_CONVERT_TINYINT => 68
+, SQL_CONVERT_VARBINARY => 69
+, SQL_CONVERT_VARCHAR => 70
+, SQL_CONVERT_WCHAR => 122
+, SQL_CONVERT_WLONGVARCHAR => 125
+, SQL_CONVERT_WVARCHAR => 126
+, SQL_CORRELATION_NAME => 74
+, SQL_CREATE_ASSERTION => 127
+, SQL_CREATE_CHARACTER_SET => 128
+, SQL_CREATE_COLLATION => 129
+, SQL_CREATE_DOMAIN => 130
+, SQL_CREATE_SCHEMA => 131
+, SQL_CREATE_TABLE => 132
+, SQL_CREATE_TRANSLATION => 133
+, SQL_CREATE_VIEW => 134
+, SQL_CURSOR_COMMIT_BEHAVIOR => 23
+, SQL_CURSOR_ROLLBACK_BEHAVIOR => 24
+, SQL_CURSOR_SENSITIVITY => 10001
+, SQL_DATA_SOURCE_NAME => 2
+, SQL_DATA_SOURCE_READ_ONLY => 25
+, SQL_DATABASE_NAME => 16
+, SQL_DATETIME_LITERALS => 119
+, SQL_DBMS_NAME => 17
+, SQL_DBMS_VER => 18
+, SQL_DDL_INDEX => 170
+, SQL_DEFAULT_TXN_ISOLATION => 26
+, SQL_DESCRIBE_PARAMETER => 10002
+, SQL_DM_VER => 171
+, SQL_DRIVER_HDBC => 3
+, SQL_DRIVER_HDESC => 135
+, SQL_DRIVER_HENV => 4
+, SQL_DRIVER_HLIB => 76
+, SQL_DRIVER_HSTMT => 5
+, SQL_DRIVER_NAME => 6
+, SQL_DRIVER_ODBC_VER => 77
+, SQL_DRIVER_VER => 7
+, SQL_DROP_ASSERTION => 136
+, SQL_DROP_CHARACTER_SET => 137
+, SQL_DROP_COLLATION => 138
+, SQL_DROP_DOMAIN => 139
+, SQL_DROP_SCHEMA => 140
+, SQL_DROP_TABLE => 141
+, SQL_DROP_TRANSLATION => 142
+, SQL_DROP_VIEW => 143
+, SQL_DYNAMIC_CURSOR_ATTRIBUTES1 => 144
+, SQL_DYNAMIC_CURSOR_ATTRIBUTES2 => 145
+, SQL_EXPRESSIONS_IN_ORDERBY => 27
+, SQL_FETCH_DIRECTION => 8
+, SQL_FILE_USAGE => 84
+, SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1 => 146
+, SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2 => 147
+, SQL_GETDATA_EXTENSIONS => 81
+, SQL_GROUP_BY => 88
+, SQL_IDENTIFIER_CASE => 28
+, SQL_IDENTIFIER_QUOTE_CHAR => 29
+, SQL_INDEX_KEYWORDS => 148
+# SQL_INFO_DRIVER_START => 1000
+# SQL_INFO_FIRST => 0
+# SQL_INFO_LAST => 114 # SQL_QUALIFIER_LOCATION
+, SQL_INFO_SCHEMA_VIEWS => 149
+, SQL_INSERT_STATEMENT => 172
+, SQL_INTEGRITY => 73
+, SQL_KEYSET_CURSOR_ATTRIBUTES1 => 150
+, SQL_KEYSET_CURSOR_ATTRIBUTES2 => 151
+, SQL_KEYWORDS => 89
+, SQL_LIKE_ESCAPE_CLAUSE => 113
+, SQL_LOCK_TYPES => 78
+, SQL_MAXIMUM_CATALOG_NAME_LENGTH => 34 # SQL_MAX_CATALOG_NAME_LEN
+, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY => 97 # SQL_MAX_COLUMNS_IN_GROUP_BY
+, SQL_MAXIMUM_COLUMNS_IN_INDEX => 98 # SQL_MAX_COLUMNS_IN_INDEX
+, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY => 99 # SQL_MAX_COLUMNS_IN_ORDER_BY
+, SQL_MAXIMUM_COLUMNS_IN_SELECT => 100 # SQL_MAX_COLUMNS_IN_SELECT
+, SQL_MAXIMUM_COLUMN_NAME_LENGTH => 30 # SQL_MAX_COLUMN_NAME_LEN
+, SQL_MAXIMUM_CONCURRENT_ACTIVITIES => 1 # SQL_MAX_CONCURRENT_ACTIVITIES
+, SQL_MAXIMUM_CURSOR_NAME_LENGTH => 31 # SQL_MAX_CURSOR_NAME_LEN
+, SQL_MAXIMUM_DRIVER_CONNECTIONS => 0 # SQL_MAX_DRIVER_CONNECTIONS
+, SQL_MAXIMUM_IDENTIFIER_LENGTH => 10005 # SQL_MAX_IDENTIFIER_LEN
+, SQL_MAXIMUM_INDEX_SIZE => 102 # SQL_MAX_INDEX_SIZE
+, SQL_MAXIMUM_ROW_SIZE => 104 # SQL_MAX_ROW_SIZE
+, SQL_MAXIMUM_SCHEMA_NAME_LENGTH => 32 # SQL_MAX_SCHEMA_NAME_LEN
+, SQL_MAXIMUM_STATEMENT_LENGTH => 105 # SQL_MAX_STATEMENT_LEN
+, SQL_MAXIMUM_TABLES_IN_SELECT => 106 # SQL_MAX_TABLES_IN_SELECT
+, SQL_MAXIMUM_USER_NAME_LENGTH => 107 # SQL_MAX_USER_NAME_LEN
+, SQL_MAX_ASYNC_CONCURRENT_STATEMENTS => 10022
+, SQL_MAX_BINARY_LITERAL_LEN => 112
+, SQL_MAX_CATALOG_NAME_LEN => 34
+, SQL_MAX_CHAR_LITERAL_LEN => 108
+, SQL_MAX_COLUMNS_IN_GROUP_BY => 97
+, SQL_MAX_COLUMNS_IN_INDEX => 98
+, SQL_MAX_COLUMNS_IN_ORDER_BY => 99
+, SQL_MAX_COLUMNS_IN_SELECT => 100
+, SQL_MAX_COLUMNS_IN_TABLE => 101
+, SQL_MAX_COLUMN_NAME_LEN => 30
+, SQL_MAX_CONCURRENT_ACTIVITIES => 1
+, SQL_MAX_CURSOR_NAME_LEN => 31
+, SQL_MAX_DRIVER_CONNECTIONS => 0
+, SQL_MAX_IDENTIFIER_LEN => 10005
+, SQL_MAX_INDEX_SIZE => 102
+, SQL_MAX_OWNER_NAME_LEN => 32
+, SQL_MAX_PROCEDURE_NAME_LEN => 33
+, SQL_MAX_QUALIFIER_NAME_LEN => 34
+, SQL_MAX_ROW_SIZE => 104
+, SQL_MAX_ROW_SIZE_INCLUDES_LONG => 103
+, SQL_MAX_SCHEMA_NAME_LEN => 32
+, SQL_MAX_STATEMENT_LEN => 105
+, SQL_MAX_TABLES_IN_SELECT => 106
+, SQL_MAX_TABLE_NAME_LEN => 35
+, SQL_MAX_USER_NAME_LEN => 107
+, SQL_MULTIPLE_ACTIVE_TXN => 37
+, SQL_MULT_RESULT_SETS => 36
+, SQL_NEED_LONG_DATA_LEN => 111
+, SQL_NON_NULLABLE_COLUMNS => 75
+, SQL_NULL_COLLATION => 85
+, SQL_NUMERIC_FUNCTIONS => 49
+, SQL_ODBC_API_CONFORMANCE => 9
+, SQL_ODBC_INTERFACE_CONFORMANCE => 152
+, SQL_ODBC_SAG_CLI_CONFORMANCE => 12
+, SQL_ODBC_SQL_CONFORMANCE => 15
+, SQL_ODBC_SQL_OPT_IEF => 73
+, SQL_ODBC_VER => 10
+, SQL_OJ_CAPABILITIES => 115
+, SQL_ORDER_BY_COLUMNS_IN_SELECT => 90
+, SQL_OUTER_JOINS => 38
+, SQL_OUTER_JOIN_CAPABILITIES => 115 # SQL_OJ_CAPABILITIES
+, SQL_OWNER_TERM => 39
+, SQL_OWNER_USAGE => 91
+, SQL_PARAM_ARRAY_ROW_COUNTS => 153
+, SQL_PARAM_ARRAY_SELECTS => 154
+, SQL_POSITIONED_STATEMENTS => 80
+, SQL_POS_OPERATIONS => 79
+, SQL_PROCEDURES => 21
+, SQL_PROCEDURE_TERM => 40
+, SQL_QUALIFIER_LOCATION => 114
+, SQL_QUALIFIER_NAME_SEPARATOR => 41
+, SQL_QUALIFIER_TERM => 42
+, SQL_QUALIFIER_USAGE => 92
+, SQL_QUOTED_IDENTIFIER_CASE => 93
+, SQL_ROW_UPDATES => 11
+, SQL_SCHEMA_TERM => 39 # SQL_OWNER_TERM
+, SQL_SCHEMA_USAGE => 91 # SQL_OWNER_USAGE
+, SQL_SCROLL_CONCURRENCY => 43
+, SQL_SCROLL_OPTIONS => 44
+, SQL_SEARCH_PATTERN_ESCAPE => 14
+, SQL_SERVER_NAME => 13
+, SQL_SPECIAL_CHARACTERS => 94
+, SQL_SQL92_DATETIME_FUNCTIONS => 155
+, SQL_SQL92_FOREIGN_KEY_DELETE_RULE => 156
+, SQL_SQL92_FOREIGN_KEY_UPDATE_RULE => 157
+, SQL_SQL92_GRANT => 158
+, SQL_SQL92_NUMERIC_VALUE_FUNCTIONS => 159
+, SQL_SQL92_PREDICATES => 160
+, SQL_SQL92_RELATIONAL_JOIN_OPERATORS => 161
+, SQL_SQL92_REVOKE => 162
+, SQL_SQL92_ROW_VALUE_CONSTRUCTOR => 163
+, SQL_SQL92_STRING_FUNCTIONS => 164
+, SQL_SQL92_VALUE_EXPRESSIONS => 165
+, SQL_SQL_CONFORMANCE => 118
+, SQL_STANDARD_CLI_CONFORMANCE => 166
+, SQL_STATIC_CURSOR_ATTRIBUTES1 => 167
+, SQL_STATIC_CURSOR_ATTRIBUTES2 => 168
+, SQL_STATIC_SENSITIVITY => 83
+, SQL_STRING_FUNCTIONS => 50
+, SQL_SUBQUERIES => 95
+, SQL_SYSTEM_FUNCTIONS => 51
+, SQL_TABLE_TERM => 45
+, SQL_TIMEDATE_ADD_INTERVALS => 109
+, SQL_TIMEDATE_DIFF_INTERVALS => 110
+, SQL_TIMEDATE_FUNCTIONS => 52
+, SQL_TRANSACTION_CAPABLE => 46 # SQL_TXN_CAPABLE
+, SQL_TRANSACTION_ISOLATION_OPTION => 72 # SQL_TXN_ISOLATION_OPTION
+, SQL_TXN_CAPABLE => 46
+, SQL_TXN_ISOLATION_OPTION => 72
+, SQL_UNION => 96
+, SQL_UNION_STATEMENT => 96 # SQL_UNION
+, SQL_USER_NAME => 47
+, SQL_XOPEN_CLI_YEAR => 10000
+);
+
+=head2 %ReturnTypes
+
+See: mk:@MSITStore:X:\dm\cli\mdac\sdk26\Docs\odbc.chm::/htm/odbcsqlgetinfo.htm
+
+Markers used in the trailing comments of the table below:
+
+ =>     : this entry is an alias for another information type
+ => !!! : the return type was edited by hand (differs from the cited documentation)
+
+=cut
+
+# %ReturnTypes maps each SQLGetInfo information-type name to a string naming
+# the C type of the value the ODBC driver returns for it: 'SQLCHAR',
+# 'SQLUSMALLINT', 'SQLUINTEGER', or '... bitmask' for bit-field results.
+# The trailing comment on each line is the numeric info-type code; a bare
+# '=>' marks an alias of another info type, '=> !!!' a hand-edited entry
+# (see the POD legend above).  Bitmask entries generally have a matching
+# value table in %ReturnValues below.
+%ReturnTypes =
+(
+  SQL_ACCESSIBLE_PROCEDURES                  => 'SQLCHAR'              #    20
+, SQL_ACCESSIBLE_TABLES                      => 'SQLCHAR'              #    19
+, SQL_ACTIVE_CONNECTIONS                     => 'SQLUSMALLINT'         #     0 =>
+, SQL_ACTIVE_ENVIRONMENTS                    => 'SQLUSMALLINT'         #   116
+, SQL_ACTIVE_STATEMENTS                      => 'SQLUSMALLINT'         #     1 =>
+, SQL_AGGREGATE_FUNCTIONS                    => 'SQLUINTEGER bitmask'  #   169
+, SQL_ALTER_DOMAIN                           => 'SQLUINTEGER bitmask'  #   117
+, SQL_ALTER_TABLE                            => 'SQLUINTEGER bitmask'  #    86
+, SQL_ASYNC_MODE                             => 'SQLUINTEGER'          # 10021
+, SQL_BATCH_ROW_COUNT                        => 'SQLUINTEGER bitmask'  #   120
+, SQL_BATCH_SUPPORT                          => 'SQLUINTEGER bitmask'  #   121
+, SQL_BOOKMARK_PERSISTENCE                   => 'SQLUINTEGER bitmask'  #    82
+, SQL_CATALOG_LOCATION                       => 'SQLUSMALLINT'         #   114
+, SQL_CATALOG_NAME                           => 'SQLCHAR'              # 10003
+, SQL_CATALOG_NAME_SEPARATOR                 => 'SQLCHAR'              #    41
+, SQL_CATALOG_TERM                           => 'SQLCHAR'              #    42
+, SQL_CATALOG_USAGE                          => 'SQLUINTEGER bitmask'  #    92
+, SQL_COLLATION_SEQ                          => 'SQLCHAR'              # 10004
+, SQL_COLUMN_ALIAS                           => 'SQLCHAR'              #    87
+, SQL_CONCAT_NULL_BEHAVIOR                   => 'SQLUSMALLINT'         #    22
+, SQL_CONVERT_BIGINT                         => 'SQLUINTEGER bitmask'  #    53
+, SQL_CONVERT_BINARY                         => 'SQLUINTEGER bitmask'  #    54
+, SQL_CONVERT_BIT                            => 'SQLUINTEGER bitmask'  #    55
+, SQL_CONVERT_CHAR                           => 'SQLUINTEGER bitmask'  #    56
+, SQL_CONVERT_DATE                           => 'SQLUINTEGER bitmask'  #    57
+, SQL_CONVERT_DECIMAL                        => 'SQLUINTEGER bitmask'  #    58
+, SQL_CONVERT_DOUBLE                         => 'SQLUINTEGER bitmask'  #    59
+, SQL_CONVERT_FLOAT                          => 'SQLUINTEGER bitmask'  #    60
+, SQL_CONVERT_FUNCTIONS                      => 'SQLUINTEGER bitmask'  #    48
+, SQL_CONVERT_GUID                           => 'SQLUINTEGER bitmask'  #   173
+, SQL_CONVERT_INTEGER                        => 'SQLUINTEGER bitmask'  #    61
+, SQL_CONVERT_INTERVAL_DAY_TIME              => 'SQLUINTEGER bitmask'  #   123
+, SQL_CONVERT_INTERVAL_YEAR_MONTH            => 'SQLUINTEGER bitmask'  #   124
+, SQL_CONVERT_LONGVARBINARY                  => 'SQLUINTEGER bitmask'  #    71
+, SQL_CONVERT_LONGVARCHAR                    => 'SQLUINTEGER bitmask'  #    62
+, SQL_CONVERT_NUMERIC                        => 'SQLUINTEGER bitmask'  #    63
+, SQL_CONVERT_REAL                           => 'SQLUINTEGER bitmask'  #    64
+, SQL_CONVERT_SMALLINT                       => 'SQLUINTEGER bitmask'  #    65
+, SQL_CONVERT_TIME                           => 'SQLUINTEGER bitmask'  #    66
+, SQL_CONVERT_TIMESTAMP                      => 'SQLUINTEGER bitmask'  #    67
+, SQL_CONVERT_TINYINT                        => 'SQLUINTEGER bitmask'  #    68
+, SQL_CONVERT_VARBINARY                      => 'SQLUINTEGER bitmask'  #    69
+, SQL_CONVERT_VARCHAR                        => 'SQLUINTEGER bitmask'  #    70
+, SQL_CONVERT_WCHAR                          => 'SQLUINTEGER bitmask'  #   122 => !!!
+, SQL_CONVERT_WLONGVARCHAR                   => 'SQLUINTEGER bitmask'  #   125 => !!!
+, SQL_CONVERT_WVARCHAR                       => 'SQLUINTEGER bitmask'  #   126 => !!!
+, SQL_CORRELATION_NAME                       => 'SQLUSMALLINT'         #    74
+, SQL_CREATE_ASSERTION                       => 'SQLUINTEGER bitmask'  #   127
+, SQL_CREATE_CHARACTER_SET                   => 'SQLUINTEGER bitmask'  #   128
+, SQL_CREATE_COLLATION                       => 'SQLUINTEGER bitmask'  #   129
+, SQL_CREATE_DOMAIN                          => 'SQLUINTEGER bitmask'  #   130
+, SQL_CREATE_SCHEMA                          => 'SQLUINTEGER bitmask'  #   131
+, SQL_CREATE_TABLE                           => 'SQLUINTEGER bitmask'  #   132
+, SQL_CREATE_TRANSLATION                     => 'SQLUINTEGER bitmask'  #   133
+, SQL_CREATE_VIEW                            => 'SQLUINTEGER bitmask'  #   134
+, SQL_CURSOR_COMMIT_BEHAVIOR                 => 'SQLUSMALLINT'         #    23
+, SQL_CURSOR_ROLLBACK_BEHAVIOR               => 'SQLUSMALLINT'         #    24
+, SQL_CURSOR_SENSITIVITY                     => 'SQLUINTEGER'          # 10001
+, SQL_DATA_SOURCE_NAME                       => 'SQLCHAR'              #     2
+, SQL_DATA_SOURCE_READ_ONLY                  => 'SQLCHAR'              #    25
+, SQL_DATABASE_NAME                          => 'SQLCHAR'              #    16
+, SQL_DATETIME_LITERALS                      => 'SQLUINTEGER bitmask'  #   119
+, SQL_DBMS_NAME                              => 'SQLCHAR'              #    17
+, SQL_DBMS_VER                               => 'SQLCHAR'              #    18
+, SQL_DDL_INDEX                              => 'SQLUINTEGER bitmask'  #   170
+, SQL_DEFAULT_TXN_ISOLATION                  => 'SQLUINTEGER'          #    26
+, SQL_DESCRIBE_PARAMETER                     => 'SQLCHAR'              # 10002
+, SQL_DM_VER                                 => 'SQLCHAR'              #   171
+, SQL_DRIVER_HDBC                            => 'SQLUINTEGER'          #     3
+, SQL_DRIVER_HDESC                           => 'SQLUINTEGER'          #   135
+, SQL_DRIVER_HENV                            => 'SQLUINTEGER'          #     4
+, SQL_DRIVER_HLIB                            => 'SQLUINTEGER'          #    76
+, SQL_DRIVER_HSTMT                           => 'SQLUINTEGER'          #     5
+, SQL_DRIVER_NAME                            => 'SQLCHAR'              #     6
+, SQL_DRIVER_ODBC_VER                        => 'SQLCHAR'              #    77
+, SQL_DRIVER_VER                             => 'SQLCHAR'              #     7
+, SQL_DROP_ASSERTION                         => 'SQLUINTEGER bitmask'  #   136
+, SQL_DROP_CHARACTER_SET                     => 'SQLUINTEGER bitmask'  #   137
+, SQL_DROP_COLLATION                         => 'SQLUINTEGER bitmask'  #   138
+, SQL_DROP_DOMAIN                            => 'SQLUINTEGER bitmask'  #   139
+, SQL_DROP_SCHEMA                            => 'SQLUINTEGER bitmask'  #   140
+, SQL_DROP_TABLE                             => 'SQLUINTEGER bitmask'  #   141
+, SQL_DROP_TRANSLATION                       => 'SQLUINTEGER bitmask'  #   142
+, SQL_DROP_VIEW                              => 'SQLUINTEGER bitmask'  #   143
+, SQL_DYNAMIC_CURSOR_ATTRIBUTES1             => 'SQLUINTEGER bitmask'  #   144
+, SQL_DYNAMIC_CURSOR_ATTRIBUTES2             => 'SQLUINTEGER bitmask'  #   145
+, SQL_EXPRESSIONS_IN_ORDERBY                 => 'SQLCHAR'              #    27
+, SQL_FETCH_DIRECTION                        => 'SQLUINTEGER bitmask'  #     8 => !!!
+, SQL_FILE_USAGE                             => 'SQLUSMALLINT'         #    84
+, SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1        => 'SQLUINTEGER bitmask'  #   146
+, SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2        => 'SQLUINTEGER bitmask'  #   147
+, SQL_GETDATA_EXTENSIONS                     => 'SQLUINTEGER bitmask'  #    81
+, SQL_GROUP_BY                               => 'SQLUSMALLINT'         #    88
+, SQL_IDENTIFIER_CASE                        => 'SQLUSMALLINT'         #    28
+, SQL_IDENTIFIER_QUOTE_CHAR                  => 'SQLCHAR'              #    29
+, SQL_INDEX_KEYWORDS                         => 'SQLUINTEGER bitmask'  #   148
+# SQL_INFO_DRIVER_START                      => ''                     #  1000 =>
+# SQL_INFO_FIRST                             => 'SQLUSMALLINT'         #     0 =>
+# SQL_INFO_LAST                              => 'SQLUSMALLINT'         #   114 =>
+, SQL_INFO_SCHEMA_VIEWS                      => 'SQLUINTEGER bitmask'  #   149
+, SQL_INSERT_STATEMENT                       => 'SQLUINTEGER bitmask'  #   172
+, SQL_INTEGRITY                              => 'SQLCHAR'              #    73
+, SQL_KEYSET_CURSOR_ATTRIBUTES1              => 'SQLUINTEGER bitmask'  #   150
+, SQL_KEYSET_CURSOR_ATTRIBUTES2              => 'SQLUINTEGER bitmask'  #   151
+, SQL_KEYWORDS                               => 'SQLCHAR'              #    89
+, SQL_LIKE_ESCAPE_CLAUSE                     => 'SQLCHAR'              #   113
+, SQL_LOCK_TYPES                             => 'SQLUINTEGER bitmask'  #    78 => !!!
+, SQL_MAXIMUM_CATALOG_NAME_LENGTH            => 'SQLUSMALLINT'         #    34 =>
+, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY            => 'SQLUSMALLINT'         #    97 =>
+, SQL_MAXIMUM_COLUMNS_IN_INDEX               => 'SQLUSMALLINT'         #    98 =>
+, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY            => 'SQLUSMALLINT'         #    99 =>
+, SQL_MAXIMUM_COLUMNS_IN_SELECT              => 'SQLUSMALLINT'         #   100 =>
+, SQL_MAXIMUM_COLUMN_NAME_LENGTH             => 'SQLUSMALLINT'         #    30 =>
+, SQL_MAXIMUM_CONCURRENT_ACTIVITIES          => 'SQLUSMALLINT'         #     1 =>
+, SQL_MAXIMUM_CURSOR_NAME_LENGTH             => 'SQLUSMALLINT'         #    31 =>
+, SQL_MAXIMUM_DRIVER_CONNECTIONS             => 'SQLUSMALLINT'         #     0 =>
+, SQL_MAXIMUM_IDENTIFIER_LENGTH              => 'SQLUSMALLINT'         # 10005 =>
+, SQL_MAXIMUM_INDEX_SIZE                     => 'SQLUINTEGER'          #   102 =>
+, SQL_MAXIMUM_ROW_SIZE                       => 'SQLUINTEGER'          #   104 =>
+, SQL_MAXIMUM_SCHEMA_NAME_LENGTH             => 'SQLUSMALLINT'         #    32 =>
+, SQL_MAXIMUM_STATEMENT_LENGTH               => 'SQLUINTEGER'          #   105 =>
+, SQL_MAXIMUM_TABLES_IN_SELECT               => 'SQLUSMALLINT'         #   106 =>
+, SQL_MAXIMUM_USER_NAME_LENGTH               => 'SQLUSMALLINT'         #   107 =>
+, SQL_MAX_ASYNC_CONCURRENT_STATEMENTS        => 'SQLUINTEGER'          # 10022
+, SQL_MAX_BINARY_LITERAL_LEN                 => 'SQLUINTEGER'          #   112
+, SQL_MAX_CATALOG_NAME_LEN                   => 'SQLUSMALLINT'         #    34
+, SQL_MAX_CHAR_LITERAL_LEN                   => 'SQLUINTEGER'          #   108
+, SQL_MAX_COLUMNS_IN_GROUP_BY                => 'SQLUSMALLINT'         #    97
+, SQL_MAX_COLUMNS_IN_INDEX                   => 'SQLUSMALLINT'         #    98
+, SQL_MAX_COLUMNS_IN_ORDER_BY                => 'SQLUSMALLINT'         #    99
+, SQL_MAX_COLUMNS_IN_SELECT                  => 'SQLUSMALLINT'         #   100
+, SQL_MAX_COLUMNS_IN_TABLE                   => 'SQLUSMALLINT'         #   101
+, SQL_MAX_COLUMN_NAME_LEN                    => 'SQLUSMALLINT'         #    30
+, SQL_MAX_CONCURRENT_ACTIVITIES              => 'SQLUSMALLINT'         #     1
+, SQL_MAX_CURSOR_NAME_LEN                    => 'SQLUSMALLINT'         #    31
+, SQL_MAX_DRIVER_CONNECTIONS                 => 'SQLUSMALLINT'         #     0
+, SQL_MAX_IDENTIFIER_LEN                     => 'SQLUSMALLINT'         # 10005
+, SQL_MAX_INDEX_SIZE                         => 'SQLUINTEGER'          #   102
+, SQL_MAX_OWNER_NAME_LEN                     => 'SQLUSMALLINT'         #    32 =>
+, SQL_MAX_PROCEDURE_NAME_LEN                 => 'SQLUSMALLINT'         #    33
+, SQL_MAX_QUALIFIER_NAME_LEN                 => 'SQLUSMALLINT'         #    34 =>
+, SQL_MAX_ROW_SIZE                           => 'SQLUINTEGER'          #   104
+, SQL_MAX_ROW_SIZE_INCLUDES_LONG             => 'SQLCHAR'              #   103
+, SQL_MAX_SCHEMA_NAME_LEN                    => 'SQLUSMALLINT'         #    32
+, SQL_MAX_STATEMENT_LEN                      => 'SQLUINTEGER'          #   105
+, SQL_MAX_TABLES_IN_SELECT                   => 'SQLUSMALLINT'         #   106
+, SQL_MAX_TABLE_NAME_LEN                     => 'SQLUSMALLINT'         #    35
+, SQL_MAX_USER_NAME_LEN                      => 'SQLUSMALLINT'         #   107
+, SQL_MULTIPLE_ACTIVE_TXN                    => 'SQLCHAR'              #    37
+, SQL_MULT_RESULT_SETS                       => 'SQLCHAR'              #    36
+, SQL_NEED_LONG_DATA_LEN                     => 'SQLCHAR'              #   111
+, SQL_NON_NULLABLE_COLUMNS                   => 'SQLUSMALLINT'         #    75
+, SQL_NULL_COLLATION                         => 'SQLUSMALLINT'         #    85
+, SQL_NUMERIC_FUNCTIONS                      => 'SQLUINTEGER bitmask'  #    49
+, SQL_ODBC_API_CONFORMANCE                   => 'SQLUSMALLINT'         #     9 => !!!
+, SQL_ODBC_INTERFACE_CONFORMANCE             => 'SQLUINTEGER'          #   152
+, SQL_ODBC_SAG_CLI_CONFORMANCE               => 'SQLUSMALLINT'         #    12 => !!!
+, SQL_ODBC_SQL_CONFORMANCE                   => 'SQLUSMALLINT'         #    15 => !!!
+, SQL_ODBC_SQL_OPT_IEF                       => 'SQLCHAR'              #    73 =>
+, SQL_ODBC_VER                               => 'SQLCHAR'              #    10
+, SQL_OJ_CAPABILITIES                        => 'SQLUINTEGER bitmask'  #   115
+, SQL_ORDER_BY_COLUMNS_IN_SELECT             => 'SQLCHAR'              #    90
+, SQL_OUTER_JOINS                            => 'SQLCHAR'              #    38 => !!!
+, SQL_OUTER_JOIN_CAPABILITIES                => 'SQLUINTEGER bitmask'  #   115 =>
+, SQL_OWNER_TERM                             => 'SQLCHAR'              #    39 =>
+, SQL_OWNER_USAGE                            => 'SQLUINTEGER bitmask'  #    91 =>
+, SQL_PARAM_ARRAY_ROW_COUNTS                 => 'SQLUINTEGER'          #   153
+, SQL_PARAM_ARRAY_SELECTS                    => 'SQLUINTEGER'          #   154
+, SQL_POSITIONED_STATEMENTS                  => 'SQLUINTEGER bitmask'  #    80 => !!!
+, SQL_POS_OPERATIONS                         => 'SQLINTEGER bitmask'   #    79
+, SQL_PROCEDURES                             => 'SQLCHAR'              #    21
+, SQL_PROCEDURE_TERM                         => 'SQLCHAR'              #    40
+, SQL_QUALIFIER_LOCATION                     => 'SQLUSMALLINT'         #   114 =>
+, SQL_QUALIFIER_NAME_SEPARATOR               => 'SQLCHAR'              #    41 =>
+, SQL_QUALIFIER_TERM                         => 'SQLCHAR'              #    42 =>
+, SQL_QUALIFIER_USAGE                        => 'SQLUINTEGER bitmask'  #    92 =>
+, SQL_QUOTED_IDENTIFIER_CASE                 => 'SQLUSMALLINT'         #    93
+, SQL_ROW_UPDATES                            => 'SQLCHAR'              #    11
+, SQL_SCHEMA_TERM                            => 'SQLCHAR'              #    39
+, SQL_SCHEMA_USAGE                           => 'SQLUINTEGER bitmask'  #    91
+, SQL_SCROLL_CONCURRENCY                     => 'SQLUINTEGER bitmask'  #    43 => !!!
+, SQL_SCROLL_OPTIONS                         => 'SQLUINTEGER bitmask'  #    44
+, SQL_SEARCH_PATTERN_ESCAPE                  => 'SQLCHAR'              #    14
+, SQL_SERVER_NAME                            => 'SQLCHAR'              #    13
+, SQL_SPECIAL_CHARACTERS                     => 'SQLCHAR'              #    94
+, SQL_SQL92_DATETIME_FUNCTIONS               => 'SQLUINTEGER bitmask'  #   155
+, SQL_SQL92_FOREIGN_KEY_DELETE_RULE          => 'SQLUINTEGER bitmask'  #   156
+, SQL_SQL92_FOREIGN_KEY_UPDATE_RULE          => 'SQLUINTEGER bitmask'  #   157
+, SQL_SQL92_GRANT                            => 'SQLUINTEGER bitmask'  #   158
+, SQL_SQL92_NUMERIC_VALUE_FUNCTIONS          => 'SQLUINTEGER bitmask'  #   159
+, SQL_SQL92_PREDICATES                       => 'SQLUINTEGER bitmask'  #   160
+, SQL_SQL92_RELATIONAL_JOIN_OPERATORS        => 'SQLUINTEGER bitmask'  #   161
+, SQL_SQL92_REVOKE                           => 'SQLUINTEGER bitmask'  #   162
+, SQL_SQL92_ROW_VALUE_CONSTRUCTOR            => 'SQLUINTEGER bitmask'  #   163
+, SQL_SQL92_STRING_FUNCTIONS                 => 'SQLUINTEGER bitmask'  #   164
+, SQL_SQL92_VALUE_EXPRESSIONS                => 'SQLUINTEGER bitmask'  #   165
+, SQL_SQL_CONFORMANCE                        => 'SQLUINTEGER'          #   118
+, SQL_STANDARD_CLI_CONFORMANCE               => 'SQLUINTEGER bitmask'  #   166
+, SQL_STATIC_CURSOR_ATTRIBUTES1              => 'SQLUINTEGER bitmask'  #   167
+, SQL_STATIC_CURSOR_ATTRIBUTES2              => 'SQLUINTEGER bitmask'  #   168
+, SQL_STATIC_SENSITIVITY                     => 'SQLUINTEGER bitmask'  #    83 => !!!
+, SQL_STRING_FUNCTIONS                       => 'SQLUINTEGER bitmask'  #    50
+, SQL_SUBQUERIES                             => 'SQLUINTEGER bitmask'  #    95
+, SQL_SYSTEM_FUNCTIONS                       => 'SQLUINTEGER bitmask'  #    51
+, SQL_TABLE_TERM                             => 'SQLCHAR'              #    45
+, SQL_TIMEDATE_ADD_INTERVALS                 => 'SQLUINTEGER bitmask'  #   109
+, SQL_TIMEDATE_DIFF_INTERVALS                => 'SQLUINTEGER bitmask'  #   110
+, SQL_TIMEDATE_FUNCTIONS                     => 'SQLUINTEGER bitmask'  #    52
+, SQL_TRANSACTION_CAPABLE                    => 'SQLUSMALLINT'         #    46 =>
+, SQL_TRANSACTION_ISOLATION_OPTION           => 'SQLUINTEGER bitmask'  #    72 =>
+, SQL_TXN_CAPABLE                            => 'SQLUSMALLINT'         #    46
+, SQL_TXN_ISOLATION_OPTION                   => 'SQLUINTEGER bitmask'  #    72
+, SQL_UNION                                  => 'SQLUINTEGER bitmask'  #    96
+, SQL_UNION_STATEMENT                        => 'SQLUINTEGER bitmask'  #    96 =>
+, SQL_USER_NAME                              => 'SQLCHAR'              #    47
+, SQL_XOPEN_CLI_YEAR                         => 'SQLCHAR'              # 10000
+);
+
+=head2 %ReturnValues
+
+See: sql.h, sqlext.h
+
+Entries edited by hand (differ from the C headers):
+  SQL_TXN_ISOLATION_OPTION
+
+=cut
+
+# %ReturnValues maps an information-type name to a hashref of the symbolic
+# bit/enum values SQLGetInfo may return for it.  Each hashref maps the C
+# constant's name (from sql.h / sqlext.h) to its numeric value; bitmask info
+# types list one key per bit, enumerated types one key per legal value.
+$ReturnValues{SQL_AGGREGATE_FUNCTIONS} =
+{
+  SQL_AF_AVG                                 => 0x00000001
+, SQL_AF_COUNT                               => 0x00000002
+, SQL_AF_MAX                                 => 0x00000004
+, SQL_AF_MIN                                 => 0x00000008
+, SQL_AF_SUM                                 => 0x00000010
+, SQL_AF_DISTINCT                            => 0x00000020
+, SQL_AF_ALL                                 => 0x00000040
+};
+$ReturnValues{SQL_ALTER_DOMAIN} =
+{
+  SQL_AD_CONSTRAINT_NAME_DEFINITION          => 0x00000001
+, SQL_AD_ADD_DOMAIN_CONSTRAINT               => 0x00000002
+, SQL_AD_DROP_DOMAIN_CONSTRAINT              => 0x00000004
+, SQL_AD_ADD_DOMAIN_DEFAULT                  => 0x00000008
+, SQL_AD_DROP_DOMAIN_DEFAULT                 => 0x00000010
+, SQL_AD_ADD_CONSTRAINT_INITIALLY_DEFERRED   => 0x00000020
+, SQL_AD_ADD_CONSTRAINT_INITIALLY_IMMEDIATE  => 0x00000040
+, SQL_AD_ADD_CONSTRAINT_DEFERRABLE           => 0x00000080
+, SQL_AD_ADD_CONSTRAINT_NON_DEFERRABLE       => 0x00000100
+};
+$ReturnValues{SQL_ALTER_TABLE} =
+{
+  SQL_AT_ADD_COLUMN                          => 0x00000001
+, SQL_AT_DROP_COLUMN                         => 0x00000002
+, SQL_AT_ADD_CONSTRAINT                      => 0x00000008
+, SQL_AT_ADD_COLUMN_SINGLE                   => 0x00000020
+, SQL_AT_ADD_COLUMN_DEFAULT                  => 0x00000040
+, SQL_AT_ADD_COLUMN_COLLATION                => 0x00000080
+, SQL_AT_SET_COLUMN_DEFAULT                  => 0x00000100
+, SQL_AT_DROP_COLUMN_DEFAULT                 => 0x00000200
+, SQL_AT_DROP_COLUMN_CASCADE                 => 0x00000400
+, SQL_AT_DROP_COLUMN_RESTRICT                => 0x00000800
+, SQL_AT_ADD_TABLE_CONSTRAINT                => 0x00001000
+, SQL_AT_DROP_TABLE_CONSTRAINT_CASCADE       => 0x00002000
+, SQL_AT_DROP_TABLE_CONSTRAINT_RESTRICT      => 0x00004000
+, SQL_AT_CONSTRAINT_NAME_DEFINITION          => 0x00008000
+, SQL_AT_CONSTRAINT_INITIALLY_DEFERRED       => 0x00010000
+, SQL_AT_CONSTRAINT_INITIALLY_IMMEDIATE      => 0x00020000
+, SQL_AT_CONSTRAINT_DEFERRABLE               => 0x00040000
+, SQL_AT_CONSTRAINT_NON_DEFERRABLE           => 0x00080000
+};
+# Enumerated (not bitmask) values.
+$ReturnValues{SQL_ASYNC_MODE} =
+{
+  SQL_AM_NONE                                => 0
+, SQL_AM_CONNECTION                          => 1
+, SQL_AM_STATEMENT                           => 2
+};
+# Subset of the SQL_CA2_* cursor-attribute bits relating to SQL_ATTR_MAX_ROWS.
+$ReturnValues{SQL_ATTR_MAX_ROWS} =
+{
+  SQL_CA2_MAX_ROWS_SELECT                    => 0x00000080
+, SQL_CA2_MAX_ROWS_INSERT                    => 0x00000100
+, SQL_CA2_MAX_ROWS_DELETE                    => 0x00000200
+, SQL_CA2_MAX_ROWS_UPDATE                    => 0x00000400
+, SQL_CA2_MAX_ROWS_CATALOG                   => 0x00000800
+# SQL_CA2_MAX_ROWS_AFFECTS_ALL               =>
+};
+# Subset of the SQL_CA2_* cursor-attribute bits relating to concurrency.
+$ReturnValues{SQL_ATTR_SCROLL_CONCURRENCY} =
+{
+  SQL_CA2_READ_ONLY_CONCURRENCY              => 0x00000001
+, SQL_CA2_LOCK_CONCURRENCY                   => 0x00000002
+, SQL_CA2_OPT_ROWVER_CONCURRENCY             => 0x00000004
+, SQL_CA2_OPT_VALUES_CONCURRENCY             => 0x00000008
+, SQL_CA2_SENSITIVITY_ADDITIONS              => 0x00000010
+, SQL_CA2_SENSITIVITY_DELETIONS              => 0x00000020
+, SQL_CA2_SENSITIVITY_UPDATES                => 0x00000040
+};
+$ReturnValues{SQL_BATCH_ROW_COUNT} =
+{
+  SQL_BRC_PROCEDURES                         => 0x0000001
+, SQL_BRC_EXPLICIT                           => 0x0000002
+, SQL_BRC_ROLLED_UP                          => 0x0000004
+};
+$ReturnValues{SQL_BATCH_SUPPORT} =
+{
+  SQL_BS_SELECT_EXPLICIT                     => 0x00000001
+, SQL_BS_ROW_COUNT_EXPLICIT                  => 0x00000002
+, SQL_BS_SELECT_PROC                         => 0x00000004
+, SQL_BS_ROW_COUNT_PROC                      => 0x00000008
+};
+$ReturnValues{SQL_BOOKMARK_PERSISTENCE} =
+{
+  SQL_BP_CLOSE                               => 0x00000001
+, SQL_BP_DELETE                              => 0x00000002
+, SQL_BP_DROP                                => 0x00000004
+, SQL_BP_TRANSACTION                         => 0x00000008
+, SQL_BP_UPDATE                              => 0x00000010
+, SQL_BP_OTHER_HSTMT                         => 0x00000020
+, SQL_BP_SCROLL                              => 0x00000040
+};
+# SQL_CL_* values are numerically identical to the older SQL_QL_* names.
+$ReturnValues{SQL_CATALOG_LOCATION} =
+{
+  SQL_CL_START                               => 0x0001 # SQL_QL_START
+, SQL_CL_END                                 => 0x0002 # SQL_QL_END
+};
+# SQL_CU_* values are numerically identical to the older SQL_QU_* names.
+$ReturnValues{SQL_CATALOG_USAGE} =
+{
+  SQL_CU_DML_STATEMENTS                      => 0x00000001 # SQL_QU_DML_STATEMENTS
+, SQL_CU_PROCEDURE_INVOCATION                => 0x00000002 # SQL_QU_PROCEDURE_INVOCATION
+, SQL_CU_TABLE_DEFINITION                    => 0x00000004 # SQL_QU_TABLE_DEFINITION
+, SQL_CU_INDEX_DEFINITION                    => 0x00000008 # SQL_QU_INDEX_DEFINITION
+, SQL_CU_PRIVILEGE_DEFINITION                => 0x00000010 # SQL_QU_PRIVILEGE_DEFINITION
+};
+$ReturnValues{SQL_CONCAT_NULL_BEHAVIOR} =
+{
+  SQL_CB_NULL                                => 0x0000
+, SQL_CB_NON_NULL                            => 0x0001
+};
+# Master SQL_CVT_* conversion bitmask, shared (by reference) by every
+# per-type SQL_CONVERT_<type> key assigned below.
+$ReturnValues{SQL_CONVERT_} =
+{
+  SQL_CVT_CHAR                               => 0x00000001
+, SQL_CVT_NUMERIC                            => 0x00000002
+, SQL_CVT_DECIMAL                            => 0x00000004
+, SQL_CVT_INTEGER                            => 0x00000008
+, SQL_CVT_SMALLINT                           => 0x00000010
+, SQL_CVT_FLOAT                              => 0x00000020
+, SQL_CVT_REAL                               => 0x00000040
+, SQL_CVT_DOUBLE                             => 0x00000080
+, SQL_CVT_VARCHAR                            => 0x00000100
+, SQL_CVT_LONGVARCHAR                        => 0x00000200
+, SQL_CVT_BINARY                             => 0x00000400
+, SQL_CVT_VARBINARY                          => 0x00000800
+, SQL_CVT_BIT                                => 0x00001000
+, SQL_CVT_TINYINT                            => 0x00002000
+, SQL_CVT_BIGINT                             => 0x00004000
+, SQL_CVT_DATE                               => 0x00008000
+, SQL_CVT_TIME                               => 0x00010000
+, SQL_CVT_TIMESTAMP                          => 0x00020000
+, SQL_CVT_LONGVARBINARY                      => 0x00040000
+, SQL_CVT_INTERVAL_YEAR_MONTH                => 0x00080000
+, SQL_CVT_INTERVAL_DAY_TIME                  => 0x00100000
+, SQL_CVT_WCHAR                              => 0x00200000
+, SQL_CVT_WLONGVARCHAR                       => 0x00400000
+, SQL_CVT_WVARCHAR                           => 0x00800000
+, SQL_CVT_GUID                               => 0x01000000
+};
+# Every SQL_CONVERT_<type> info type shares the single SQL_CONVERT_ hashref
+# above.  These are aliases (shared references), not copies: mutating one
+# entry would be visible through all of them.
+$ReturnValues{SQL_CONVERT_BIGINT              } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_BINARY              } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_BIT                 } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_CHAR                } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_DATE                } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_DECIMAL             } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_DOUBLE              } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_FLOAT               } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_GUID                } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_INTEGER             } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_INTERVAL_DAY_TIME   } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_INTERVAL_YEAR_MONTH } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_LONGVARBINARY       } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_LONGVARCHAR         } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_NUMERIC             } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_REAL                } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_SMALLINT            } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_TIME                } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_TIMESTAMP           } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_TINYINT             } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_VARBINARY           } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_VARCHAR             } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_WCHAR               } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_WLONGVARCHAR        } = $ReturnValues{SQL_CONVERT_};
+$ReturnValues{SQL_CONVERT_WVARCHAR            } = $ReturnValues{SQL_CONVERT_};
+
+$ReturnValues{SQL_CONVERT_FUNCTIONS} =
+{
+  SQL_FN_CVT_CONVERT                         => 0x00000001
+, SQL_FN_CVT_CAST                            => 0x00000002
+};
+$ReturnValues{SQL_CORRELATION_NAME} =
+{
+  SQL_CN_NONE                                => 0x0000
+, SQL_CN_DIFFERENT                           => 0x0001
+, SQL_CN_ANY                                 => 0x0002
+};
+$ReturnValues{SQL_CREATE_ASSERTION} =
+{
+  SQL_CA_CREATE_ASSERTION                    => 0x00000001
+, SQL_CA_CONSTRAINT_INITIALLY_DEFERRED       => 0x00000010
+, SQL_CA_CONSTRAINT_INITIALLY_IMMEDIATE      => 0x00000020
+, SQL_CA_CONSTRAINT_DEFERRABLE               => 0x00000040
+, SQL_CA_CONSTRAINT_NON_DEFERRABLE           => 0x00000080
+};
+$ReturnValues{SQL_CREATE_CHARACTER_SET} =
+{
+  SQL_CCS_CREATE_CHARACTER_SET               => 0x00000001
+, SQL_CCS_COLLATE_CLAUSE                     => 0x00000002
+, SQL_CCS_LIMITED_COLLATION                  => 0x00000004
+};
+$ReturnValues{SQL_CREATE_COLLATION} =
+{
+  SQL_CCOL_CREATE_COLLATION                  => 0x00000001
+};
+$ReturnValues{SQL_CREATE_DOMAIN} =
+{
+  SQL_CDO_CREATE_DOMAIN                      => 0x00000001
+, SQL_CDO_DEFAULT                            => 0x00000002
+, SQL_CDO_CONSTRAINT                         => 0x00000004
+, SQL_CDO_COLLATION                          => 0x00000008
+, SQL_CDO_CONSTRAINT_NAME_DEFINITION         => 0x00000010
+, SQL_CDO_CONSTRAINT_INITIALLY_DEFERRED      => 0x00000020
+, SQL_CDO_CONSTRAINT_INITIALLY_IMMEDIATE     => 0x00000040
+, SQL_CDO_CONSTRAINT_DEFERRABLE              => 0x00000080
+, SQL_CDO_CONSTRAINT_NON_DEFERRABLE          => 0x00000100
+};
+$ReturnValues{SQL_CREATE_SCHEMA} =
+{
+  SQL_CS_CREATE_SCHEMA                       => 0x00000001
+, SQL_CS_AUTHORIZATION                       => 0x00000002
+, SQL_CS_DEFAULT_CHARACTER_SET               => 0x00000004
+};
+$ReturnValues{SQL_CREATE_TABLE} =
+{
+  SQL_CT_CREATE_TABLE                        => 0x00000001
+, SQL_CT_COMMIT_PRESERVE                     => 0x00000002
+, SQL_CT_COMMIT_DELETE                       => 0x00000004
+, SQL_CT_GLOBAL_TEMPORARY                    => 0x00000008
+, SQL_CT_LOCAL_TEMPORARY                     => 0x00000010
+, SQL_CT_CONSTRAINT_INITIALLY_DEFERRED       => 0x00000020
+, SQL_CT_CONSTRAINT_INITIALLY_IMMEDIATE      => 0x00000040
+, SQL_CT_CONSTRAINT_DEFERRABLE               => 0x00000080
+, SQL_CT_CONSTRAINT_NON_DEFERRABLE           => 0x00000100
+, SQL_CT_COLUMN_CONSTRAINT                   => 0x00000200
+, SQL_CT_COLUMN_DEFAULT                      => 0x00000400
+, SQL_CT_COLUMN_COLLATION                    => 0x00000800
+, SQL_CT_TABLE_CONSTRAINT                    => 0x00001000
+, SQL_CT_CONSTRAINT_NAME_DEFINITION          => 0x00002000
+};
+$ReturnValues{SQL_CREATE_TRANSLATION} =
+{
+  SQL_CTR_CREATE_TRANSLATION                 => 0x00000001
+};
+$ReturnValues{SQL_CREATE_VIEW} =
+{
+  SQL_CV_CREATE_VIEW                         => 0x00000001
+, SQL_CV_CHECK_OPTION                        => 0x00000002
+, SQL_CV_CASCADED                            => 0x00000004
+, SQL_CV_LOCAL                               => 0x00000008
+};
+# Enumerated values; rollback behavior shares the same value set (below).
+$ReturnValues{SQL_CURSOR_COMMIT_BEHAVIOR} =
+{
+  SQL_CB_DELETE                              => 0
+, SQL_CB_CLOSE                               => 1
+, SQL_CB_PRESERVE                            => 2
+};
+$ReturnValues{SQL_CURSOR_ROLLBACK_BEHAVIOR} = $ReturnValues{SQL_CURSOR_COMMIT_BEHAVIOR};
+
+$ReturnValues{SQL_CURSOR_SENSITIVITY} =
+{
+  SQL_UNSPECIFIED                            => 0
+, SQL_INSENSITIVE                            => 1
+, SQL_SENSITIVE                              => 2
+};
+$ReturnValues{SQL_DATETIME_LITERALS} =
+{
+  SQL_DL_SQL92_DATE                          => 0x00000001
+, SQL_DL_SQL92_TIME                          => 0x00000002
+, SQL_DL_SQL92_TIMESTAMP                     => 0x00000004
+, SQL_DL_SQL92_INTERVAL_YEAR                 => 0x00000008
+, SQL_DL_SQL92_INTERVAL_MONTH                => 0x00000010
+, SQL_DL_SQL92_INTERVAL_DAY                  => 0x00000020
+, SQL_DL_SQL92_INTERVAL_HOUR                 => 0x00000040
+, SQL_DL_SQL92_INTERVAL_MINUTE               => 0x00000080
+, SQL_DL_SQL92_INTERVAL_SECOND               => 0x00000100
+, SQL_DL_SQL92_INTERVAL_YEAR_TO_MONTH        => 0x00000200
+, SQL_DL_SQL92_INTERVAL_DAY_TO_HOUR          => 0x00000400
+, SQL_DL_SQL92_INTERVAL_DAY_TO_MINUTE        => 0x00000800
+, SQL_DL_SQL92_INTERVAL_DAY_TO_SECOND        => 0x00001000
+, SQL_DL_SQL92_INTERVAL_HOUR_TO_MINUTE       => 0x00002000
+, SQL_DL_SQL92_INTERVAL_HOUR_TO_SECOND       => 0x00004000
+, SQL_DL_SQL92_INTERVAL_MINUTE_TO_SECOND     => 0x00008000
+};
+$ReturnValues{SQL_DDL_INDEX} =
+{
+  SQL_DI_CREATE_INDEX                        => 0x00000001
+, SQL_DI_DROP_INDEX                          => 0x00000002
+};
+# Subset of the SQL_CA2_* cursor-attribute bits relating to row counts.
+$ReturnValues{SQL_DIAG_CURSOR_ROW_COUNT} =
+{
+  SQL_CA2_CRC_EXACT                          => 0x00001000
+, SQL_CA2_CRC_APPROXIMATE                    => 0x00002000
+, SQL_CA2_SIMULATE_NON_UNIQUE                => 0x00004000
+, SQL_CA2_SIMULATE_TRY_UNIQUE                => 0x00008000
+, SQL_CA2_SIMULATE_UNIQUE                    => 0x00010000
+};
+$ReturnValues{SQL_DROP_ASSERTION} =
+{
+  SQL_DA_DROP_ASSERTION                      => 0x00000001
+};
+$ReturnValues{SQL_DROP_CHARACTER_SET} =
+{
+  SQL_DCS_DROP_CHARACTER_SET                 => 0x00000001
+};
+$ReturnValues{SQL_DROP_COLLATION} =
+{
+  SQL_DC_DROP_COLLATION                      => 0x00000001
+};
+$ReturnValues{SQL_DROP_DOMAIN} =
+{
+  SQL_DD_DROP_DOMAIN                         => 0x00000001
+, SQL_DD_RESTRICT                            => 0x00000002
+, SQL_DD_CASCADE                             => 0x00000004
+};
+$ReturnValues{SQL_DROP_SCHEMA} =
+{
+  SQL_DS_DROP_SCHEMA                         => 0x00000001
+, SQL_DS_RESTRICT                            => 0x00000002
+, SQL_DS_CASCADE                             => 0x00000004
+};
+$ReturnValues{SQL_DROP_TABLE} =
+{
+  SQL_DT_DROP_TABLE                          => 0x00000001
+, SQL_DT_RESTRICT                            => 0x00000002
+, SQL_DT_CASCADE                             => 0x00000004
+};
+$ReturnValues{SQL_DROP_TRANSLATION} =
+{
+  SQL_DTR_DROP_TRANSLATION                   => 0x00000001
+};
+$ReturnValues{SQL_DROP_VIEW} =
+{
+  SQL_DV_DROP_VIEW                           => 0x00000001
+, SQL_DV_RESTRICT                            => 0x00000002
+, SQL_DV_CASCADE                             => 0x00000004
+};
+# Master SQL_CA1_* table, shared by reference by the four per-cursor-type
+# CURSOR_ATTRIBUTES1 keys assigned below.
+$ReturnValues{SQL_CURSOR_ATTRIBUTES1} =
+{
+  SQL_CA1_NEXT                               => 0x00000001
+, SQL_CA1_ABSOLUTE                           => 0x00000002
+, SQL_CA1_RELATIVE                           => 0x00000004
+, SQL_CA1_BOOKMARK                           => 0x00000008
+, SQL_CA1_LOCK_NO_CHANGE                     => 0x00000040
+, SQL_CA1_LOCK_EXCLUSIVE                     => 0x00000080
+, SQL_CA1_LOCK_UNLOCK                        => 0x00000100
+, SQL_CA1_POS_POSITION                       => 0x00000200
+, SQL_CA1_POS_UPDATE                         => 0x00000400
+, SQL_CA1_POS_DELETE                         => 0x00000800
+, SQL_CA1_POS_REFRESH                        => 0x00001000
+, SQL_CA1_POSITIONED_UPDATE                  => 0x00002000
+, SQL_CA1_POSITIONED_DELETE                  => 0x00004000
+, SQL_CA1_SELECT_FOR_UPDATE                  => 0x00008000
+, SQL_CA1_BULK_ADD                           => 0x00010000
+, SQL_CA1_BULK_UPDATE_BY_BOOKMARK            => 0x00020000
+, SQL_CA1_BULK_DELETE_BY_BOOKMARK            => 0x00040000
+, SQL_CA1_BULK_FETCH_BY_BOOKMARK             => 0x00080000
+};
+$ReturnValues{      SQL_DYNAMIC_CURSOR_ATTRIBUTES1} = $ReturnValues{SQL_CURSOR_ATTRIBUTES1};
+$ReturnValues{SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1} = $ReturnValues{SQL_CURSOR_ATTRIBUTES1};
+$ReturnValues{       SQL_KEYSET_CURSOR_ATTRIBUTES1} = $ReturnValues{SQL_CURSOR_ATTRIBUTES1};
+$ReturnValues{       SQL_STATIC_CURSOR_ATTRIBUTES1} = $ReturnValues{SQL_CURSOR_ATTRIBUTES1};
+
+# Master SQL_CA2_* table, shared by reference by the four per-cursor-type
+# CURSOR_ATTRIBUTES2 keys assigned below.
+$ReturnValues{SQL_CURSOR_ATTRIBUTES2} =
+{
+  SQL_CA2_READ_ONLY_CONCURRENCY              => 0x00000001
+, SQL_CA2_LOCK_CONCURRENCY                   => 0x00000002
+, SQL_CA2_OPT_ROWVER_CONCURRENCY             => 0x00000004
+, SQL_CA2_OPT_VALUES_CONCURRENCY             => 0x00000008
+, SQL_CA2_SENSITIVITY_ADDITIONS              => 0x00000010
+, SQL_CA2_SENSITIVITY_DELETIONS              => 0x00000020
+, SQL_CA2_SENSITIVITY_UPDATES                => 0x00000040
+, SQL_CA2_MAX_ROWS_SELECT                    => 0x00000080
+, SQL_CA2_MAX_ROWS_INSERT                    => 0x00000100
+, SQL_CA2_MAX_ROWS_DELETE                    => 0x00000200
+, SQL_CA2_MAX_ROWS_UPDATE                    => 0x00000400
+, SQL_CA2_MAX_ROWS_CATALOG                   => 0x00000800
+, SQL_CA2_CRC_EXACT                          => 0x00001000
+, SQL_CA2_CRC_APPROXIMATE                    => 0x00002000
+, SQL_CA2_SIMULATE_NON_UNIQUE                => 0x00004000
+, SQL_CA2_SIMULATE_TRY_UNIQUE                => 0x00008000
+, SQL_CA2_SIMULATE_UNIQUE                    => 0x00010000
+};
+$ReturnValues{      SQL_DYNAMIC_CURSOR_ATTRIBUTES2} = $ReturnValues{SQL_CURSOR_ATTRIBUTES2};
+$ReturnValues{SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2} = $ReturnValues{SQL_CURSOR_ATTRIBUTES2};
+$ReturnValues{       SQL_KEYSET_CURSOR_ATTRIBUTES2} = $ReturnValues{SQL_CURSOR_ATTRIBUTES2};
+$ReturnValues{       SQL_STATIC_CURSOR_ATTRIBUTES2} = $ReturnValues{SQL_CURSOR_ATTRIBUTES2};
+
+$ReturnValues{SQL_FETCH_DIRECTION} =
+{
+ SQL_FD_FETCH_NEXT => 0x00000001
+, SQL_FD_FETCH_FIRST => 0x00000002
+, SQL_FD_FETCH_LAST => 0x00000004
+, SQL_FD_FETCH_PRIOR => 0x00000008
+, SQL_FD_FETCH_ABSOLUTE => 0x00000010
+, SQL_FD_FETCH_RELATIVE => 0x00000020
+, SQL_FD_FETCH_RESUME => 0x00000040
+, SQL_FD_FETCH_BOOKMARK => 0x00000080
+};
+$ReturnValues{SQL_FILE_USAGE} =
+{
+ SQL_FILE_NOT_SUPPORTED => 0x0000
+, SQL_FILE_TABLE => 0x0001
+, SQL_FILE_QUALIFIER => 0x0002
+, SQL_FILE_CATALOG => 0x0002 # SQL_FILE_QUALIFIER
+};
+$ReturnValues{SQL_GETDATA_EXTENSIONS} =
+{
+ SQL_GD_ANY_COLUMN => 0x00000001
+, SQL_GD_ANY_ORDER => 0x00000002
+, SQL_GD_BLOCK => 0x00000004
+, SQL_GD_BOUND => 0x00000008
+};
+$ReturnValues{SQL_GROUP_BY} =
+{
+ SQL_GB_NOT_SUPPORTED => 0x0000
+, SQL_GB_GROUP_BY_EQUALS_SELECT => 0x0001
+, SQL_GB_GROUP_BY_CONTAINS_SELECT => 0x0002
+, SQL_GB_NO_RELATION => 0x0003
+, SQL_GB_COLLATE => 0x0004
+};
+$ReturnValues{SQL_IDENTIFIER_CASE} =
+{
+ SQL_IC_UPPER => 1
+, SQL_IC_LOWER => 2
+, SQL_IC_SENSITIVE => 3
+, SQL_IC_MIXED => 4
+};
+$ReturnValues{SQL_INDEX_KEYWORDS} =
+{
+ SQL_IK_NONE => 0x00000000
+, SQL_IK_ASC => 0x00000001
+, SQL_IK_DESC => 0x00000002
+# SQL_IK_ALL =>
+};
+$ReturnValues{SQL_INFO_SCHEMA_VIEWS} =
+{
+ SQL_ISV_ASSERTIONS => 0x00000001
+, SQL_ISV_CHARACTER_SETS => 0x00000002
+, SQL_ISV_CHECK_CONSTRAINTS => 0x00000004
+, SQL_ISV_COLLATIONS => 0x00000008
+, SQL_ISV_COLUMN_DOMAIN_USAGE => 0x00000010
+, SQL_ISV_COLUMN_PRIVILEGES => 0x00000020
+, SQL_ISV_COLUMNS => 0x00000040
+, SQL_ISV_CONSTRAINT_COLUMN_USAGE => 0x00000080
+, SQL_ISV_CONSTRAINT_TABLE_USAGE => 0x00000100
+, SQL_ISV_DOMAIN_CONSTRAINTS => 0x00000200
+, SQL_ISV_DOMAINS => 0x00000400
+, SQL_ISV_KEY_COLUMN_USAGE => 0x00000800
+, SQL_ISV_REFERENTIAL_CONSTRAINTS => 0x00001000
+, SQL_ISV_SCHEMATA => 0x00002000
+, SQL_ISV_SQL_LANGUAGES => 0x00004000
+, SQL_ISV_TABLE_CONSTRAINTS => 0x00008000
+, SQL_ISV_TABLE_PRIVILEGES => 0x00010000
+, SQL_ISV_TABLES => 0x00020000
+, SQL_ISV_TRANSLATIONS => 0x00040000
+, SQL_ISV_USAGE_PRIVILEGES => 0x00080000
+, SQL_ISV_VIEW_COLUMN_USAGE => 0x00100000
+, SQL_ISV_VIEW_TABLE_USAGE => 0x00200000
+, SQL_ISV_VIEWS => 0x00400000
+};
+$ReturnValues{SQL_INSERT_STATEMENT} =
+{
+ SQL_IS_INSERT_LITERALS => 0x00000001
+, SQL_IS_INSERT_SEARCHED => 0x00000002
+, SQL_IS_SELECT_INTO => 0x00000004
+};
+$ReturnValues{SQL_LOCK_TYPES} =
+{
+ SQL_LCK_NO_CHANGE => 0x00000001
+, SQL_LCK_EXCLUSIVE => 0x00000002
+, SQL_LCK_UNLOCK => 0x00000004
+};
+$ReturnValues{SQL_NON_NULLABLE_COLUMNS} =
+{
+ SQL_NNC_NULL => 0x0000
+, SQL_NNC_NON_NULL => 0x0001
+};
+$ReturnValues{SQL_NULL_COLLATION} =
+{
+ SQL_NC_HIGH => 0
+, SQL_NC_LOW => 1
+, SQL_NC_START => 0x0002
+, SQL_NC_END => 0x0004
+};
+$ReturnValues{SQL_NUMERIC_FUNCTIONS} =
+{
+ SQL_FN_NUM_ABS => 0x00000001
+, SQL_FN_NUM_ACOS => 0x00000002
+, SQL_FN_NUM_ASIN => 0x00000004
+, SQL_FN_NUM_ATAN => 0x00000008
+, SQL_FN_NUM_ATAN2 => 0x00000010
+, SQL_FN_NUM_CEILING => 0x00000020
+, SQL_FN_NUM_COS => 0x00000040
+, SQL_FN_NUM_COT => 0x00000080
+, SQL_FN_NUM_EXP => 0x00000100
+, SQL_FN_NUM_FLOOR => 0x00000200
+, SQL_FN_NUM_LOG => 0x00000400
+, SQL_FN_NUM_MOD => 0x00000800
+, SQL_FN_NUM_SIGN => 0x00001000
+, SQL_FN_NUM_SIN => 0x00002000
+, SQL_FN_NUM_SQRT => 0x00004000
+, SQL_FN_NUM_TAN => 0x00008000
+, SQL_FN_NUM_PI => 0x00010000
+, SQL_FN_NUM_RAND => 0x00020000
+, SQL_FN_NUM_DEGREES => 0x00040000
+, SQL_FN_NUM_LOG10 => 0x00080000
+, SQL_FN_NUM_POWER => 0x00100000
+, SQL_FN_NUM_RADIANS => 0x00200000
+, SQL_FN_NUM_ROUND => 0x00400000
+, SQL_FN_NUM_TRUNCATE => 0x00800000
+};
+$ReturnValues{SQL_ODBC_API_CONFORMANCE} =
+{
+ SQL_OAC_NONE => 0x0000
+, SQL_OAC_LEVEL1 => 0x0001
+, SQL_OAC_LEVEL2 => 0x0002
+};
+$ReturnValues{SQL_ODBC_INTERFACE_CONFORMANCE} =
+{
+ SQL_OIC_CORE => 1
+, SQL_OIC_LEVEL1 => 2
+, SQL_OIC_LEVEL2 => 3
+};
+$ReturnValues{SQL_ODBC_SAG_CLI_CONFORMANCE} =
+{
+ SQL_OSCC_NOT_COMPLIANT => 0x0000
+, SQL_OSCC_COMPLIANT => 0x0001
+};
+$ReturnValues{SQL_ODBC_SQL_CONFORMANCE} =
+{
+ SQL_OSC_MINIMUM => 0x0000
+, SQL_OSC_CORE => 0x0001
+, SQL_OSC_EXTENDED => 0x0002
+};
+# ---------------------------------------------------------------------------
+# %ReturnValues: for each SQLGetInfo info-type name, map the symbolic
+# constant names of its possible return values to their numeric codes.
+# Power-of-two values are bitmask capabilities; small sequential integers
+# are enumeration codes.  Consumed by DBI::Const::GetInfoReturn::Explain.
+# ---------------------------------------------------------------------------
+$ReturnValues{SQL_OJ_CAPABILITIES} =
+{
+ SQL_OJ_LEFT => 0x00000001
+, SQL_OJ_RIGHT => 0x00000002
+, SQL_OJ_FULL => 0x00000004
+, SQL_OJ_NESTED => 0x00000008
+, SQL_OJ_NOT_ORDERED => 0x00000010
+, SQL_OJ_INNER => 0x00000020
+, SQL_OJ_ALL_COMPARISON_OPS => 0x00000040
+};
+$ReturnValues{SQL_OWNER_USAGE} =
+{
+ SQL_OU_DML_STATEMENTS => 0x00000001
+, SQL_OU_PROCEDURE_INVOCATION => 0x00000002
+, SQL_OU_TABLE_DEFINITION => 0x00000004
+, SQL_OU_INDEX_DEFINITION => 0x00000008
+, SQL_OU_PRIVILEGE_DEFINITION => 0x00000010
+};
+$ReturnValues{SQL_PARAM_ARRAY_ROW_COUNTS} =
+{
+ SQL_PARC_BATCH => 1
+, SQL_PARC_NO_BATCH => 2
+};
+$ReturnValues{SQL_PARAM_ARRAY_SELECTS} =
+{
+ SQL_PAS_BATCH => 1
+, SQL_PAS_NO_BATCH => 2
+, SQL_PAS_NO_SELECT => 3
+};
+$ReturnValues{SQL_POSITIONED_STATEMENTS} =
+{
+ SQL_PS_POSITIONED_DELETE => 0x00000001
+, SQL_PS_POSITIONED_UPDATE => 0x00000002
+, SQL_PS_SELECT_FOR_UPDATE => 0x00000004
+};
+$ReturnValues{SQL_POS_OPERATIONS} =
+{
+ SQL_POS_POSITION => 0x00000001
+, SQL_POS_REFRESH => 0x00000002
+, SQL_POS_UPDATE => 0x00000004
+, SQL_POS_DELETE => 0x00000008
+, SQL_POS_ADD => 0x00000010
+};
+$ReturnValues{SQL_QUALIFIER_LOCATION} =
+{
+ SQL_QL_START => 0x0001
+, SQL_QL_END => 0x0002
+};
+$ReturnValues{SQL_QUALIFIER_USAGE} =
+{
+ SQL_QU_DML_STATEMENTS => 0x00000001
+, SQL_QU_PROCEDURE_INVOCATION => 0x00000002
+, SQL_QU_TABLE_DEFINITION => 0x00000004
+, SQL_QU_INDEX_DEFINITION => 0x00000008
+, SQL_QU_PRIVILEGE_DEFINITION => 0x00000010
+};
+# Alias: SQL_QUOTED_IDENTIFIER_CASE reuses the SQL_IDENTIFIER_CASE value set
+# (same hashref, not a copy).
+$ReturnValues{SQL_QUOTED_IDENTIFIER_CASE} = $ReturnValues{SQL_IDENTIFIER_CASE};
+
+$ReturnValues{SQL_SCHEMA_USAGE} =
+{
+ SQL_SU_DML_STATEMENTS => 0x00000001 # SQL_OU_DML_STATEMENTS
+, SQL_SU_PROCEDURE_INVOCATION => 0x00000002 # SQL_OU_PROCEDURE_INVOCATION
+, SQL_SU_TABLE_DEFINITION => 0x00000004 # SQL_OU_TABLE_DEFINITION
+, SQL_SU_INDEX_DEFINITION => 0x00000008 # SQL_OU_INDEX_DEFINITION
+, SQL_SU_PRIVILEGE_DEFINITION => 0x00000010 # SQL_OU_PRIVILEGE_DEFINITION
+};
+# Cursor/scrolling capabilities and SQL-92 conformance bitmasks
+# (one bit per supported feature).
+$ReturnValues{SQL_SCROLL_CONCURRENCY} =
+{
+ SQL_SCCO_READ_ONLY => 0x00000001
+, SQL_SCCO_LOCK => 0x00000002
+, SQL_SCCO_OPT_ROWVER => 0x00000004
+, SQL_SCCO_OPT_VALUES => 0x00000008
+};
+$ReturnValues{SQL_SCROLL_OPTIONS} =
+{
+ SQL_SO_FORWARD_ONLY => 0x00000001
+, SQL_SO_KEYSET_DRIVEN => 0x00000002
+, SQL_SO_DYNAMIC => 0x00000004
+, SQL_SO_MIXED => 0x00000008
+, SQL_SO_STATIC => 0x00000010
+};
+$ReturnValues{SQL_SQL92_DATETIME_FUNCTIONS} =
+{
+ SQL_SDF_CURRENT_DATE => 0x00000001
+, SQL_SDF_CURRENT_TIME => 0x00000002
+, SQL_SDF_CURRENT_TIMESTAMP => 0x00000004
+};
+$ReturnValues{SQL_SQL92_FOREIGN_KEY_DELETE_RULE} =
+{
+ SQL_SFKD_CASCADE => 0x00000001
+, SQL_SFKD_NO_ACTION => 0x00000002
+, SQL_SFKD_SET_DEFAULT => 0x00000004
+, SQL_SFKD_SET_NULL => 0x00000008
+};
+$ReturnValues{SQL_SQL92_FOREIGN_KEY_UPDATE_RULE} =
+{
+ SQL_SFKU_CASCADE => 0x00000001
+, SQL_SFKU_NO_ACTION => 0x00000002
+, SQL_SFKU_SET_DEFAULT => 0x00000004
+, SQL_SFKU_SET_NULL => 0x00000008
+};
+$ReturnValues{SQL_SQL92_GRANT} =
+{
+ SQL_SG_USAGE_ON_DOMAIN => 0x00000001
+, SQL_SG_USAGE_ON_CHARACTER_SET => 0x00000002
+, SQL_SG_USAGE_ON_COLLATION => 0x00000004
+, SQL_SG_USAGE_ON_TRANSLATION => 0x00000008
+, SQL_SG_WITH_GRANT_OPTION => 0x00000010
+, SQL_SG_DELETE_TABLE => 0x00000020
+, SQL_SG_INSERT_TABLE => 0x00000040
+, SQL_SG_INSERT_COLUMN => 0x00000080
+, SQL_SG_REFERENCES_TABLE => 0x00000100
+, SQL_SG_REFERENCES_COLUMN => 0x00000200
+, SQL_SG_SELECT_TABLE => 0x00000400
+, SQL_SG_UPDATE_TABLE => 0x00000800
+, SQL_SG_UPDATE_COLUMN => 0x00001000
+};
+$ReturnValues{SQL_SQL92_NUMERIC_VALUE_FUNCTIONS} =
+{
+ SQL_SNVF_BIT_LENGTH => 0x00000001
+, SQL_SNVF_CHAR_LENGTH => 0x00000002
+, SQL_SNVF_CHARACTER_LENGTH => 0x00000004
+, SQL_SNVF_EXTRACT => 0x00000008
+, SQL_SNVF_OCTET_LENGTH => 0x00000010
+, SQL_SNVF_POSITION => 0x00000020
+};
+$ReturnValues{SQL_SQL92_PREDICATES} =
+{
+ SQL_SP_EXISTS => 0x00000001
+, SQL_SP_ISNOTNULL => 0x00000002
+, SQL_SP_ISNULL => 0x00000004
+, SQL_SP_MATCH_FULL => 0x00000008
+, SQL_SP_MATCH_PARTIAL => 0x00000010
+, SQL_SP_MATCH_UNIQUE_FULL => 0x00000020
+, SQL_SP_MATCH_UNIQUE_PARTIAL => 0x00000040
+, SQL_SP_OVERLAPS => 0x00000080
+, SQL_SP_UNIQUE => 0x00000100
+, SQL_SP_LIKE => 0x00000200
+, SQL_SP_IN => 0x00000400
+, SQL_SP_BETWEEN => 0x00000800
+, SQL_SP_COMPARISON => 0x00001000
+, SQL_SP_QUANTIFIED_COMPARISON => 0x00002000
+};
+$ReturnValues{SQL_SQL92_RELATIONAL_JOIN_OPERATORS} =
+{
+ SQL_SRJO_CORRESPONDING_CLAUSE => 0x00000001
+, SQL_SRJO_CROSS_JOIN => 0x00000002
+, SQL_SRJO_EXCEPT_JOIN => 0x00000004
+, SQL_SRJO_FULL_OUTER_JOIN => 0x00000008
+, SQL_SRJO_INNER_JOIN => 0x00000010
+, SQL_SRJO_INTERSECT_JOIN => 0x00000020
+, SQL_SRJO_LEFT_OUTER_JOIN => 0x00000040
+, SQL_SRJO_NATURAL_JOIN => 0x00000080
+, SQL_SRJO_RIGHT_OUTER_JOIN => 0x00000100
+, SQL_SRJO_UNION_JOIN => 0x00000200
+};
+$ReturnValues{SQL_SQL92_REVOKE} =
+{
+ SQL_SR_USAGE_ON_DOMAIN => 0x00000001
+, SQL_SR_USAGE_ON_CHARACTER_SET => 0x00000002
+, SQL_SR_USAGE_ON_COLLATION => 0x00000004
+, SQL_SR_USAGE_ON_TRANSLATION => 0x00000008
+, SQL_SR_GRANT_OPTION_FOR => 0x00000010
+, SQL_SR_CASCADE => 0x00000020
+, SQL_SR_RESTRICT => 0x00000040
+, SQL_SR_DELETE_TABLE => 0x00000080
+, SQL_SR_INSERT_TABLE => 0x00000100
+, SQL_SR_INSERT_COLUMN => 0x00000200
+, SQL_SR_REFERENCES_TABLE => 0x00000400
+, SQL_SR_REFERENCES_COLUMN => 0x00000800
+, SQL_SR_SELECT_TABLE => 0x00001000
+, SQL_SR_UPDATE_TABLE => 0x00002000
+, SQL_SR_UPDATE_COLUMN => 0x00004000
+};
+$ReturnValues{SQL_SQL92_ROW_VALUE_CONSTRUCTOR} =
+{
+ SQL_SRVC_VALUE_EXPRESSION => 0x00000001
+, SQL_SRVC_NULL => 0x00000002
+, SQL_SRVC_DEFAULT => 0x00000004
+, SQL_SRVC_ROW_SUBQUERY => 0x00000008
+};
+$ReturnValues{SQL_SQL92_STRING_FUNCTIONS} =
+{
+ SQL_SSF_CONVERT => 0x00000001
+, SQL_SSF_LOWER => 0x00000002
+, SQL_SSF_UPPER => 0x00000004
+, SQL_SSF_SUBSTRING => 0x00000008
+, SQL_SSF_TRANSLATE => 0x00000010
+, SQL_SSF_TRIM_BOTH => 0x00000020
+, SQL_SSF_TRIM_LEADING => 0x00000040
+, SQL_SSF_TRIM_TRAILING => 0x00000080
+};
+$ReturnValues{SQL_SQL92_VALUE_EXPRESSIONS} =
+{
+ SQL_SVE_CASE => 0x00000001
+, SQL_SVE_CAST => 0x00000002
+, SQL_SVE_COALESCE => 0x00000004
+, SQL_SVE_NULLIF => 0x00000008
+};
+$ReturnValues{SQL_SQL_CONFORMANCE} =
+{
+ SQL_SC_SQL92_ENTRY => 0x00000001
+, SQL_SC_FIPS127_2_TRANSITIONAL => 0x00000002
+, SQL_SC_SQL92_INTERMEDIATE => 0x00000004
+, SQL_SC_SQL92_FULL => 0x00000008
+};
+$ReturnValues{SQL_STANDARD_CLI_CONFORMANCE} =
+{
+ SQL_SCC_XOPEN_CLI_VERSION1 => 0x00000001
+, SQL_SCC_ISO92_CLI => 0x00000002
+};
+$ReturnValues{SQL_STATIC_SENSITIVITY} =
+{
+ SQL_SS_ADDITIONS => 0x00000001
+, SQL_SS_DELETIONS => 0x00000002
+, SQL_SS_UPDATES => 0x00000004
+};
+# Scalar-function, timedate and transaction capability maps.
+$ReturnValues{SQL_STRING_FUNCTIONS} =
+{
+ SQL_FN_STR_CONCAT => 0x00000001
+, SQL_FN_STR_INSERT => 0x00000002
+, SQL_FN_STR_LEFT => 0x00000004
+, SQL_FN_STR_LTRIM => 0x00000008
+, SQL_FN_STR_LENGTH => 0x00000010
+, SQL_FN_STR_LOCATE => 0x00000020
+, SQL_FN_STR_LCASE => 0x00000040
+, SQL_FN_STR_REPEAT => 0x00000080
+, SQL_FN_STR_REPLACE => 0x00000100
+, SQL_FN_STR_RIGHT => 0x00000200
+, SQL_FN_STR_RTRIM => 0x00000400
+, SQL_FN_STR_SUBSTRING => 0x00000800
+, SQL_FN_STR_UCASE => 0x00001000
+, SQL_FN_STR_ASCII => 0x00002000
+, SQL_FN_STR_CHAR => 0x00004000
+, SQL_FN_STR_DIFFERENCE => 0x00008000
+, SQL_FN_STR_LOCATE_2 => 0x00010000
+, SQL_FN_STR_SOUNDEX => 0x00020000
+, SQL_FN_STR_SPACE => 0x00040000
+, SQL_FN_STR_BIT_LENGTH => 0x00080000
+, SQL_FN_STR_CHAR_LENGTH => 0x00100000
+, SQL_FN_STR_CHARACTER_LENGTH => 0x00200000
+, SQL_FN_STR_OCTET_LENGTH => 0x00400000
+, SQL_FN_STR_POSITION => 0x00800000
+};
+$ReturnValues{SQL_SUBQUERIES} =
+{
+ SQL_SQ_COMPARISON => 0x00000001
+, SQL_SQ_EXISTS => 0x00000002
+, SQL_SQ_IN => 0x00000004
+, SQL_SQ_QUANTIFIED => 0x00000008
+, SQL_SQ_CORRELATED_SUBQUERIES => 0x00000010
+};
+$ReturnValues{SQL_SYSTEM_FUNCTIONS} =
+{
+ SQL_FN_SYS_USERNAME => 0x00000001
+, SQL_FN_SYS_DBNAME => 0x00000002
+, SQL_FN_SYS_IFNULL => 0x00000004
+};
+$ReturnValues{SQL_TIMEDATE_ADD_INTERVALS} =
+{
+ SQL_FN_TSI_FRAC_SECOND => 0x00000001
+, SQL_FN_TSI_SECOND => 0x00000002
+, SQL_FN_TSI_MINUTE => 0x00000004
+, SQL_FN_TSI_HOUR => 0x00000008
+, SQL_FN_TSI_DAY => 0x00000010
+, SQL_FN_TSI_WEEK => 0x00000020
+, SQL_FN_TSI_MONTH => 0x00000040
+, SQL_FN_TSI_QUARTER => 0x00000080
+, SQL_FN_TSI_YEAR => 0x00000100
+};
+$ReturnValues{SQL_TIMEDATE_FUNCTIONS} =
+{
+ SQL_FN_TD_NOW => 0x00000001
+, SQL_FN_TD_CURDATE => 0x00000002
+, SQL_FN_TD_DAYOFMONTH => 0x00000004
+, SQL_FN_TD_DAYOFWEEK => 0x00000008
+, SQL_FN_TD_DAYOFYEAR => 0x00000010
+, SQL_FN_TD_MONTH => 0x00000020
+, SQL_FN_TD_QUARTER => 0x00000040
+, SQL_FN_TD_WEEK => 0x00000080
+, SQL_FN_TD_YEAR => 0x00000100
+, SQL_FN_TD_CURTIME => 0x00000200
+, SQL_FN_TD_HOUR => 0x00000400
+, SQL_FN_TD_MINUTE => 0x00000800
+, SQL_FN_TD_SECOND => 0x00001000
+, SQL_FN_TD_TIMESTAMPADD => 0x00002000
+, SQL_FN_TD_TIMESTAMPDIFF => 0x00004000
+, SQL_FN_TD_DAYNAME => 0x00008000
+, SQL_FN_TD_MONTHNAME => 0x00010000
+, SQL_FN_TD_CURRENT_DATE => 0x00020000
+, SQL_FN_TD_CURRENT_TIME => 0x00040000
+, SQL_FN_TD_CURRENT_TIMESTAMP => 0x00080000
+, SQL_FN_TD_EXTRACT => 0x00100000
+};
+# Enumeration (sequential codes 0..4), not a bitmask.
+$ReturnValues{SQL_TXN_CAPABLE} =
+{
+ SQL_TC_NONE => 0
+, SQL_TC_DML => 1
+, SQL_TC_ALL => 2
+, SQL_TC_DDL_COMMIT => 3
+, SQL_TC_DDL_IGNORE => 4
+};
+$ReturnValues{SQL_TRANSACTION_ISOLATION_OPTION} =
+{
+ SQL_TRANSACTION_READ_UNCOMMITTED => 0x00000001 # SQL_TXN_READ_UNCOMMITTED
+, SQL_TRANSACTION_READ_COMMITTED => 0x00000002 # SQL_TXN_READ_COMMITTED
+, SQL_TRANSACTION_REPEATABLE_READ => 0x00000004 # SQL_TXN_REPEATABLE_READ
+, SQL_TRANSACTION_SERIALIZABLE => 0x00000008 # SQL_TXN_SERIALIZABLE
+};
+# Alias: shares the same hashref as SQL_TRANSACTION_ISOLATION_OPTION.
+$ReturnValues{SQL_DEFAULT_TRANSACTION_ISOLATION} = $ReturnValues{SQL_TRANSACTION_ISOLATION_OPTION};
+
+$ReturnValues{SQL_TXN_ISOLATION_OPTION} =
+{
+ SQL_TXN_READ_UNCOMMITTED => 0x00000001
+, SQL_TXN_READ_COMMITTED => 0x00000002
+, SQL_TXN_REPEATABLE_READ => 0x00000004
+, SQL_TXN_SERIALIZABLE => 0x00000008
+};
+# Alias: shares the same hashref as SQL_TXN_ISOLATION_OPTION.
+$ReturnValues{SQL_DEFAULT_TXN_ISOLATION} = $ReturnValues{SQL_TXN_ISOLATION_OPTION};
+
+$ReturnValues{SQL_TXN_VERSIONING} =
+{
+ SQL_TXN_VERSIONING => 0x00000010
+};
+$ReturnValues{SQL_UNION} =
+{
+ SQL_U_UNION => 0x00000001
+, SQL_U_UNION_ALL => 0x00000002
+};
+$ReturnValues{SQL_UNION_STATEMENT} =
+{
+ SQL_US_UNION => 0x00000001 # SQL_U_UNION
+, SQL_US_UNION_ALL => 0x00000002 # SQL_U_UNION_ALL
+};
+
+# End of module: require() must see a true value.
+1;
+
+=head1 TODO
+
+ Corrections?
+ SQL_NULL_COLLATION: ODBC vs ANSI
+ Unique values for $ReturnValues{...}?, e.g. SQL_FILE_USAGE
+
+=cut
diff --git a/lib/DBI/Const/GetInfoReturn.pm b/lib/DBI/Const/GetInfoReturn.pm
new file mode 100644
index 0000000..d07b7ac
--- /dev/null
+++ b/lib/DBI/Const/GetInfoReturn.pm
@@ -0,0 +1,105 @@
+# $Id: GetInfoReturn.pm 8696 2007-01-24 23:12:38Z timbo $
+#
+# Copyright (c) 2002 Tim Bunce Ireland
+#
+# Constant data describing return values from the DBI getinfo function.
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+package DBI::Const::GetInfoReturn;
+
+use strict;
+
+use Exporter ();
+
+use vars qw(@ISA @EXPORT @EXPORT_OK %GetInfoReturnTypes %GetInfoReturnValues);
+
+@ISA = qw(Exporter);
+# Both lookup hashes are exported by default (classic Exporter interface).
+@EXPORT = qw(%GetInfoReturnTypes %GetInfoReturnValues);
+
+# NOTE(review): the lone 'my' keeps "$VERSION =" at the start of its own line
+# — presumably for tools that scan for that pattern; confirm before
+# reformatting.  The version number is derived from the svn Revision keyword.
+my
+$VERSION = sprintf("2.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
+
+
+=head1 NAME
+
+DBI::Const::GetInfoReturn - Data and functions for describing GetInfo results
+
+=head1 SYNOPSIS
+
+The interface to this module is undocumented and liable to change.
+
+=head1 DESCRIPTION
+
+Data and functions for describing GetInfo results
+
+=cut
+
+use DBI::Const::GetInfoType;
+
+use DBI::Const::GetInfo::ANSI ();
+use DBI::Const::GetInfo::ODBC ();
+
+# Merge the ANSI and ODBC return-type tables.  Perl list semantics mean the
+# ODBC entries, listed second, win for any info type present in both.
+%GetInfoReturnTypes =
+(
+ %DBI::Const::GetInfo::ANSI::ReturnTypes
+, %DBI::Const::GetInfo::ODBC::ReturnTypes
+);
+
+# Build the merged per-info-type value maps.  Fresh copies are taken so the
+# merged map does not alias the source ANSI/ODBC hashes.
+%GetInfoReturnValues = ();
+{
+ my $A = \%DBI::Const::GetInfo::ANSI::ReturnValues;
+ my $O = \%DBI::Const::GetInfo::ODBC::ReturnValues;
+ while ( my ($k, $v) = each %$A ) {
+ # Info types known to both: merge the value maps, ODBC entries override.
+ my %h = ( exists $O->{$k} ) ? ( %$v, %{$O->{$k}} ) : %$v;
+ $GetInfoReturnValues{$k} = \%h;
+ }
+ # Add the ODBC-only info types (keys shared with ANSI were handled above).
+ while ( my ($k, $v) = each %$O ) {
+ next if exists $A->{$k};
+ my %h = %$v;
+ $GetInfoReturnValues{$k} = \%h;
+ }
+}
+
+# -----------------------------------------------------------------------------
+
+# Format($InfoType, $Value) — render a get_info() result for display.
+# Bitmask return types are shown as fixed-width hex ("0x%08X"); every other
+# type is returned unchanged.  An undefined $Value yields ''.
+sub Format {
+ my $InfoType = shift;
+ my $Value = shift;
+
+ return '' unless defined $Value;
+
+ my $ReturnType = $GetInfoReturnTypes{$InfoType};
+
+ # NOTE(review): $ReturnType is undef for an unknown $InfoType, so the eq
+ # tests below would warn under 'use warnings' (this file only uses strict).
+ return sprintf '0x%08X', $Value if $ReturnType eq 'SQLUINTEGER bitmask';
+ return sprintf '0x%08X', $Value if $ReturnType eq 'SQLINTEGER bitmask';
+# return '"' . $Value . '"' if $ReturnType eq 'SQLCHAR';
+ return $Value;
+}
+
+
+# Explain($InfoType, $Value) — translate a numeric get_info() result back
+# into symbolic constant name(s) via %GetInfoReturnValues.
+#   - bitmask types: names of all set bits, ordered by ascending bit value;
+#     a list in list context, a space-joined string in scalar context;
+#   - other types: the single matching name, or '?' if the value is unknown.
+# Returns '' when $Value is undef or the info type has no value map.
+sub Explain {
+ my $InfoType = shift;
+ my $Value = shift;
+
+ return '' unless defined $Value;
+ return '' unless exists $GetInfoReturnValues{$InfoType};
+
+ $Value = int $Value;
+ my $ReturnType = $GetInfoReturnTypes{$InfoType};
+ # Invert the name => code map to code => name for lookup by value.
+ my %h = reverse %{$GetInfoReturnValues{$InfoType}};
+
+ if ( $ReturnType eq 'SQLUINTEGER bitmask'|| $ReturnType eq 'SQLINTEGER bitmask') {
+ my @a = ();
+ for my $k ( sort { $a <=> $b } keys %h ) {
+ push @a, $h{$k} if $Value & $k;
+ }
+ return wantarray ? @a : join(' ', @a );
+ }
+ else {
+ return $h{$Value} ||'?';
+ }
+}
+
+1;
diff --git a/lib/DBI/Const/GetInfoType.pm b/lib/DBI/Const/GetInfoType.pm
new file mode 100644
index 0000000..7c01778
--- /dev/null
+++ b/lib/DBI/Const/GetInfoType.pm
@@ -0,0 +1,54 @@
+# $Id: GetInfoType.pm 8696 2007-01-24 23:12:38Z timbo $
+#
+# Copyright (c) 2002 Tim Bunce Ireland
+#
+# Constant data describing info type codes for the DBI getinfo function.
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+package DBI::Const::GetInfoType;
+
+use strict;
+
+use Exporter ();
+
+use vars qw(@ISA @EXPORT @EXPORT_OK %GetInfoType);
+
+@ISA = qw(Exporter);
+# %GetInfoType is exported by default (classic Exporter interface).
+@EXPORT = qw(%GetInfoType);
+
+# NOTE(review): the lone 'my' keeps "$VERSION =" at the start of its own line
+# — presumably for tools that scan for that pattern; confirm before
+# reformatting.  The version number is derived from the svn Revision keyword.
+my
+$VERSION = sprintf("2.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
+
+
+=head1 NAME
+
+DBI::Const::GetInfoType - Data describing GetInfo type codes
+
+=head1 SYNOPSIS
+
+ use DBI::Const::GetInfoType;
+
+=head1 DESCRIPTION
+
+Imports a %GetInfoType hash which maps names for GetInfo Type Codes
+into their corresponding numeric values. For example:
+
+ $database_version = $dbh->get_info( $GetInfoType{SQL_DBMS_VER} );
+
+The interface to this module is new and nothing beyond what is
+written here is guaranteed.
+
+=cut
+
+use DBI::Const::GetInfo::ANSI (); # liable to change
+use DBI::Const::GetInfo::ODBC (); # liable to change
+
+# Merge the ANSI and ODBC info-type name => number tables; the ODBC entries,
+# listed second, win for any name defined in both.
+%GetInfoType =
+(
+ %DBI::Const::GetInfo::ANSI::InfoTypes # liable to change
+, %DBI::Const::GetInfo::ODBC::InfoTypes # liable to change
+);
+
+# End of module: require() must see a true value.
+1;
diff --git a/lib/DBI/DBD.pm b/lib/DBI/DBD.pm
new file mode 100644
index 0000000..6f8bf8c
--- /dev/null
+++ b/lib/DBI/DBD.pm
@@ -0,0 +1,3489 @@
+package DBI::DBD;
+# vim:ts=8:sw=4
+
+use vars qw($VERSION); # set $VERSION early so we don't confuse PAUSE/CPAN etc
+
+# don't use Revision here because that's not in svn:keywords so that the
+# examples that use it below won't be messed up
+# (the version is instead derived from the revision number in the $Id$ keyword)
+$VERSION = sprintf("12.%06d", q$Id: DBD.pm 15128 2012-02-04 20:51:39Z timbo $ =~ /(\d+)/o);
+
+
+# $Id: DBD.pm 15128 2012-02-04 20:51:39Z timbo $
+#
+# Copyright (c) 1997-2006 Jonathan Leffler, Jochen Wiedmann, Steffen
+# Goeldner and Tim Bunce
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+=head1 NAME
+
+DBI::DBD - Perl DBI Database Driver Writer's Guide
+
+=head1 SYNOPSIS
+
+ perldoc DBI::DBD
+
+=head2 Version and volatility
+
+This document is I<still> a minimal draft which is in need of further work.
+
+The changes will occur both because the B<DBI> specification is changing
+and hence the requirements on B<DBD> drivers change, and because feedback
+from people reading this document will suggest improvements to it.
+
+Please read the B<DBI> documentation first and fully, including the B<DBI> FAQ.
+Then reread the B<DBI> specification again as you're reading this. It'll help.
+
+This document is a patchwork of contributions from various authors.
+More contributions (preferably as patches) are very welcome.
+
+=head1 DESCRIPTION
+
+This document is primarily intended to help people writing new
+database drivers for the Perl Database Interface (Perl DBI).
+It may also help others interested in discovering why the internals of
+a B<DBD> driver are written the way they are.
+
+This is a guide. Few (if any) of the statements in it are completely
+authoritative under all possible circumstances. This means you will
+need to use judgement in applying the guidelines in this document.
+If in I<any> doubt at all, please do contact the I<dbi-dev> mailing list
+(details given below) where Tim Bunce and other driver authors can help.
+
+=head1 CREATING A NEW DRIVER
+
+The first rule for creating a new database driver for the Perl DBI is
+very simple: B<DON'T!>
+
+There is usually a driver already available for the database you want
+to use, almost regardless of which database you choose. Very often, the
+database will provide an ODBC driver interface, so you can often use
+B<DBD::ODBC> to access the database. This is typically less convenient
+on a Unix box than on a Microsoft Windows box, but there are numerous
+options for ODBC driver managers on Unix too, and very often the ODBC
+driver is provided by the database supplier.
+
+Before deciding that you need to write a driver, do your homework to
+ensure that you are not wasting your energies.
+
+[As of December 2002, the consensus is that if you need an ODBC driver
+manager on Unix, then the unixODBC driver (available from
+L<http://www.unixodbc.org/>) is the way to go.]
+
+The second rule for creating a new database driver for the Perl DBI is
+also very simple: B<Don't -- get someone else to do it for you!>
+
+Nevertheless, there are occasions when it is necessary to write a new
+driver, often to use a proprietary language or API to access the
+database more swiftly, or more comprehensively, than an ODBC driver can.
+Then you should read this document very carefully, but with a suitably
+sceptical eye.
+
+If there is something in here that does not make any sense, question it.
+You might be right that the information is bogus, but don't come to that
+conclusion too quickly.
+
+=head2 URLs and mailing lists
+
+The primary web-site for locating B<DBI> software and information is
+
+ http://dbi.perl.org/
+
+There are two main and one auxiliary mailing lists for people working
+with B<DBI>. The primary lists are I<dbi-users@perl.org> for general users
+of B<DBI> and B<DBD> drivers, and I<dbi-dev@perl.org> mainly for B<DBD> driver
+writers (don't join the I<dbi-dev> list unless you have a good reason).
+The auxiliary list is I<dbi-announce@perl.org> for announcing new
+releases of B<DBI> or B<DBD> drivers.
+
+You can join these lists by accessing the web-site L<http://dbi.perl.org/>.
+The lists are closed so you cannot send email to any of the lists
+unless you join the list first.
+
+You should also consider monitoring the I<comp.lang.perl.*> newsgroups,
+especially I<comp.lang.perl.modules>.
+
+=head2 The Cheetah book
+
+The definitive book on Perl DBI is the Cheetah book, so called because
+of the picture on the cover. Its proper title is 'I<Programming the
+Perl DBI: Database programming with Perl>' by Alligator Descartes
+and Tim Bunce, published by O'Reilly Associates, February 2000, ISBN
+1-56592-699-4. Buy it now if you have not already done so, and read it.
+
+=head2 Locating drivers
+
+Before writing a new driver, it is in your interests to find out
+whether there already is a driver for your database. If there is such
+a driver, it would be much easier to make use of it than to write your
+own!
+
+The primary web-site for locating Perl software is
+L<http://search.cpan.org/>. You should look under the various
+modules listings for the software you are after. For example:
+
+ http://search.cpan.org/modlist/Database_Interfaces
+
+Follow the B<DBD::> and B<DBIx::> links at the top to see those subsets.
+
+See the B<DBI> docs for information on B<DBI> web sites and mailing lists.
+
+=head2 Registering a new driver
+
+Before going through any official registration process, you will need
+to establish that there is no driver already in the works. You'll do
+that by asking the B<DBI> mailing lists whether there is such a driver
+available, or whether anybody is working on one.
+
+When you get the go ahead, you will need to establish the name of the
+driver and a prefix for the driver. Typically, the name is based on the
+name of the database software it uses, and the prefix is a contraction
+of that. Hence, B<DBD::Oracle> has the name I<Oracle> and the prefix
+'I<ora_>'. The prefix must be lowercase and contain no underscores other
+than the one at the end.
+
+This information will be recorded in the B<DBI> module. Apart from
+documentation purposes, registration is a prerequisite for
+L<installing private methods|DBI/install_method>.
+
+If you are writing a driver which will not be distributed on CPAN, then
+you should choose a prefix beginning with 'I<x_>', to avoid potential
+prefix collisions with drivers registered in the future. Thus, if you
+wrote a non-CPAN distributed driver called B<DBD::CustomDB>, the prefix
+might be 'I<x_cdb_>'.
+
+This document assumes you are writing a driver called B<DBD::Driver>, and
+that the prefix 'I<drv_>' is assigned to the driver.
+
+=head2 Two styles of database driver
+
+There are two distinct styles of database driver that can be written to
+work with the Perl DBI.
+
+Your driver can be written in pure Perl, requiring no C compiler.
+When feasible, this is the best solution, but most databases are not
+written in such a way that this can be done. Some examples of pure
+Perl drivers are B<DBD::File> and B<DBD::CSV>.
+
+Alternatively, and most commonly, your driver will need to use some C
+code to gain access to the database. This will be classified as a C/XS
+driver.
+
+=head2 What code will you write?
+
+There are a number of files that need to be written for either a pure
+Perl driver or a C/XS driver. There are no extra files needed only by
+a pure Perl driver, but there are several extra files needed only by a
+C/XS driver.
+
+=head3 Files common to pure Perl and C/XS drivers
+
+Assuming that your driver is called B<DBD::Driver>, these files are:
+
+=over 4
+
+=item * F<Makefile.PL>
+
+=item * F<META.yml>
+
+=item * F<README>
+
+=item * F<MANIFEST>
+
+=item * F<Driver.pm>
+
+=item * F<lib/Bundle/DBD/Driver.pm>
+
+=item * F<lib/DBD/Driver/Summary.pm>
+
+=item * F<t/*.t>
+
+=back
+
+The first four files are mandatory. F<Makefile.PL> is used to control
+how the driver is built and installed. The F<README> file tells people
+who download the file about how to build the module and any prerequisite
+software that must be installed. The F<MANIFEST> file is used by the
+standard Perl module distribution mechanism. It lists all the source
+files that need to be distributed with your module. F<Driver.pm> is what
+is loaded by the B<DBI> code; it contains the methods peculiar to your
+driver.
+
+Although the F<META.yml> file is not B<required> you are advised to
+create one. Of particular importance are the I<build_requires> and
+I<configure_requires> attributes which newer CPAN modules understand.
+You use these to tell the CPAN module (and CPANPLUS) that your build
+and configure mechanisms require DBI. The best reference for META.yml
+(at the time of writing) is
+L<http://module-build.sourceforge.net/META-spec-v1.4.html>. You can find
+a reasonable example of a F<META.yml> in DBD::ODBC.
+
+The F<lib/Bundle/DBD/Driver.pm> file allows you to specify other Perl
+modules on which yours depends in a format that allows someone to type a
+simple command and ensure that all the pre-requisites are in place as
+well as building your driver.
+
+The F<lib/DBD/Driver/Summary.pm> file contains (an updated version of) the
+information that was included - or that would have been included - in
+the appendices of the Cheetah book as a summary of the abilities of your
+driver and the associated database.
+
+The files in the F<t> subdirectory are unit tests for your driver.
+You should write your tests as stringently as possible, while taking
+into account the diversity of installations that you can encounter:
+
+=over 4
+
+=item *
+
+Your tests should not casually modify operational databases.
+
+=item *
+
+You should never damage existing tables in a database.
+
+=item *
+
+You should code your tests to use a constrained name space within the
+database. For example, the tables (and all other named objects) that are
+created could all begin with 'I<dbd_drv_>'.
+
+=item *
+
+At the end of a test run, there should be no testing objects left behind
+in the database.
+
+=item *
+
+If you create any databases, you should remove them.
+
+=item *
+
+If your database supports temporary tables that are automatically
+removed at the end of a session, then exploit them as often as possible.
+
+=item *
+
+Try to make your tests independent of each other. If you have a
+test F<t/t11dowhat.t> that depends upon the successful running
+of F<t/t10thingamy.t>, people cannot run the single test case
+F<t/t11dowhat.t>. Further, running F<t/t11dowhat.t> twice in a row is
+likely to fail (at least, if F<t/t11dowhat.t> modifies the database at
+all) because the database at the start of the second run is not what you
+saw at the start of the first run.
+
+=item *
+
+Document in your F<README> file what you do, and what privileges people
+need to do it.
+
+=item *
+
+You can, and probably should, sequence your tests by including a test
+number before an abbreviated version of the test name; the tests are run
+in the order in which the names are expanded by shell-style globbing.
+
+=item *
+
+It is in your interests to ensure that your tests work as widely
+as possible.
+
+=back
+
+Many drivers also install sub-modules B<DBD::Driver::SubModule>
+for any of a variety of different reasons, such as to support
+the metadata methods (see the discussion of L</METADATA METHODS>
+below). Such sub-modules are conventionally stored in the directory
+F<lib/DBD/Driver>. The module itself would usually be in a file
+F<SubModule.pm>. All such sub-modules should themselves be version
+stamped (see the discussions far below).
+
+=head3 Extra files needed by C/XS drivers
+
+The software for a C/XS driver will typically contain at least four
+extra files that are not relevant to a pure Perl driver.
+
+=over 4
+
+=item * F<Driver.xs>
+
+=item * F<Driver.h>
+
+=item * F<dbdimp.h>
+
+=item * F<dbdimp.c>
+
+=back
+
+The F<Driver.xs> file is used to generate C code that Perl can call to gain
+access to the C functions you write that will, in turn, call down onto
+your database software.
+
+The F<Driver.h> header is a stylized header that ensures you can access the
+necessary Perl and B<DBI> macros, types, and function declarations.
+
+The F<dbdimp.h> is used to specify which functions have been implemented by
+your driver.
+
+The F<dbdimp.c> file is where you write the C code that does the real work
+of translating between Perl-ish data types and what the database expects
+to use and return.
+
+There are some (mainly small, but very important) differences between
+the contents of F<Makefile.PL> and F<Driver.pm> for pure Perl and C/XS
+drivers, so those files are described both in the section on creating a
+pure Perl driver and in the section on creating a C/XS driver.
+
+Obviously, you can add extra source code files to the list.
+
+=head2 Requirements on a driver and driver writer
+
+To be remotely useful, your driver must be implemented in a format that
+allows it to be distributed via CPAN, the Comprehensive Perl Archive
+Network (L<http://www.cpan.org/> and L<http://search.cpan.org>).
+Of course, it is easier if you do not have to meet this criterion, but
+you will not be able to ask for much help if you do not do so, and
+no-one is likely to want to install your module if they have to learn a
+new installation mechanism.
+
+=head1 CREATING A PURE PERL DRIVER
+
+Writing a pure Perl driver is surprisingly simple. However, there are
+some problems you should be aware of. The best option is of course
+picking up an existing driver and carefully modifying one method
+after the other.
+
+Also look carefully at B<DBD::AnyData> and B<DBD::Template>.
+
+As an example we take a look at the B<DBD::File> driver, a driver for
+accessing plain files as tables, which is part of the B<DBD::CSV> package.
+
+The minimal set of files we have to implement are F<Makefile.PL>,
+F<README>, F<MANIFEST> and F<Driver.pm>.
+
+=head2 Pure Perl version of Makefile.PL
+
+You typically start with writing F<Makefile.PL>, a Makefile
+generator. The contents of this file are described in detail in
+the L<ExtUtils::MakeMaker> man pages. It is definitely a good idea
+if you start reading them. At least you should know about the
+variables I<CONFIGURE>, I<DEFINE>, I<PM>, I<DIR>, I<EXE_FILES>,
+I<INC>, I<LIBS>, I<LINKTYPE>, I<NAME>, I<OPTIMIZE>, I<PL_FILES>,
+I<VERSION>, I<VERSION_FROM>, I<clean>, I<depend>, I<realclean> from
+the L<ExtUtils::MakeMaker> man page: these are used in almost any
+F<Makefile.PL>.
+
+Additionally read the section on I<Overriding MakeMaker Methods> and the
+descriptions of the I<distcheck>, I<disttest> and I<dist> targets: They
+will definitely be useful for you.
+
+Of special importance for B<DBI> drivers is the I<postamble> method from
+the L<ExtUtils::MM_Unix> man page.
+
+For Emacs users, I recommend the I<libscan> method, which removes
+Emacs backup files (file names which end with a tilde '~') from lists of
+files.
+
+Now an example, I use the word C<Driver> wherever you should insert
+your driver's name:
+
+ # -*- perl -*-
+
+ use ExtUtils::MakeMaker;
+
+ WriteMakefile(
+ dbd_edit_mm_attribs( {
+ 'NAME' => 'DBD::Driver',
+ 'VERSION_FROM' => 'Driver.pm',
+ 'INC' => '',
+ 'dist' => { 'SUFFIX' => '.gz',
+ 'COMPRESS' => 'gzip -9f' },
+ 'realclean' => { FILES => '*.xsi' },
+ 'PREREQ_PM' => { 'DBI' => '1.03' },
+ 'CONFIGURE' => sub {
+ eval {require DBI::DBD;};
+ if ($@) {
+ warn $@;
+ exit 0;
+ }
+ my $dbi_arch_dir = dbd_dbi_arch_dir();
+ if (exists($opts{INC})) {
+ return {INC => "$opts{INC} -I$dbi_arch_dir"};
+ } else {
+ return {INC => "-I$dbi_arch_dir"};
+ }
+ }
+ },
+ { create_pp_tests => 1})
+ );
+
+ package MY;
+ sub postamble { return main::dbd_postamble(@_); }
+ sub libscan {
+ my ($self, $path) = @_;
+ ($path =~ m/\~$/) ? undef : $path;
+ }
+
+Note the calls to C<dbd_edit_mm_attribs()> and C<dbd_postamble()>.
+
+The second hash reference in the call to C<dbd_edit_mm_attribs()>
+(containing C<create_pp_tests()>) is optional; you should not use it
+unless your driver is a pure Perl driver (that is, it does not use C and
+XS code). Therefore, the call to C<dbd_edit_mm_attribs()> is not
+relevant for C/XS drivers and may be omitted; simply use the (single)
+hash reference containing NAME etc as the only argument to C<WriteMakefile()>.
+
+Note that the C<dbd_edit_mm_attribs()> code will fail if you do not have a
+F<t> sub-directory containing at least one test case.
+
+I<PREREQ_PM> tells MakeMaker that DBI (version 1.03 in this case) is
+required for this module. This will issue a warning that DBI 1.03 is
+missing if someone attempts to install your DBD without DBI 1.03. See
+I<CONFIGURE> below for why this does not work reliably in stopping cpan
+testers failing your module if DBI is not installed.
+
+I<CONFIGURE> is a subroutine called by MakeMaker during
+C<WriteMakefile>. By putting the C<require DBI::DBD> in this section
+we can attempt to load DBI::DBD but if it is missing we exit with
+success. As we exit successfully without creating a Makefile when
+DBI::DBD is missing cpan testers will not report a failure. This may
+seem at odds with I<PREREQ_PM> but I<PREREQ_PM> does not cause
+C<WriteMakefile> to fail (unless you also specify PREREQ_FATAL which
+is strongly discouraged by MakeMaker) so C<WriteMakefile> would
+continue to call C<dbd_dbi_arch_dir> and fail.
+
+All drivers must use C<dbd_postamble()> or risk running into problems.
+
+Note the specification of I<VERSION_FROM>; the named file
+(F<Driver.pm>) will be scanned for the first line that looks like an
+assignment to I<$VERSION>, and the subsequent text will be used to
+determine the version number. Note the commentary in
+L<ExtUtils::MakeMaker> on the subject of correctly formatted version
+numbers.
+
+If your driver depends upon external software (it usually will), you
+will need to add code to ensure that your environment is workable
+before the call to C<WriteMakefile()>. If you need to check for the
+existence of an external library and perhaps modify I<INC> to include
+the paths to where the external library header files are located and
+you cannot find the library or header files make sure you output a
+message saying they cannot be found but C<exit 0> (success) B<before>
+calling C<WriteMakefile> or CPAN testers will fail your module if the
+external library is not found.
+
+A full-fledged I<Makefile.PL> can be quite large (for example, the
+files for B<DBD::Oracle> and B<DBD::Informix> are both over 1000 lines
+long, and the Informix one uses - and creates - auxiliary modules
+too).
+
+See also L<ExtUtils::MakeMaker> and L<ExtUtils::MM_Unix>. Consider using
+L<CPAN::MakeMaker> in place of I<ExtUtils::MakeMaker>.
+
+=head2 README
+
+The F<README> file should describe what the driver is for, the
+pre-requisites for the build process, the actual build process, how to
+report errors, and who to report them to.
+
+Users will find ways of breaking the driver build and test process
+which you would never even have dreamed to be possible in your worst
+nightmares. Therefore, you need to write this document defensively,
+precisely and concisely.
+
+As always, use the F<README> from one of the established drivers as a basis
+for your own; the version in B<DBD::Informix> is worth a look as it has
+been quite successful in heading off problems.
+
+=over 4
+
+=item *
+
+Note that users will have versions of Perl and B<DBI> that are both older
+and newer than you expected, but this will seldom cause much trouble.
+When it does, it will be because you are using features of B<DBI> that are
+not supported in the version they are using.
+
+=item *
+
+Note that users will have versions of the database software that are
+both older and newer than you expected. You will save yourself time in
+the long run if you can identify the range of versions which have been
+tested and warn about versions which are not known to be OK.
+
+=item *
+
+Note that many people trying to install your driver will not be experts
+in the database software.
+
+=item *
+
+Note that many people trying to install your driver will not be experts
+in C or Perl.
+
+=back
+
+=head2 MANIFEST
+
+The F<MANIFEST> will be used by the Makefile's dist target to build the
+distribution tar file that is uploaded to CPAN. It should list every
+file that you want to include in your distribution, one per line.
+
+=head2 lib/Bundle/DBD/Driver.pm
+
+The CPAN module provides an extremely powerful bundle mechanism that
+allows you to specify pre-requisites for your driver.
+
+The primary pre-requisite is B<Bundle::DBI>; you may want or need to add
+some more. With the bundle set up correctly, the user can type:
+
+ perl -MCPAN -e 'install Bundle::DBD::Driver'
+
+and Perl will download, compile, test and install all the Perl modules
+needed to build your driver.
+
+The prerequisite modules are listed in the C<CONTENTS> section, with the
+official name of the module followed by a dash and an informal name or
+description.
+
+=over 4
+
+=item *
+
+Listing B<Bundle::DBI> as the main pre-requisite simplifies life.
+
+=item *
+
+Don't forget to list your driver.
+
+=item *
+
+Note that unless the DBMS is itself a Perl module, you cannot list it as
+a pre-requisite in this file.
+
+=item *
+
+You should keep the version of the bundle the same as the version of
+your driver.
+
+=item *
+
+You should add configuration management, copyright, and licencing
+information at the top.
+
+=back
+
+A suitable skeleton for this file is shown below.
+
+ package Bundle::DBD::Driver;
+
+ $VERSION = '0.01';
+
+ 1;
+
+ __END__
+
+ =head1 NAME
+
+ Bundle::DBD::Driver - A bundle to install all DBD::Driver related modules
+
+ =head1 SYNOPSIS
+
+ C<perl -MCPAN -e 'install Bundle::DBD::Driver'>
+
+ =head1 CONTENTS
+
+ Bundle::DBI - Bundle for DBI by TIMB (Tim Bunce)
+
+ DBD::Driver - DBD::Driver by YOU (Your Name)
+
+ =head1 DESCRIPTION
+
+ This bundle includes all the modules used by the Perl Database
+ Interface (DBI) driver for Driver (DBD::Driver), assuming the
+ use of DBI version 1.13 or later, created by Tim Bunce.
+
+ If you've not previously used the CPAN module to install any
+ bundles, you will be interrogated during its setup phase.
+ But when you've done it once, it remembers what you told it.
+ You could start by running:
+
+ C<perl -MCPAN -e 'install Bundle::CPAN'>
+
+ =head1 SEE ALSO
+
+ Bundle::DBI
+
+ =head1 AUTHOR
+
+ Your Name E<lt>F<you@yourdomain.com>E<gt>
+
+ =head1 THANKS
+
+ This bundle was created by ripping off Bundle::libnet created by
+ Graham Barr E<lt>F<gbarr@ti.com>E<gt>, and radically simplified
+ with some information from Jochen Wiedmann E<lt>F<joe@ispsoft.de>E<gt>.
+ The template was then included in the DBI::DBD documentation by
+ Jonathan Leffler E<lt>F<jleffler@informix.com>E<gt>.
+
+ =cut
+
+=head2 lib/DBD/Driver/Summary.pm
+
+There is no substitute for taking the summary file from a driver that
+was documented in the Perl book (such as B<DBD::Oracle> or B<DBD::Informix> or
+B<DBD::ODBC>, to name but three), and adapting it to describe the
+facilities available via B<DBD::Driver> when accessing the Driver database.
+
+=head2 Pure Perl version of Driver.pm
+
+The F<Driver.pm> file defines the Perl module B<DBD::Driver> for your driver.
+It will define a package B<DBD::Driver> along with some version information,
+some variable definitions, and a function C<driver()> which will have a more
+or less standard structure.
+
+It will also define three sub-packages of B<DBD::Driver>:
+
+=over 4
+
+=item DBD::Driver::dr
+
+with methods C<connect()>, C<data_sources()> and C<disconnect_all()>;
+
+=item DBD::Driver::db
+
+with methods such as C<prepare()>;
+
+=item DBD::Driver::st
+
+with methods such as C<execute()> and C<fetch()>.
+
+=back
+
+The F<Driver.pm> file will also contain the documentation specific to
+B<DBD::Driver> in the format used by perldoc.
+
+In a pure Perl driver, the F<Driver.pm> file is the core of the
+implementation. You will need to provide all the key methods needed by B<DBI>.
+
+Now let's take a closer look at an excerpt of F<File.pm> as an example.
+We ignore things that are common to any module (even non-DBI modules)
+or really specific to the B<DBD::File> package.
+
+=head3 The DBD::Driver package
+
+=head4 The header
+
+ package DBD::File;
+
+ use strict;
+ use vars qw($VERSION $drh);
+
+ $VERSION = "1.23.00"; # Version number of DBD::File
+
+This is where the version number of your driver is specified, and is
+where F<Makefile.PL> looks for this information. Please ensure that any
+other modules added with your driver are also version stamped so that
+CPAN does not get confused.
+
+It is recommended that you use a two-part (1.23) or three-part (1.23.45)
+version number. Also consider the CPAN system, which gets confused and
+considers version 1.10 to precede version 1.9, so that using a raw CVS,
+RCS or SCCS version number is probably not appropriate (despite being
+very common).
+
+For Subversion you could use:
+
+ $VERSION = sprintf("12.%06d", q$Revision: 12345 $ =~ /(\d+)/o);
+
+(use lots of leading zeros on the second portion so if you move the code to a
+shared repository like svn.perl.org the much larger revision numbers won't
+cause a problem, at least not for a few years). For RCS or CVS you can use:
+
+ $VERSION = sprintf "%d.%02d", '$Revision: 11.21 $ ' =~ /(\d+)\.(\d+)/;
+
+which pads out the fractional part with leading zeros so all is well
+(so long as you don't go past x.99)
+
+ $drh = undef; # holds driver handle once initialized
+
+This is where the driver handle will be stored, once created.
+Note that you may assume there is only one handle for your driver.
+
+=head4 The driver constructor
+
+The C<driver()> method is the driver handle constructor. Note that
+the C<driver()> method is in the B<DBD::Driver> package, not in
+one of the sub-packages B<DBD::Driver::dr>, B<DBD::Driver::db>, or
+B<DBD::Driver::st>.
+
+ sub driver
+ {
+ return $drh if $drh; # already created - return same one
+ my ($class, $attr) = @_;
+
+ $class .= "::dr";
+
+ DBD::Driver::db->install_method('drv_example_dbh_method');
+ DBD::Driver::st->install_method('drv_example_sth_method');
+
+ # not a 'my' since we use it above to prevent multiple drivers
+ $drh = DBI::_new_drh($class, {
+ 'Name' => 'File',
+ 'Version' => $VERSION,
+ 'Attribution' => 'DBD::File by Jochen Wiedmann',
+ })
+ or return undef;
+
+ return $drh;
+ }
+
+This is a reasonable example of how B<DBI> implements its handles. There
+are three kinds: B<driver handles> (typically stored in I<$drh>; from
+now on called I<drh> or I<$drh>), B<database handles> (from now on
+called I<dbh> or I<$dbh>) and B<statement handles> (from now on called
+I<sth> or I<$sth>).
+
+The prototype of C<DBI::_new_drh()> is
+
+ $drh = DBI::_new_drh($class, $public_attrs, $private_attrs);
+
+with the following arguments:
+
+=over 4
+
+=item I<$class>
+
+is typically the class for your driver, (for example, "DBD::File::dr"),
+passed as the first argument to the C<driver()> method.
+
+=item I<$public_attrs>
+
+is a hash ref to attributes like I<Name>, I<Version>, and I<Attribution>.
+These are processed and used by B<DBI>. You had better not make any
+assumptions about them nor should you add private attributes here.
+
+=item I<$private_attrs>
+
+This is another (optional) hash ref with your private attributes.
+B<DBI> will store them and otherwise leave them alone.
+
+=back
+
+The C<DBI::_new_drh()> method and the C<driver()> method both return C<undef>
+for failure (in which case you must look at I<$DBI::err> and I<$DBI::errstr>
+for the failure information, because you have no driver handle to use).
+
+
+=head4 Using install_method() to expose driver-private methods
+
+ DBD::Foo::db->install_method($method_name, \%attr);
+
+Installs the driver-private method named by $method_name into the
+DBI method dispatcher so it can be called directly, avoiding the
+need to use the func() method.
+
+It is called as a static method on the driver class to which the
+method belongs. The method name must begin with the corresponding
+registered driver-private prefix. For example, for DBD::Oracle
+$method_name must begin with 'C<ora_>', and for DBD::AnyData it
+must begin with 'C<ad_>'.
+
+The C<\%attr> attributes can be used to provide fine control over how the DBI
+dispatcher handles the dispatching of the method. However it's undocumented
+at the moment. See the IMA_* #define's in DBI.xs and the O=>0x000x values in
+the initialization of %DBI::DBI_methods in DBI.pm. (Volunteers to polish up
+and document the interface are very welcome to get in touch via dbi-dev@perl.org).
+
+Methods installed using install_method default to the standard error
+handling behaviour for DBI methods: clearing err and errstr before
+calling the method, and checking for errors to trigger RaiseError
+etc. on return. This differs from the default behaviour of func().
+
+Note for driver authors: The DBD::Foo::xx->install_method call won't
+work until the class-hierarchy has been setup. Normally the DBI
+looks after that just after the driver is loaded. This means
+install_method() can't be called at the time the driver is loaded
+unless the class-hierarchy is set up first. The way to do that is
+to call the setup_driver() method:
+
+ DBI->setup_driver('DBD::Foo');
+
+before using install_method().
+
+
+=head4 The CLONE special subroutine
+
+Also needed here, in the B<DBD::Driver> package, is a C<CLONE()> method
+that will be called by perl when an interpreter is cloned. All your
+C<CLONE()> method needs to do, currently, is clear the cached I<$drh> so
+the new interpreter won't start using the cached I<$drh> from the old
+interpreter:
+
+ sub CLONE {
+ undef $drh;
+ }
+
+See L<http://search.cpan.org/dist/perl/pod/perlmod.pod#Making_your_module_threadsafe>
+for details.
+
+=head3 The DBD::Driver::dr package
+
+The next lines of code look as follows:
+
+ package DBD::Driver::dr; # ====== DRIVER ======
+
+ $DBD::Driver::dr::imp_data_size = 0;
+
+Note that no I<@ISA> is needed here, or for the other B<DBD::Driver::*>
+classes, because the B<DBI> takes care of that for you when the driver is
+loaded.
+
+ *FIX ME* Explain what the imp_data_size is, so that implementors aren't
+ practicing cargo-cult programming.
+
+=head4 The database handle constructor
+
+The database handle constructor is the driver's (hence the changed
+namespace) C<connect()> method:
+
+ sub connect
+ {
+ my ($drh, $dr_dsn, $user, $auth, $attr) = @_;
+
+ # Some database specific verifications, default settings
+ # and the like can go here. This should only include
+ # syntax checks or similar stuff where it's legal to
+ # 'die' in case of errors.
+ # For example, many database packages requires specific
+ # environment variables to be set; this could be where you
+ # validate that they are set, or default them if they are not set.
+
+ my $driver_prefix = "drv_"; # the assigned prefix for this driver
+
+ # Process attributes from the DSN; we assume ODBC syntax
+ # here, that is, the DSN looks like var1=val1;...;varN=valN
+ foreach my $var ( split /;/, $dr_dsn ) {
+ my ($attr_name, $attr_value) = split '=', $var, 2;
+ return $drh->set_err($DBI::stderr, "Can't parse DSN part '$var'")
+ unless defined $attr_value;
+
+ # add driver prefix to attribute name if it doesn't have it already
+ $attr_name = $driver_prefix.$attr_name
+ unless $attr_name =~ /^$driver_prefix/o;
+
+ # Store attribute into %$attr, replacing any existing value.
+ # The DBI will STORE() these into $dbh after we've connected
+ $attr->{$attr_name} = $attr_value;
+ }
+
+ # Get the attributes we'll use to connect.
+    # We use delete here because there is no need to STORE them
+ my $db = delete $attr->{drv_database} || delete $attr->{drv_db}
+ or return $drh->set_err($DBI::stderr, "No database name given in DSN '$dr_dsn'");
+ my $host = delete $attr->{drv_host} || 'localhost';
+ my $port = delete $attr->{drv_port} || 123456;
+
+ # Assume you can attach to your database via drv_connect:
+ my $connection = drv_connect($db, $host, $port, $user, $auth)
+ or return $drh->set_err($DBI::stderr, "Can't connect to $dr_dsn: ...");
+
+ # create a 'blank' dbh (call superclass constructor)
+ my ($outer, $dbh) = DBI::_new_dbh($drh, { Name => $dr_dsn });
+
+ $dbh->STORE('Active', 1 );
+ $dbh->{drv_connection} = $connection;
+
+ return $outer;
+ }
+
+This is mostly the same as in the I<driver handle constructor> above.
+The arguments are described in L<DBI>.
+
+The constructor C<DBI::_new_dbh()> is called, returning a database handle.
+The constructor's prototype is:
+
+ ($outer, $inner) = DBI::_new_dbh($drh, $public_attr, $private_attr);
+
+with similar arguments to those in the I<driver handle constructor>,
+except that the I<$class> is replaced by I<$drh>. The I<Name> attribute
+is a standard B<DBI> attribute (see L<DBI/Database Handle Attributes>).
+
+In scalar context, only the outer handle is returned.
+
+Note the use of the C<STORE()> method for setting the I<dbh> attributes.
+That's because within the driver code, the handle object you have is
+the 'inner' handle of a tied hash, not the outer handle that the
+users of your driver have.
+
+Because you have the inner handle, tie magic doesn't get invoked
+when you get or set values in the hash. This is often very handy for
+speed when you want to get or set simple non-special driver-specific
+attributes.
+
+However, some attribute values, such as those handled by the B<DBI> like
+I<PrintError>, don't actually exist in the hash and must be read via
+C<$h-E<gt>FETCH($attrib)> and set via C<$h-E<gt>STORE($attrib, $value)>.
+If in any doubt, use these methods.
+
+=head4 The data_sources() method
+
+The C<data_sources()> method must populate and return a list of valid data
+sources, prefixed with the "I<dbi:Driver>" incantation that allows them to
+be used in the first argument of the C<DBI-E<gt>connect()> method.
+An example of this might be scanning the F<$HOME/.odbcini> file on Unix
+for ODBC data sources (DSNs).
+
+As a trivial example, consider a fixed list of data sources:
+
+ sub data_sources
+ {
+ my($drh, $attr) = @_;
+ my(@list) = ();
+ # You need more sophisticated code than this to set @list...
+ push @list, "dbi:Driver:abc";
+ push @list, "dbi:Driver:def";
+ push @list, "dbi:Driver:ghi";
+ # End of code to set @list
+ return @list;
+ }
+
+=head4 The disconnect_all() method
+
+If you need to release any resources when the driver is unloaded, you
+can provide a disconnect_all method.
+
+=head4 Other driver handle methods
+
+If you need any other driver handle methods, they can follow here.
+
+=head4 Error handling
+
+It is quite likely that something fails in the connect method.
+With B<DBD::File> for example, you might catch an error when setting the
+current directory to something not existent by using the
+(driver-specific) I<f_dir> attribute.
+
+To report an error, you use the C<set_err()> method:
+
+ $h->set_err($err, $errmsg, $state);
+
+This will ensure that the error is recorded correctly and that
+I<RaiseError> and I<PrintError> etc are handled correctly.
+
+Typically you'll always use the method instance, aka your method's first
+argument.
+
+As C<set_err()> always returns C<undef> your error handling code can
+usually be simplified to something like this:
+
+ return $h->set_err($err, $errmsg, $state) if ...;
+
+=head3 The DBD::Driver::db package
+
+ package DBD::Driver::db; # ====== DATABASE ======
+
+ $DBD::Driver::db::imp_data_size = 0;
+
+=head4 The statement handle constructor
+
+There's nothing much new in the statement handle constructor, which
+is the C<prepare()> method:
+
+ sub prepare
+ {
+ my ($dbh, $statement, @attribs) = @_;
+
+ # create a 'blank' sth
+ my ($outer, $sth) = DBI::_new_sth($dbh, { Statement => $statement });
+
+ $sth->STORE('NUM_OF_PARAMS', ($statement =~ tr/?//));
+
+ $sth->{drv_params} = [];
+
+ return $outer;
+ }
+
+This is still the same -- check the arguments and call the super class
+constructor C<DBI::_new_sth()>. Again, in scalar context, only the outer
+handle is returned. The I<Statement> attribute should be cached as
+shown.
+
+Note the prefix I<drv_> in the attribute names: it is required that
+all your private attributes use a lowercase prefix unique to your driver.
+As mentioned earlier in this document, the B<DBI> contains a registry of
+known driver prefixes and may one day warn about unknown attributes
+that don't have a registered prefix.
+
+Note that we parse the statement here in order to set the attribute
+I<NUM_OF_PARAMS>. The technique illustrated is not very reliable; it can
+be confused by question marks appearing in quoted strings, delimited
+identifiers or in SQL comments that are part of the SQL statement. We
+could set I<NUM_OF_PARAMS> in the C<execute()> method instead because
+the B<DBI> specification explicitly allows a driver to defer this, but then
+the user could not call C<bind_param()>.
+
+=head4 Transaction handling
+
+Pure Perl drivers will rarely support transactions. Thus your C<commit()>
+and C<rollback()> methods will typically be quite simple:
+
+ sub commit
+ {
+ my ($dbh) = @_;
+ if ($dbh->FETCH('Warn')) {
+ warn("Commit ineffective while AutoCommit is on");
+ }
+ 0;
+ }
+
+ sub rollback {
+ my ($dbh) = @_;
+ if ($dbh->FETCH('Warn')) {
+ warn("Rollback ineffective while AutoCommit is on");
+ }
+ 0;
+ }
+
+Or even simpler, just use the default methods provided by the B<DBI> that
+do nothing except return C<undef>.
+
+The B<DBI>'s default C<begin_work()> method can be used by inheritance.
+
+=head4 The STORE() and FETCH() methods
+
+These methods (that we have already used, see above) are called for
+you, whenever the user does a:
+
+ $dbh->{$attr} = $val;
+
+or, respectively,
+
+ $val = $dbh->{$attr};
+
+See L<perltie> for details on tied hash refs to understand why these
+methods are required.
+
+The B<DBI> will handle most attributes for you, in particular attributes
+like I<RaiseError> or I<PrintError>. All you have to do is handle your
+driver's private attributes and any attributes, like I<AutoCommit> and
+I<ChopBlanks>, that the B<DBI> can't handle for you.
+
+A good example might look like this:
+
+ sub STORE
+ {
+ my ($dbh, $attr, $val) = @_;
+ if ($attr eq 'AutoCommit') {
+ # AutoCommit is currently the only standard attribute we have
+ # to consider.
+ if (!$val) { die "Can't disable AutoCommit"; }
+ return 1;
+ }
+ if ($attr =~ m/^drv_/) {
+ # Handle only our private attributes here
+ # Note that we could trigger arbitrary actions.
+ # Ideally we should warn about unknown attributes.
+ $dbh->{$attr} = $val; # Yes, we are allowed to do this,
+ return 1; # but only for our private attributes
+ }
+ # Else pass up to DBI to handle for us
+ $dbh->SUPER::STORE($attr, $val);
+ }
+
+ sub FETCH
+ {
+ my ($dbh, $attr) = @_;
+ if ($attr eq 'AutoCommit') { return 1; }
+ if ($attr =~ m/^drv_/) {
+ # Handle only our private attributes here
+ # Note that we could trigger arbitrary actions.
+ return $dbh->{$attr}; # Yes, we are allowed to do this,
+ # but only for our private attributes
+ }
+ # Else pass up to DBI to handle
+ $dbh->SUPER::FETCH($attr);
+ }
+
+The B<DBI> will actually store and fetch driver-specific attributes (with all
+lowercase names) without warning or error, so there's actually no need to
+implement any driver-specific code in your C<FETCH()> and C<STORE()>
+methods unless you need extra logic/checks, beyond getting or setting
+the value.
+
+Unless your driver documentation indicates otherwise, the return value of
+the C<STORE()> method is unspecified and the caller shouldn't use that value.
+
+=head4 Other database handle methods
+
+As with the driver package, other database handle methods may follow here.
+In particular you should consider a (possibly empty) C<disconnect()>
+method and possibly a C<quote()> method if B<DBI>'s default isn't correct for
+you. You may also need the C<type_info_all()> and C<get_info()> methods,
+as described elsewhere in this document.
+
+Where reasonable use C<$h-E<gt>SUPER::foo()> to call the B<DBI>'s method in
+some or all cases and just wrap your custom behavior around that.
+
+If you want to use private trace flags you'll probably want to be
+able to set them by name. To do that you'll need to define a
+C<parse_trace_flag()> method (note that's "parse_trace_flag", singular,
+not "parse_trace_flags", plural).
+
+ sub parse_trace_flag {
+ my ($h, $name) = @_;
+ return 0x01000000 if $name eq 'foo';
+ return 0x02000000 if $name eq 'bar';
+ return 0x04000000 if $name eq 'baz';
+ return 0x08000000 if $name eq 'boo';
+ return 0x10000000 if $name eq 'bop';
+ return $h->SUPER::parse_trace_flag($name);
+ }
+
+All private flag names must be lowercase, and all private flags
+must be in the top 8 of the 32 bits.
+
+=head3 The DBD::Driver::st package
+
+This package follows the same pattern the others do:
+
+ package DBD::Driver::st;
+
+ $DBD::Driver::st::imp_data_size = 0;
+
+=head4 The execute() and bind_param() methods
+
+This is perhaps the most difficult method because we have to consider
+parameter bindings here. In addition to that, there are a number of
+statement attributes which must be set for inherited B<DBI> methods to
+function correctly (see L</Statement attributes> below).
+
+We present a simplified implementation by using the I<drv_params>
+attribute from above:
+
+ sub bind_param
+ {
+ my ($sth, $pNum, $val, $attr) = @_;
+ my $type = (ref $attr) ? $attr->{TYPE} : $attr;
+ if ($type) {
+ my $dbh = $sth->{Database};
+            $val = $dbh->quote($val, $type);
+ }
+ my $params = $sth->{drv_params};
+ $params->[$pNum-1] = $val;
+ 1;
+ }
+
+ sub execute
+ {
+ my ($sth, @bind_values) = @_;
+
+      # start off by finishing any previous execution if still active
+ $sth->finish if $sth->FETCH('Active');
+
+ my $params = (@bind_values) ?
+ \@bind_values : $sth->{drv_params};
+ my $numParam = $sth->FETCH('NUM_OF_PARAMS');
+ return $sth->set_err($DBI::stderr, "Wrong number of parameters")
+ if @$params != $numParam;
+ my $statement = $sth->{'Statement'};
+ for (my $i = 0; $i < $numParam; $i++) {
+          $statement =~ s/\?/$params->[$i]/; # XXX doesn't deal with quoting etc!
+ }
+ # Do anything ... we assume that an array ref of rows is
+ # created and store it:
+ $sth->{'drv_data'} = $data;
+ $sth->{'drv_rows'} = @$data; # number of rows
+      $sth->STORE('NUM_OF_FIELDS', $numFields);
+ $sth->{Active} = 1;
+ @$data || '0E0';
+ }
+
+There are a number of things you should note here.
+
+We initialize the I<NUM_OF_FIELDS> and I<Active> attributes here,
+because they are essential for C<bind_columns()> to work.
+
+We use attribute C<$sth-E<gt>{Statement}> which we created
+within C<prepare()>. The attribute C<$sth-E<gt>{Database}>, which is
+nothing else than the I<dbh>, was automatically created by B<DBI>.
+
+Finally, note that (as specified in the B<DBI> specification) we return the
+string C<'0E0'> instead of the number 0, so that the result tests true but
+equal to zero.
+
+ $sth->execute() or die $sth->errstr;
+
+=head4 The execute_array(), execute_for_fetch() and bind_param_array() methods
+
+In general, DBD's only need to implement C<execute_for_fetch()> and
+C<bind_param_array>. DBI's default C<execute_array()> will invoke the
+DBD's C<execute_for_fetch()> as needed.
+
+The following sequence describes the interaction between
+DBI C<execute_array> and a DBD's C<execute_for_fetch>:
+
+=over
+
+=item 1
+
+App calls C<$sth-E<gt>execute_array(\%attrs, @array_of_arrays)>
+
+=item 2
+
+If C<@array_of_arrays> was specified, DBI processes C<@array_of_arrays> by calling
+DBD's C<bind_param_array()>. Alternately, App may have directly called
+C<bind_param_array()>
+
+=item 3
+
+DBD validates and binds each array
+
+=item 4
+
+DBI retrieves the validated param arrays from DBD's ParamArray attribute
+
+=item 5
+
+DBI calls DBD's C<execute_for_fetch($fetch_tuple_sub, \@tuple_status)>,
+where C<&$fetch_tuple_sub> is a closure to iterate over the
+returned ParamArray values, and C<\@tuple_status> is an array to receive
+the disposition status of each tuple.
+
+=item 6
+
+DBD iteratively calls C<&$fetch_tuple_sub> to retrieve parameter tuples
+to be added to its bulk database operation/request.
+
+=item 7
+
+when DBD reaches the limit of tuples it can handle in a single database
+operation/request, or the C<&$fetch_tuple_sub> indicates no more
+tuples by returning undef, the DBD executes the bulk operation, and
+reports the disposition of each tuple in \@tuple_status.
+
+=item 8
+
+DBD repeats steps 6 and 7 until all tuples are processed.
+
+=back
+
+E.g., here's the essence of L<DBD::Oracle>'s execute_for_fetch:
+
+ while (1) {
+ my @tuple_batch;
+ for (my $i = 0; $i < $batch_size; $i++) {
+ push @tuple_batch, [ @{$fetch_tuple_sub->() || last} ];
+ }
+ last unless @tuple_batch;
+ my $res = ora_execute_array($sth, \@tuple_batch,
+ scalar(@tuple_batch), $tuple_batch_status);
+ push @$tuple_status, @$tuple_batch_status;
+ }
+
+Note that DBI's default execute_array()/execute_for_fetch() implementation
+requires the use of positional (i.e., '?') placeholders. Drivers
+which B<require> named placeholders must either emulate positional
+placeholders (e.g., see L<DBD::Oracle>), or must implement their own
+execute_array()/execute_for_fetch() methods to properly sequence bound
+parameter arrays.
+
+=head4 Fetching data
+
+Only one method needs to be written for fetching data, C<fetchrow_arrayref()>.
+The other methods, C<fetchrow_array()>, C<fetchall_arrayref()>, etc, as well
+as the database handle's C<select*> methods are part of B<DBI>, and call
+C<fetchrow_arrayref()> as necessary.
+
+ sub fetchrow_arrayref
+ {
+ my ($sth) = @_;
+ my $data = $sth->{drv_data};
+ my $row = shift @$data;
+ if (!$row) {
+ $sth->STORE(Active => 0); # mark as no longer active
+ return undef;
+ }
+ if ($sth->FETCH('ChopBlanks')) {
+ map { $_ =~ s/\s+$//; } @$row;
+ }
+ return $sth->_set_fbav($row);
+ }
+ *fetch = \&fetchrow_arrayref; # required alias for fetchrow_arrayref
+
+Note the use of the method C<_set_fbav()> -- this is required so that
+C<bind_col()> and C<bind_columns()> work.
+
+If an error occurs which leaves the I<$sth> in a state where remaining rows
+can't be fetched then I<Active> should be turned off before the method returns.
+
+The C<rows()> method for this driver can be implemented like this:
+
+ sub rows { shift->{drv_rows} }
+
+because it knows in advance how many rows it has fetched.
+Alternatively you could delete that method and so fallback
+to the B<DBI>'s own method which does the right thing based
+on the number of calls to C<_set_fbav()>.
+
+=head4 The more_results method
+
+If your driver doesn't support multiple result sets, then don't even implement this method.
+
+Otherwise, this method needs to get the statement handle ready to fetch results
+from the next result set, if there is one. Typically you'd start with:
+
+ $sth->finish;
+
+then you should delete all the attributes from the attribute cache that may no
+longer be relevant for the new result set:
+
+ delete $sth->{$_}
+ for qw(NAME TYPE PRECISION SCALE ...);
+
+for drivers written in C use:
+
+ hv_delete((HV*)SvRV(sth), "NAME", 4, G_DISCARD);
+ hv_delete((HV*)SvRV(sth), "NULLABLE", 8, G_DISCARD);
+ hv_delete((HV*)SvRV(sth), "NUM_OF_FIELDS", 13, G_DISCARD);
+ hv_delete((HV*)SvRV(sth), "PRECISION", 9, G_DISCARD);
+ hv_delete((HV*)SvRV(sth), "SCALE", 5, G_DISCARD);
+ hv_delete((HV*)SvRV(sth), "TYPE", 4, G_DISCARD);
+
+Don't forget to also delete, or update, any driver-private attributes that may
+not be correct for the next resultset.
+
+The NUM_OF_FIELDS attribute is a special case. It should be set using STORE:
+
+  $sth->STORE(NUM_OF_FIELDS => 0); # for DBI <= 1.53
+ $sth->STORE(NUM_OF_FIELDS => $new_value);
+
+for drivers written in C use this incantation:
+
+ /* Adjust NUM_OF_FIELDS - which also adjusts the row buffer size */
+ DBIc_NUM_FIELDS(imp_sth) = 0; /* for DBI <= 1.53 */
+ DBIc_STATE(imp_xxh)->set_attr_k(sth, sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0,
+ sv_2mortal(newSViv(mysql_num_fields(imp_sth->result)))
+ );
+
+For DBI versions prior to 1.54 you'll also need to explicitly adjust the
+number of elements in the row buffer array (C<DBIc_FIELDS_AV(imp_sth)>)
+to match the new result set. Fill any new values with newSV(0) not &sv_undef.
+Alternatively you could free DBIc_FIELDS_AV(imp_sth) and set it to null,
+but that would mean bind_columns() wouldn't work across result sets.
+
+
+=head4 Statement attributes
+
+The main difference between I<dbh> and I<sth> attributes is, that you
+should implement a lot of attributes here that are required by
+the B<DBI>, such as I<NAME>, I<NULLABLE>, I<TYPE>, etc. See
+L<DBI/Statement Handle Attributes> for a complete list.
+
+Pay attention to attributes which are marked as read only, such as
+I<NUM_OF_PARAMS>. These attributes can only be set the first time
+a statement is executed. If a statement is prepared, then executed
+multiple times, warnings may be generated.
+
+You can protect against these warnings, and prevent the recalculation
+of attributes which might be expensive to calculate (such as the
+I<NAME> and I<NAME_*> attributes):
+
+ my $storedNumParams = $sth->FETCH('NUM_OF_PARAMS');
+  if (!defined $storedNumParams or $storedNumParams < 0) {
+      $sth->STORE('NUM_OF_PARAMS', $numParams);
+
+ # Set other useful attributes that only need to be set once
+ # for a statement, like $sth->{NAME} and $sth->{TYPE}
+ }
+
+One particularly important attribute to set correctly (mentioned in
+L<DBI/ATTRIBUTES COMMON TO ALL HANDLES>) is I<Active>. Many B<DBI> methods,
+including C<bind_columns()>, depend on this attribute.
+
+Besides that the C<STORE()> and C<FETCH()> methods are mainly the same
+as above for I<dbh>'s.
+
+=head4 Other statement methods
+
+A trivial C<finish()> method to discard stored data, reset any attributes
+(such as I<Active>) and do C<$sth-E<gt>SUPER::finish()>.
+
+If you've defined a C<parse_trace_flag()> method in B<::db> you'll also want
+it in B<::st>, so just alias it in:
+
+    *parse_trace_flag = \&DBD::foo::db::parse_trace_flag;
+
+And perhaps some other methods that are not part of the B<DBI>
+specification, in particular to make metadata available.
+Remember that they must have names that begin with your drivers
+registered prefix so they can be installed using C<install_method()>.
+
+If C<DESTROY()> is called on a statement handle that's still active
+(C<$sth-E<gt>{Active}> is true) then it should effectively call C<finish()>.
+
+ sub DESTROY {
+ my $sth = shift;
+ $sth->finish if $sth->FETCH('Active');
+ }
+
+=head2 Tests
+
+The test process should conform as closely as possible to the Perl
+standard test harness.
+
+In particular, most (all) of the tests should be run in the F<t> sub-directory,
+and should simply produce an C<ok> when run under C<make test>.
+For details on how this is done, see the Camel book and the section in
+Chapter 7, "The Standard Perl Library" on L<Test::Harness>.
+
+The tests may need to adapt to the type of database which is being used
+for testing, and to the privileges of the user testing the driver. For
+example, the B<DBD::Informix> test code has to adapt in a number of
+places to the type of database to which it is connected as different
+Informix databases have different capabilities: some of the tests are
+for databases without transaction logs; others are for databases with a
+transaction log; some versions of the server have support for blobs, or
+stored procedures, or user-defined data types, and others do not.
+
+When a complete file of tests must be skipped, you can provide a reason
+in a pseudo-comment:
+
+ if ($no_transactions_available)
+ {
+ print "1..0 # Skip: No transactions available\n";
+ exit 0;
+ }
+
+Consider downloading the B<DBD::Informix> code and look at the code in
+F<DBD/Informix/TestHarness.pm> which is used throughout the
+B<DBD::Informix> tests in the F<t> sub-directory.
+
+=head1 CREATING A C/XS DRIVER
+
+Please also see the section under L<CREATING A PURE PERL DRIVER>
+regarding the creation of the F<Makefile.PL>.
+
+Creating a new C/XS driver from scratch will always be a daunting task.
+You can and should greatly simplify your task by taking a good
+reference driver implementation and modifying that to match the
+database product for which you are writing a driver.
+
+The de facto reference driver has been the one for B<DBD::Oracle> written
+by Tim Bunce, who is also the author of the B<DBI> package. The B<DBD::Oracle>
+module is a good example of a driver implemented around a C-level API.
+
+Nowadays it seems better to base on B<DBD::ODBC>, another driver
+maintained by Tim and Jeff Urlwin, because it offers a lot of metadata
+and seems to become the guideline for the future development. (Also as
+B<DBD::Oracle> digs deeper into the Oracle 8 OCI interface it'll get even
+more hairy than it is now.)
+
+The B<DBD::Informix> driver is one driver implemented using embedded SQL
+instead of a function-based API.
+B<DBD::Ingres> may also be worth a look.
+
+=head2 C/XS version of Driver.pm
+
+A lot of the code in the F<Driver.pm> file is very similar to the code for pure Perl modules
+- see above. However,
+there are also some subtle (and not so subtle) differences, including:
+
+=over 8
+
+=item *
+
+The variables I<$DBD::Driver::{dr|db|st}::imp_data_size> are not defined
+here, but in the XS code, because they declare the size of certain
+C structures.
+
+=item *
+
+Some methods are typically moved to the XS code, in particular
+C<prepare()>, C<execute()>, C<disconnect()>, C<disconnect_all()> and the
+C<STORE()> and C<FETCH()> methods.
+
+=item *
+
+Other methods are still part of F<Driver.pm>, but have callbacks to
+the XS code.
+
+=item *
+
+If the driver-specific parts of the I<imp_drh_t> structure need to be
+formally initialized (which does not seem to be a common requirement),
+then you need to add a call to an appropriate XS function in the driver
+method of C<DBD::Driver::driver()>, and you define the corresponding function
+in F<Driver.xs>, and you define the C code in F<dbdimp.c> and the prototype in
+F<dbdimp.h>.
+
+For example, B<DBD::Informix> has such a requirement, and adds the
+following call after the call to C<_new_drh()> in F<Informix.pm>:
+
+ DBD::Informix::dr::driver_init($drh);
+
+and the following code in F<Informix.xs>:
+
+ # Initialize the DBD::Informix driver data structure
+ void
+ driver_init(drh)
+ SV *drh
+ CODE:
+ ST(0) = dbd_ix_dr_driver_init(drh) ? &sv_yes : &sv_no;
+
+and the code in F<dbdimp.h> declares:
+
+ extern int dbd_ix_dr_driver_init(SV *drh);
+
+and the code in F<dbdimp.ec> (equivalent to F<dbdimp.c>) defines:
+
+ /* Formally initialize the DBD::Informix driver structure */
+ int
+ dbd_ix_dr_driver_init(SV *drh)
+ {
+ D_imp_drh(drh);
+ imp_drh->n_connections = 0; /* No active connections */
+ imp_drh->current_connection = 0; /* No current connection */
+ imp_drh->multipleconnections = (ESQLC_VERSION >= 600) ? True : False;
+ dbd_ix_link_newhead(&imp_drh->head); /* Empty linked list of connections */
+ return 1;
+ }
+
+B<DBD::Oracle> has a similar requirement but gets around it by checking
+whether the private data part of the driver handle is all zeroed out,
+rather than add extra functions.
+
+=back
+
+Now let's take a closer look at an excerpt from F<Oracle.pm> (revised
+heavily to remove idiosyncrasies) as an example, ignoring things that
+were already discussed for pure Perl drivers.
+
+=head3 The connect method
+
+The connect method is the database handle constructor.
+You could write either of two versions of this method: one which
+takes connection attributes (new code) and one which ignores them (old
+code only).
+
+If you ignore the connection attributes, then you omit all mention of
+the I<$auth> variable (which is a reference to a hash of attributes), and
+the XS system manages the differences for you.
+
+ sub connect
+ {
+ my ($drh, $dbname, $user, $auth, $attr) = @_;
+
+ # Some database specific verifications, default settings
+ # and the like following here. This should only include
+ # syntax checks or similar stuff where it's legal to
+ # 'die' in case of errors.
+
+ my $dbh = DBI::_new_dbh($drh, {
+ 'Name' => $dbname,
+ })
+ or return undef;
+
+ # Call the driver-specific function _login in Driver.xs file which
+ # calls the DBMS-specific function(s) to connect to the database,
+ # and populate internal handle data.
+ DBD::Driver::db::_login($dbh, $dbname, $user, $auth, $attr)
+ or return undef;
+
+ $dbh;
+ }
+
+This is mostly the same as in the pure Perl case, the exception being
+the use of the private C<_login()> callback, which is the function
+that will really connect to the database. It is implemented in
+F<Driver.xst> (you should not implement it) and calls
+C<dbd_db_login6()> or C<dbd_db_login6_sv> from F<dbdimp.c>. See below
+for details.
+
+If your driver has driver-specific attributes which may be passed in the
+connect method and hence end up in C<$attr> in C<dbd_db_login6> then it
+is best to delete any you process so DBI does not send them again
+via STORE after connect. You can do this in C like this:
+
+ DBD_ATTRIB_DELETE(attr, "my_attribute_name",
+ strlen("my_attribute_name"));
+
+However, prior to DBI subversion version 11605 (and fixed post 1.607)
+DBD_ATTRIB_DELETE segfaulted so if you cannot guarantee the DBI version
+will be post 1.607 you need to use:
+
+ hv_delete((HV*)SvRV(attr), "my_attribute_name",
+ strlen("my_attribute_name"), G_DISCARD);
+
+ *FIX ME* Discuss removing attributes in Perl code.
+
+=head3 The disconnect_all method
+
+ *FIX ME* T.B.S
+
+=head3 The data_sources method
+
+If your C<data_sources()> method can be implemented in pure Perl, then do
+so because it is easier than doing it in XS code (see the section above
+for pure Perl drivers).
+
+If your C<data_sources()> method must call onto compiled functions, then
+you will need to define I<dbd_dr_data_sources> in your F<dbdimp.h> file, which
+will trigger F<Driver.xst> (in B<DBI> v1.33 or greater) to generate the XS
+code that calls your actual C function (see the discussion below for
+details) and you do not code anything in F<Driver.pm> to handle it.
+
+=head3 The prepare method
+
+The prepare method is the statement handle constructor, and most of it
+is not new. Like the C<connect()> method, it now has a C callback:
+
+ package DBD::Driver::db; # ====== DATABASE ======
+ use strict;
+
+ sub prepare
+ {
+ my ($dbh, $statement, $attribs) = @_;
+
+ # create a 'blank' sth
+ my $sth = DBI::_new_sth($dbh, {
+ 'Statement' => $statement,
+ })
+ or return undef;
+
+ # Call the driver-specific function _prepare in Driver.xs file
+ # which calls the DBMS-specific function(s) to prepare a statement
+ # and populate internal handle data.
+ DBD::Driver::st::_prepare($sth, $statement, $attribs)
+ or return undef;
+ $sth;
+ }
+
+=head3 The execute method
+
+ *FIX ME* T.B.S
+
+=head3 The fetchrow_arrayref method
+
+ *FIX ME* T.B.S
+
+=head3 Other methods?
+
+ *FIX ME* T.B.S
+
+=head2 Driver.xs
+
+F<Driver.xs> should look something like this:
+
+ #include "Driver.h"
+
+ DBISTATE_DECLARE;
+
+ INCLUDE: Driver.xsi
+
+ MODULE = DBD::Driver PACKAGE = DBD::Driver::dr
+
+ /* Non-standard drh XS methods following here, if any. */
+ /* If none (the usual case), omit the MODULE line above too. */
+
+ MODULE = DBD::Driver PACKAGE = DBD::Driver::db
+
+ /* Non-standard dbh XS methods following here, if any. */
+ /* Currently this includes things like _list_tables from */
+ /* DBD::mSQL and DBD::mysql. */
+
+ MODULE = DBD::Driver PACKAGE = DBD::Driver::st
+
+ /* Non-standard sth XS methods following here, if any. */
+ /* In particular this includes things like _list_fields from */
+ /* DBD::mSQL and DBD::mysql for accessing metadata. */
+
+Note especially the include of F<Driver.xsi> here: B<DBI> inserts stub
+functions for almost all private methods here which will typically do
+much work for you.
+
+Wherever you really have to implement something, it will call a private
+function in F<dbdimp.c>, and this is what you have to implement.
+
+You need to set up an extra routine if your driver needs to export
+constants of its own, analogous to the SQL types available when you say:
+
+ use DBI qw(:sql_types);
+
+ *FIX ME* T.B.S
+
+=head2 Driver.h
+
+F<Driver.h> is very simple and the operational contents should look like this:
+
+ #ifndef DRIVER_H_INCLUDED
+ #define DRIVER_H_INCLUDED
+
+ #define NEED_DBIXS_VERSION 93 /* 93 for DBI versions 1.00 to 1.51+ */
+ #define PERL_NO_GET_CONTEXT /* if used require DBI 1.51+ */
+
+ #include <DBIXS.h> /* installed by the DBI module */
+
+ #include "dbdimp.h"
+
+ #include "dbivport.h" /* see below */
+
+ #include <dbd_xsh.h> /* installed by the DBI module */
+
+ #endif /* DRIVER_H_INCLUDED */
+
+The F<DBIXS.h> header defines most of the interesting information that
+the writer of a driver needs.
+
+The file F<dbd_xsh.h> header provides prototype declarations for the C
+functions that you might decide to implement. Note that you should
+normally only define one of C<dbd_db_login()>, C<dbd_db_login6()> or
+C<dbd_db_login6_sv> unless you are intent on supporting really old
+versions of B<DBI> (prior to B<DBI> 1.06) as well as modern
+versions. The only standard, B<DBI>-mandated functions that you need
+write are those specified in the F<dbd_xsh.h> header. You might also
+add extra driver-specific functions in F<Driver.xs>.
+
+The F<dbivport.h> file should be I<copied> from the latest B<DBI> release
+into your distribution each time you modify your driver. Its job is to
+allow you to enhance your code to work with the latest B<DBI> API while
+still allowing your driver to be compiled and used with older versions
+of the B<DBI> (for example, when the C<DBIh_SET_ERR_CHAR()> macro was added
+to B<DBI> 1.41, an emulation of it was added to F<dbivport.h>). This makes
+users happy and your life easier. Always read the notes in F<dbivport.h>
+to check for any limitations in the emulation that you should be aware
+of.
+
+With B<DBI> v1.51 or better I recommend that the driver defines
+I<PERL_NO_GET_CONTEXT> before F<DBIXS.h> is included. This can significantly
+improve efficiency when running under a thread enabled perl. (Remember that
+the standard perl in most Linux distributions is built with threads enabled.
+So is ActiveState perl for Windows, and perl built for Apache mod_perl2.)
+If you do this there are some things to keep in mind:
+
+=over 4
+
+=item *
+
+If I<PERL_NO_GET_CONTEXT> is defined, then every function that calls the Perl
+API will need to start out with a C<dTHX;> declaration.
+
+=item *
+
+You'll know which functions need this, because the C compiler will
+complain that the undeclared identifier C<my_perl> is used if I<and only if>
+the perl you are using to develop and test your driver has threads enabled.
+
+=item *
+
+If you don't remember to test with a thread-enabled perl before making
+a release it's likely that you'll get failure reports from users who are.
+
+=item *
+
+For driver private functions it is possible to gain even more
+efficiency by replacing C<dTHX;> with C<pTHX_> prepended to the
+parameter list and then C<aTHX_> prepended to the argument list where
+the function is called.
+
+=back
+
+See L<perlguts/How multiple interpreters and concurrency are supported> for
+additional information about I<PERL_NO_GET_CONTEXT>.
+
+=head2 Implementation header dbdimp.h
+
+This header file has two jobs:
+
+First it defines data structures for your private part of the handles.
+
+Second it defines macros that rename the generic names like
+C<dbd_db_login()> to database specific names like C<ora_db_login()>. This
+avoids name clashes and enables use of different drivers when you work
+with a statically linked perl.
+
+It also will have the important task of disabling XS methods that you
+don't want to implement.
+
+Finally, the macros will also be used to select alternate
+implementations of some functions. For example, the C<dbd_db_login()>
+function is not passed the attribute hash.
+
+Since B<DBI> v1.06, if a C<dbd_db_login6()> macro is defined (for a function
+with 6 arguments), it will be used instead with the attribute hash
+passed as the sixth argument.
+
+Since B<DBI> post v1.607, if a C<dbd_db_login6_sv()> macro is defined (for
+a function like dbd_db_login6 but with scalar pointers for the dbname,
+username and password), it will be used instead. This will allow your
+login6 function to see if there are any Unicode characters in the
+dbname.
+
+People used to just pick Oracle's F<dbdimp.c> and use the same names,
+structures and types. I strongly recommend against that. At first glance
+this saves time, but your implementation will be less readable. It was
+just hell when I had to separate B<DBI> specific parts, Oracle specific
+parts, mSQL specific parts and mysql specific parts in B<DBD::mysql>'s
+I<dbdimp.h> and I<dbdimp.c>. (B<DBD::mysql> was a port of B<DBD::mSQL>
+which was based on B<DBD::Oracle>.) [Seconded, based on the experience
+taking B<DBD::Informix> apart, even though the version inherited in 1996
+was only based on B<DBD::Oracle>.]
+
+This part of the driver is I<your exclusive part>. Rewrite it from
+scratch, so it will be clean and short: in other words, a better piece
+of code. (Of course keep an eye on other people's work.)
+
+ struct imp_drh_st {
+ dbih_drc_t com; /* MUST be first element in structure */
+ /* Insert your driver handle attributes here */
+ };
+
+ struct imp_dbh_st {
+ dbih_dbc_t com; /* MUST be first element in structure */
+ /* Insert your database handle attributes here */
+ };
+
+ struct imp_sth_st {
+ dbih_stc_t com; /* MUST be first element in structure */
+ /* Insert your statement handle attributes here */
+ };
+
+ /* Rename functions for avoiding name clashes; prototypes are */
+ /* in dbd_xsh.h */
+ #define dbd_init drv_dr_init
+ #define dbd_db_login6_sv drv_db_login_sv
+ #define dbd_db_do drv_db_do
+ ... many more here ...
+
+These structures implement your private part of the handles.
+
+You I<have> to use the names C<imp_{dr|db|st}h_st> and the first field
+I<must> be of type I<dbih_drc_t|_dbc_t|_stc_t> and I<must> be called
+C<com>.
+
+You should never access these fields directly, except by using the
+I<DBIc_xxx()> macros below.
+
+=head2 Implementation source dbdimp.c
+
+Conventionally, F<dbdimp.c> is the main implementation file (but
+B<DBD::Informix> calls the file F<dbdimp.ec>). This section includes a
+short note on each function that is used in the F<Driver.xsi> template
+and thus I<has> to be implemented.
+
+Of course, you will probably also need to implement other support
+functions, which should usually be file static if they are placed in
+F<dbdimp.c>. If they are placed in other files, you need to list those
+files in F<Makefile.PL> (and F<MANIFEST>) to handle them correctly.
+
+It is wise to adhere to a namespace convention for your functions to
+avoid conflicts. For example, for a driver with prefix I<drv_>, you
+might call externally visible functions I<dbd_drv_xxxx>. You should also
+avoid non-constant global variables as much as possible to improve the
+support for threading.
+
+Since Perl requires support for function prototypes (ANSI or ISO or
+Standard C), you should write your code using function prototypes too.
+
+It is possible to use either the unmapped names such as C<dbd_init()> or
+the mapped names such as C<dbd_ix_dr_init()> in the F<dbdimp.c> file.
+B<DBD::Informix> uses the mapped names which makes it easier to identify
+where to look for linkage problems at runtime (which will report errors
+using the mapped names).
+
+Most other drivers, and in particular B<DBD::Oracle>, use the unmapped
+names in the source code which makes it a little easier to compare code
+between drivers and eases discussions on the I<dbi-dev> mailing list.
+The majority of the code fragments here will use the unmapped names.
+
+Ultimately, you should provide implementations for most of the
+functions listed in the F<dbd_xsh.h> header. The exceptions are
+optional functions (such as C<dbd_st_rows()>) and those functions with
+alternative signatures, such as C<dbd_db_login6_sv>,
+C<dbd_db_login6()> and I<dbd_db_login()>. Then you should only
+implement one of the alternatives, and generally the newer one of the
+alternatives.
+
+=head3 The dbd_init method
+
+ #include "Driver.h"
+
+ DBISTATE_DECLARE;
+
+ void dbd_init(dbistate_t* dbistate)
+ {
+ DBISTATE_INIT; /* Initialize the DBI macros */
+ }
+
+The C<dbd_init()> function will be called when your driver is first
+loaded; the bootstrap command in C<DBD::Driver::dr::driver()> triggers this,
+and the call is generated in the I<BOOT> section of F<Driver.xst>.
+These statements are needed to allow your driver to use the B<DBI> macros.
+They will include your private header file F<dbdimp.h> in turn.
+Note that I<DBISTATE_INIT> requires the name of the argument to C<dbd_init()>
+to be called C<dbistate>.
+
+=head3 The dbd_drv_error method
+
+You need a function to record errors so B<DBI> can access them properly.
+You can call it whatever you like, but we'll call it C<dbd_drv_error()>
+here.
+
+The argument list depends on your database software; different systems
+provide different ways to get at error information.
+
+ static void dbd_drv_error(SV *h, int rc, const char *what)
+ {
+
+Note that I<h> is a generic handle; it may be a driver handle, a
+database handle or a statement handle.
+
+ D_imp_xxh(h);
+
+This macro will declare and initialize a variable I<imp_xxh> with
+a pointer to your private handle pointer. You may cast this to
+I<imp_drh_t>, I<imp_dbh_t> or I<imp_sth_t>.
+
+To record the error correctly, equivalent to the C<set_err()> method,
+use one of the C<DBIh_SET_ERR_CHAR(...)> or C<DBIh_SET_ERR_SV(...)> macros,
+which were added in B<DBI> 1.41:
+
+ DBIh_SET_ERR_SV(h, imp_xxh, err, errstr, state, method);
+ DBIh_SET_ERR_CHAR(h, imp_xxh, err_c, err_i, errstr, state, method);
+
+For C<DBIh_SET_ERR_SV> the I<err>, I<errstr>, I<state>, and I<method>
+parameters are C<SV*> (use &sv_undef instead of NULL).
+
+For C<DBIh_SET_ERR_CHAR> the I<err_c>, I<errstr>, I<state>, I<method>
+parameters are C<char*>.
+
+The I<err_i> parameter is an C<IV> that's used instead of I<err_c> if
+I<err_c> is C<Null>.
+
+The I<method> parameter can be ignored.
+
+The C<DBIh_SET_ERR_CHAR> macro is usually the simplest to use when you
+just have an integer error code and an error message string:
+
+ DBIh_SET_ERR_CHAR(h, imp_xxh, Nullch, rc, what, Nullch, Nullch);
+
+As you can see, any parameters that aren't relevant to you can be C<Null>.
+
+To make drivers compatible with B<DBI> < 1.41 you should be using F<dbivport.h>
+as described in L</Driver.h> above.
+
+The (obsolete) macros such as C<DBIh_EVENT2> should be removed from drivers.
+
+The names C<dbis> and C<DBIS>, which were used in previous versions of
+this document, should be replaced with the C<DBIc_DBISTATE(imp_xxh)> macro.
+
+The name C<DBILOGFP>, which was also used in previous versions of this
+document, should be replaced by C<DBIc_LOGPIO(imp_xxh)>.
+
+Your code should not call the C C<E<lt>stdio.hE<gt>> I/O functions; you
+should use C<PerlIO_printf()> as shown:
+
+ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
+ PerlIO_printf(DBIc_LOGPIO(imp_xxh), "foobar %s: %s\n",
+ foo, neatsvpv(errstr,0));
+
+That's the first time we see how tracing works within a B<DBI> driver. Make
+use of this as often as you can, but don't output anything at a trace
+level less than 3. Levels 1 and 2 are reserved for the B<DBI>.
+
+You can define up to 8 private trace flags using the top 8 bits
+of C<DBIc_TRACE_FLAGS(imp)>, that is: C<0xFF000000>. See the
+C<parse_trace_flag()> method elsewhere in this document.
+
+=head3 The dbd_dr_data_sources method
+
+This method is optional; the support for it was added in B<DBI> v1.33.
+
+As noted in the discussion of F<Driver.pm>, if the data sources
+can be determined by pure Perl code, do it that way. If, as in
+B<DBD::Informix>, the information is obtained by a C function call, then
+you need to define a function that matches the prototype:
+
+ extern AV *dbd_dr_data_sources(SV *drh, imp_drh_t *imp_drh, SV *attrs);
+
+An outline implementation for B<DBD::Informix> follows, assuming that the
+C<sqgetdbs()> function call shown will return up to 100 database names,
+with the pointers to each name in the array dbsname and the name strings
+themselves being stored in dbsarea.
+
+ AV *dbd_dr_data_sources(SV *drh, imp_drh_t *imp_drh, SV *attr)
+ {
+ int ndbs;
+ int i;
+ char *dbsname[100];
+ char dbsarea[10000];
+ AV *av = Nullav;
+
+ if (sqgetdbs(&ndbs, dbsname, 100, dbsarea, sizeof(dbsarea)) == 0)
+ {
+ av = newAV();
+ av_extend(av, (I32)ndbs);
+ sv_2mortal((SV *)av);
+ for (i = 0; i < ndbs; i++)
+ av_store(av, i, newSVpvf("dbi:Informix:%s", dbsname[i]));
+ }
+ return(av);
+ }
+
+The actual B<DBD::Informix> implementation has a number of extra lines of
+code, logs function entry and exit, reports the error from C<sqgetdbs()>,
+and uses C<#define>'d constants for the array sizes.
+
+=head3 The dbd_db_login6 method
+
+ int dbd_db_login6_sv(SV* dbh, imp_dbh_t* imp_dbh, SV* dbname,
+ SV* user, SV* auth, SV *attr);
+
+ or
+
+ int dbd_db_login6(SV* dbh, imp_dbh_t* imp_dbh, char* dbname,
+ char* user, char* auth, SV *attr);
+
+This function will really connect to the database. The argument I<dbh>
+is the database handle. I<imp_dbh> is the pointer to the handle's private
+data, as is I<imp_xxh> in C<dbd_drv_error()> above. The arguments
+I<dbname>, I<user>, I<auth> and I<attr> correspond to the arguments of
+the driver handle's C<connect()> method.
+
+You will quite often use database specific attributes here, that are
+specified in the DSN. I recommend you parse the DSN (using Perl) within
+the C<connect()> method and pass the segments of the DSN via the
+attributes parameter through C<_login()> to C<dbd_db_login6()>.
+
+Here's how you fetch them; as an example we use the I<drv_hostname>
+attribute, whose name is 12 characters long (the length passed below):
+
+ SV** svp;
+ STRLEN len;
+ char* hostname;
+
+ if ( (svp = DBD_ATTRIB_GET_SVP(attr, "drv_hostname", 12)) && SvTRUE(*svp)) {
+ hostname = SvPV(*svp, len);
+ DBD_ATTRIB_DELETE(attr, "drv_hostname", 12); /* avoid later STORE */
+ } else {
+ hostname = "localhost";
+ }
+
+If you handle any driver specific attributes in the dbd_db_login6
+method you probably want to delete them from C<attr> (as above with
+DBD_ATTRIB_DELETE). If you don't delete your handled attributes DBI
+will call C<STORE> for each attribute after the connect/login and this
+is at best redundant for attributes you have already processed.
+
+B<Note: Until revision 11605 (post DBI 1.607), there was a problem with
+DBD_ATTRIB_DELETE so unless you require a DBI version after 1.607
+you need to replace each DBD_ATTRIB_DELETE call with:>
+
+ hv_delete((HV*)SvRV(attr), key, key_len, G_DISCARD)
+
+Note that you can also obtain standard attributes such as I<AutoCommit> and
+I<ChopBlanks> from the attributes parameter, using C<DBD_ATTRIB_GET_IV> for
+integer attributes.
+
+If, for example, your database does not support transactions but
+I<AutoCommit> is set off (requesting transaction support), then you can
+emulate a 'failure to connect'.
+
+Now you should really connect to the database. In general, if the
+connection fails, it is best to ensure that all allocated resources are
+released so that the handle does not need to be destroyed separately. If
+you are successful (and possibly even if you fail but you have allocated
+some resources), you should use the following macros:
+
+ DBIc_IMPSET_on(imp_dbh);
+
+This indicates that the driver (implementor) has allocated resources in
+the I<imp_dbh> structure and that the implementors private C<dbd_db_destroy()>
+function should be called when the handle is destroyed.
+
+ DBIc_ACTIVE_on(imp_dbh);
+
+This indicates that the handle has an active connection to the server
+and that the C<dbd_db_disconnect()> function should be called before the
+handle is destroyed.
+
+Note that if you do need to fail, you should report errors via the I<drh>
+or I<imp_drh> rather than via I<dbh> or I<imp_dbh> because I<imp_dbh> will be
+destroyed by the failure, so errors recorded in that handle will not be
+visible to B<DBI>, and hence not the user either.
+
+Note too, that the function is passed I<dbh> and I<imp_dbh>, and there
+is a macro C<D_imp_drh_from_dbh> which can recover the I<imp_drh> from
+the I<imp_dbh>. However, there is no B<DBI> macro to provide you with the
+I<drh> given either the I<imp_dbh> or the I<dbh> or the I<imp_drh> (and
+there's no way to recover the I<dbh> given just the I<imp_dbh>).
+
+This suggests that, despite the above notes about C<dbd_drv_error()>
+taking an C<SV *>, it may be better to have two error routines, one
+taking I<imp_dbh> and one taking I<imp_drh> instead. With care, you can
+factor most of the formatting code out so that these are small routines
+calling a common error formatter. See the code in B<DBD::Informix>
+1.05.00 for more information.
+
+The C<dbd_db_login6()> function should return I<TRUE> for success,
+I<FALSE> otherwise.
+
+Drivers implemented long ago may define the five-argument function
+C<dbd_db_login()> instead of C<dbd_db_login6()>. The missing argument is
+the attributes. There are ways to work around the missing attributes,
+but they are ungainly; it is much better to use the 6-argument form.
+Even later drivers will use C<dbd_db_login6_sv()> which provides the
+dbname, username and password as SVs.
+
+=head3 The dbd_db_commit and dbd_db_rollback methods
+
+ int dbd_db_commit(SV *dbh, imp_dbh_t *imp_dbh);
+ int dbd_db_rollback(SV* dbh, imp_dbh_t* imp_dbh);
+
+These are used for commit and rollback. They should return I<TRUE> for
+success, I<FALSE> for error.
+
+The arguments I<dbh> and I<imp_dbh> are the same as for C<dbd_db_login6()>
+above; I will omit describing them in what follows, as they appear
+always.
+
+These functions should return I<TRUE> for success, I<FALSE> otherwise.
+
+=head3 The dbd_db_disconnect method
+
+This is your private part of the C<disconnect()> method. Any I<dbh> with
+the I<ACTIVE> flag on must be disconnected. (Note that you have to set
+it in C<dbd_db_login6()> above.)
+
+ int dbd_db_disconnect(SV* dbh, imp_dbh_t* imp_dbh);
+
+This function should return I<TRUE> for success, I<FALSE> otherwise.
+In any case it should do a:
+
+ DBIc_ACTIVE_off(imp_dbh);
+
+before returning so B<DBI> knows that C<dbd_db_disconnect()> was executed.
+
+Note that there's nothing to stop a I<dbh> being I<disconnected> while
+it still has active children. If your database API reacts badly to
+trying to use an I<sth> in this situation then you'll need to add code
+like this to all I<sth> methods:
+
+ if (!DBIc_ACTIVE(DBIc_PARENT_COM(imp_sth)))
+ return 0;
+
+Alternatively, you can add code to your driver to keep explicit track of
+the statement handles that exist for each database handle and arrange
+to destroy those handles before disconnecting from the database. There
+is code to do this in B<DBD::Informix>. Similar comments apply to the
+driver handle keeping track of all the database handles.
+
+Note that the code which destroys the subordinate handles should only
+release the associated database resources and mark the handles inactive;
+it does not attempt to free the actual handle structures.
+
+This function should return I<TRUE> for success, I<FALSE> otherwise, but
+it is not clear what anything can do about a failure.
+
+=head3 The dbd_discon_all method
+
+ int dbd_discon_all (SV *drh, imp_drh_t *imp_drh);
+
+This function may be called at shutdown time. It should make
+best-efforts to disconnect all database handles - if possible. Some
+databases don't support that, in which case you can do nothing
+but return 'success'.
+
+This function should return I<TRUE> for success, I<FALSE> otherwise, but
+it is not clear what anything can do about a failure.
+
+=head3 The dbd_db_destroy method
+
+This is your private part of the database handle destructor. Any I<dbh> with
+the I<IMPSET> flag on must be destroyed, so that you can safely free
+resources. (Note that you have to set it in C<dbd_db_login6()> above.)
+
+ void dbd_db_destroy(SV* dbh, imp_dbh_t* imp_dbh)
+ {
+ DBIc_IMPSET_off(imp_dbh);
+ }
+
+The B<DBI> F<Driver.xst> code will have called C<dbd_db_disconnect()> for you,
+if the handle is still 'active', before calling C<dbd_db_destroy()>.
+
+Before returning the function must switch I<IMPSET> to off, so B<DBI> knows
+that the destructor was called.
+
+A B<DBI> handle doesn't keep references to its children. But children
+do keep references to their parents. So a database handle won't be
+C<DESTROY>'d until all its children have been C<DESTROY>'d.
+
+=head3 The dbd_db_STORE_attrib method
+
+This function handles
+
+ $dbh->{$key} = $value;
+
+Its prototype is:
+
+ int dbd_db_STORE_attrib(SV* dbh, imp_dbh_t* imp_dbh, SV* keysv,
+ SV* valuesv);
+
+You do not handle all attributes; on the contrary, you should not handle
+B<DBI> attributes here: leave this to B<DBI>. (There are two exceptions,
+I<AutoCommit> and I<ChopBlanks>, which you should care about.)
+
+The return value is I<TRUE> if you have handled the attribute or I<FALSE>
+otherwise. If you are handling an attribute and something fails, you
+should call C<dbd_drv_error()>, so B<DBI> can raise exceptions, if desired.
+If C<dbd_drv_error()> returns, however, you have a problem: the user will
+never know about the error, because he typically will not check
+C<$dbh-E<gt>errstr()>.
+
+I cannot recommend a general way of going on, if C<dbd_drv_error()> returns,
+but there are examples where even the B<DBI> specification expects that
+you C<croak()>. (See the I<AutoCommit> method in L<DBI>.)
+
+If you have to store attributes, you should either use your private
+data structure I<imp_xxx>, the handle hash (via C<(HV*)SvRV(dbh)>), or use
+the private I<imp_data>.
+
+The first is best for internal C values like integers or pointers and
+where speed is important within the driver. The handle hash is best for
+values the user may want to get/set via driver-specific attributes.
+The private I<imp_data> is an additional C<SV> attached to the handle. You
+could think of it as an unnamed handle attribute. It's not normally used.
+
+=head3 The dbd_db_FETCH_attrib method
+
+This is the counterpart of C<dbd_db_STORE_attrib()>, needed for:
+
+ $value = $dbh->{$key};
+
+Its prototype is:
+
+ SV* dbd_db_FETCH_attrib(SV* dbh, imp_dbh_t* imp_dbh, SV* keysv);
+
+Unlike all previous methods this returns an C<SV> with the value. Note
+that you should normally execute C<sv_2mortal()>, if you return a nonconstant
+value. (Constant values are C<&sv_undef>, C<&sv_no> and C<&sv_yes>.)
+
+Note that B<DBI> implements a caching algorithm for attribute values.
+If you think that an attribute may be fetched, you store it in the
+I<dbh> itself:
+
+ if (cacheit) /* cache value for later DBI 'quick' fetch? */
+ hv_store((HV*)SvRV(dbh), key, kl, cachesv, 0);
+
+=head3 The dbd_st_prepare method
+
+This is the private part of the C<prepare()> method. Note that you
+B<must not> really execute the statement here. You may, however,
+preparse and validate the statement, or do similar things.
+
+ int dbd_st_prepare(SV* sth, imp_sth_t* imp_sth, char* statement,
+ SV* attribs);
+
+A typical, simple, possibility is to do nothing and rely on the perl
+C<prepare()> code that set the I<Statement> attribute on the handle. This
+attribute can then be used by C<dbd_st_execute()>.
+
+If the driver supports placeholders then the I<NUM_OF_PARAMS> attribute
+must be set correctly by C<dbd_st_prepare()>:
+
+ DBIc_NUM_PARAMS(imp_sth) = ...
+
+If you can, you should also setup attributes like I<NUM_OF_FIELDS>, I<NAME>,
+etc. here, but B<DBI> doesn't require that - they can be deferred until
+execute() is called. However, if you do, document it.
+
+In any case you should set the I<IMPSET> flag, as you did in
+C<dbd_db_connect()> above:
+
+ DBIc_IMPSET_on(imp_sth);
+
+=head3 The dbd_st_execute method
+
+This is where a statement will really be executed.
+
+ int dbd_st_execute(SV* sth, imp_sth_t* imp_sth);
+
+C<dbd_st_execute> should return -2 for any error, -1 if the number of
+rows affected is unknown else it should be the number of affected
+(updated, inserted) rows.
+
+Note that you must be aware a statement may be executed repeatedly.
+Also, you should not expect that C<finish()> will be called between two
+executions, so you might need code, like the following, near the start
+of the function:
+
+ if (DBIc_ACTIVE(imp_sth))
+ dbd_st_finish(h, imp_sth);
+
+If your driver supports the binding of parameters (it should!), but the
+database doesn't, you must do it here. This can be done as follows:
+
+ SV *svp;
+ char* statement = DBD_ATTRIB_GET_PV(h, "Statement", 9, svp, "");
+ int numParam = DBIc_NUM_PARAMS(imp_sth);
+ int i;
+
+ for (i = 0; i < numParam; i++)
+ {
+ char* value = dbd_db_get_param(sth, imp_sth, i);
+ /* It is your drivers task to implement dbd_db_get_param, */
+ /* it must be setup as a counterpart of dbd_bind_ph. */
+ /* Look for '?' and replace it with 'value'. Difficult */
+ /* task, note that you may have question marks inside */
+ /* quotes and comments the like ... :-( */
+ /* See DBD::mysql for an example. (Don't look too deep into */
+ /* the example, you will notice where I was lazy ...) */
+ }
+
+The next thing is to really execute the statement.
+
+Note that you must set the attributes I<NUM_OF_FIELDS>, I<NAME>, etc
+when the statement is successfully executed if the driver has not
+already done so: they may be used even before a potential C<fetchrow()>.
+In particular you have to tell B<DBI> the number of fields that the
+statement has, because it will be used by B<DBI> internally. Thus the
+function will typically end with:
+
+ if (isSelectStatement) {
+ DBIc_NUM_FIELDS(imp_sth) = numFields;
+ DBIc_ACTIVE_on(imp_sth);
+ }
+
+It is important that the I<ACTIVE> flag only be set for C<SELECT>
+statements (or any other statements that can return many
+values from the database using a cursor-like mechanism). See
+C<dbd_db_connect()> above for more explanations.
+
+There are plans for a preparse function to be provided by B<DBI>, but this has
+not reached fruition yet.
+Meantime, if you want to know how ugly it can get, try looking at the
+C<dbd_ix_preparse()> in B<DBD::Informix> F<dbdimp.ec> and the related
+functions in F<iustoken.c> and F<sqltoken.c>.
+
+=head3 The dbd_st_fetch method
+
+This function fetches a row of data. The row is stored in an array
+of C<SV>'s that B<DBI> prepares for you. This has two advantages: it is fast
+(you even reuse the C<SV>'s, so they don't have to be created after the
+first C<fetchrow()>), and it guarantees that B<DBI> handles C<bind_cols()> for
+you.
+
+What you do is the following:
+
+ AV* av;
+ int numFields = DBIc_NUM_FIELDS(imp_sth); /* Correct, if NUM_FIELDS
+ is constant for this statement. There are drivers where this is
+ not the case! */
+ int chopBlanks = DBIc_is(imp_sth, DBIcf_ChopBlanks);
+ int i;
+
+ if (!fetch_new_row_of_data(...)) {
+ ... /* check for error or end-of-data */
+ DBIc_ACTIVE_off(imp_sth); /* turn off Active flag automatically */
+ return Nullav;
+ }
+ /* get the fbav (field buffer array value) for this row */
+ /* it is very important to only call this after you know */
+ /* that you have a row of data to return. */
+ av = DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth);
+ for (i = 0; i < numFields; i++) {
+ SV* sv = fetch_a_field(..., i);
+ if (chopBlanks && SvOK(sv) && type_is_blank_padded(field_type[i])) {
+ /* Remove white space from end (only) of sv */
+ }
+ sv_setsv(AvARRAY(av)[i], sv); /* Note: (re)use! */
+ }
+ return av;
+
+There's no need to use a C<fetch_a_field()> function returning an C<SV*>.
+It's more common to use your database API functions to fetch the
+data as character strings and use code like this:
+
+ sv_setpvn(AvARRAY(av)[i], char_ptr, char_count);
+
+C<NULL> values must be returned as C<undef>. You can use code like this:
+
+ SvOK_off(AvARRAY(av)[i]);
+
+The function returns the C<AV> prepared by B<DBI> for success or C<Nullav>
+otherwise.
+
+ *FIX ME* Discuss what happens when there's no more data to fetch.
+ Are errors permitted if another fetch occurs after the first fetch
+ that reports no more data. (Permitted, not required.)
+
+If an error occurs which leaves the I<$sth> in a state where remaining
+rows can't be fetched then I<Active> should be turned off before the
+method returns.
+
+=head3 The dbd_st_finish3 method
+
+The C<$sth-E<gt>finish()> method can be called if the user wishes to
+indicate that no more rows will be fetched even if the database has more
+rows to offer, and the B<DBI> code can call the function when handles are
+being destroyed. See the B<DBI> specification for more background details.
+
+In both circumstances, the B<DBI> code ends up calling the
+C<dbd_st_finish3()> method (if you provide a mapping for
+C<dbd_st_finish3()> in F<dbdimp.h>), or C<dbd_st_finish()> otherwise.
+The difference is that C<dbd_st_finish3()> takes a third argument which
+is an C<int> with the value 1 if it is being called from a C<destroy()>
+method and 0 otherwise.
+
+Note that B<DBI> v1.32 and earlier test on C<dbd_db_finish3()> to call
+C<dbd_st_finish3()>; if you provide C<dbd_st_finish3()>, either define
+C<dbd_db_finish3()> too, or insist on B<DBI> v1.33 or later.
+
+All it I<needs> to do is turn off the I<Active> flag for the I<sth>.
+It will only be called by F<Driver.xst> code, if the driver has set I<ACTIVE>
+to on for the I<sth>.
+
+Outline example:
+
+ int dbd_st_finish3(SV* sth, imp_sth_t* imp_sth, int from_destroy) {
+ if (DBIc_ACTIVE(imp_sth))
+ {
+ /* close cursor or equivalent action */
+ DBIc_ACTIVE_off(imp_sth);
+ }
+ return 1;
+ }
+
+The from_destroy parameter is true if C<dbd_st_finish3()> is being called
+from C<DESTROY()> - and so the statement is about to be destroyed.
+For many drivers there is no point in doing anything more than turning off
+the I<Active> flag in this case.
+
+The function returns I<TRUE> for success, I<FALSE> otherwise, but there isn't
+a lot anyone can do to recover if there is an error.
+
+=head3 The dbd_st_destroy method
+
+This function is the private part of the statement handle destructor.
+
+ void dbd_st_destroy(SV* sth, imp_sth_t* imp_sth) {
+ ... /* any clean-up that's needed */
+ DBIc_IMPSET_off(imp_sth); /* let DBI know we've done it */
+ }
+
+The B<DBI> F<Driver.xst> code will call C<dbd_st_finish()> for you, if the
+I<sth> has the I<ACTIVE> flag set, before calling C<dbd_st_destroy()>.
+
+=head3 The dbd_st_STORE_attrib and dbd_st_FETCH_attrib methods
+
+These functions correspond to C<dbd_db_STORE()> and C<dbd_db_FETCH()> attrib
+above, except that they are for statement handles.
+See above.
+
+ int dbd_st_STORE_attrib(SV* sth, imp_sth_t* imp_sth, SV* keysv,
+ SV* valuesv);
+ SV* dbd_st_FETCH_attrib(SV* sth, imp_sth_t* imp_sth, SV* keysv);
+
+=head3 The dbd_bind_ph method
+
+This function is internally used by the C<bind_param()> method, the
+C<bind_param_inout()> method and by the B<DBI> F<Driver.xst> code if
+C<execute()> is called with any bind parameters.
+
+ int dbd_bind_ph (SV *sth, imp_sth_t *imp_sth, SV *param,
+ SV *value, IV sql_type, SV *attribs,
+ int is_inout, IV maxlen);
+
+The I<param> argument holds an C<IV> with the parameter number (1, 2, ...).
+The I<value> argument is the parameter value and I<sql_type> is its type.
+
+If your driver does not support C<bind_param_inout()> then you should
+ignore I<maxlen> and croak if I<is_inout> is I<TRUE>.
+
+If your driver I<does> support C<bind_param_inout()> then you should
+note that I<value> is the C<SV> I<after> dereferencing the reference
+passed to C<bind_param_inout()>.
+
+In drivers of simple databases the function will, for example, store
+the value in a parameter array and use it later in C<dbd_st_execute()>.
+See the B<DBD::mysql> driver for an example.
+
+=head3 Implementing bind_param_inout support
+
+To provide support for parameters bound by reference rather than by
+value, the driver must do a number of things. First, and most
+importantly, it must note the references and stash them in its own
+driver structure. Secondly, when a value is bound to a column, the
+driver must discard any previous reference bound to the column. On
+each execute, the driver must evaluate the references and internally
+bind the values resulting from the references. This is only applicable
+if the user writes:
+
+ $sth->execute;
+
+If the user writes:
+
+ $sth->execute(@values);
+
+then B<DBI> automatically calls the binding code for each element of
+I<@values>. These calls are indistinguishable from explicit user calls to
+C<bind_param()>.
+
+=head2 C/XS version of Makefile.PL
+
+The F<Makefile.PL> file for a C/XS driver is similar to the code needed
+for a pure Perl driver, but there are a number of extra bits of
+information needed by the build system.
+
+For example, the attributes list passed to C<WriteMakefile()> needs
+to specify the object files that need to be compiled and built into
+the shared object (DLL). This is often, but not necessarily, just
+F<dbdimp.o> (unless that should be F<dbdimp.obj> because you're building
+on MS Windows).
+
+Note that you can reliably determine the extension of the object files
+from the I<$Config{obj_ext}> values, and there are many other useful pieces
+of configuration information lurking in that hash.
+You get access to it with:
+
+ use Config;
+
+=head2 Methods which do not need to be written
+
+The B<DBI> code implements the majority of the methods which are accessed
+using the notation C<DBI-E<gt>function()>, the only exceptions being
+C<DBI-E<gt>connect()> and C<DBI-E<gt>data_sources()> which require
+support from the driver.
+
+The B<DBI> code implements the following documented driver, database and
+statement functions which do not need to be written by the B<DBD> driver
+writer.
+
+=over 4
+
+=item $dbh->do()
+
+The default implementation of this function prepares, executes and
+destroys the statement. This can be replaced if there is a better
+way to implement this, such as C<EXECUTE IMMEDIATE> which can
+sometimes be used if there are no parameters.
+
+=item $h->errstr()
+
+=item $h->err()
+
+=item $h->state()
+
+=item $h->trace()
+
+The B<DBD> driver does not need to worry about these routines at all.
+
+=item $h->{ChopBlanks}
+
+This attribute needs to be honored during C<fetch()> operations, but does
+not need to be handled by the attribute handling code.
+
+=item $h->{RaiseError}
+
+The B<DBD> driver does not need to worry about this attribute at all.
+
+=item $h->{PrintError}
+
+The B<DBD> driver does not need to worry about this attribute at all.
+
+=item $sth->bind_col()
+
+Assuming the driver uses the C<DBIc_DBISTATE(imp_xxh)-E<gt>get_fbav()>
+function (C drivers, see below), or the C<$sth-E<gt>_set_fbav($data)>
+method (Perl drivers) the driver does not need to do anything about this
+routine.
+
+=item $sth->bind_columns()
+
+Regardless of whether the driver uses
+C<DBIc_DBISTATE(imp_xxh)-E<gt>get_fbav()>, the driver does not need
+to do anything about this routine as it simply iteratively calls
+C<$sth-E<gt>bind_col()>.
+
+=back
+
+The B<DBI> code implements a default implementation of the following
+functions which do not need to be written by the B<DBD> driver writer
+unless the default implementation is incorrect for the Driver.
+
+=over 4
+
+=item $dbh->quote()
+
+This should only be written if the database does not accept the ANSI
+SQL standard for quoting strings, with the string enclosed in single
+quotes and any embedded single quotes replaced by two consecutive
+single quotes.
+
+For the two argument form of quote, you need to implement the
+C<type_info()> method to provide the information that quote needs.
+
+=item $dbh->ping()
+
+This should be implemented as a simple efficient way to determine
+whether the connection to the database is still alive. Typically
+code like this:
+
+ sub ping {
+ my $dbh = shift;
+ $sth = $dbh->prepare_cached(q{
+ select * from A_TABLE_NAME where 1=0
+ }) or return 0;
+ $sth->execute or return 0;
+ $sth->finish;
+ return 1;
+ }
+
+where I<A_TABLE_NAME> is the name of a table that always exists (such as a
+database system catalogue).
+
+=item $drh->default_user
+
+The default implementation of default_user will get the database
+username and password fields from C<$ENV{DBI_USER}> and
+C<$ENV{DBI_PASS}>. You can override this method. It is called as
+follows:
+
+ ($user, $pass) = $drh->default_user($user, $pass, $attr)
+
+=back
+
+=head1 METADATA METHODS
+
+The exposition above ignores the B<DBI> MetaData methods.
+The metadata methods are all associated with a database handle.
+
+=head2 Using DBI::DBD::Metadata
+
+The B<DBI::DBD::Metadata> module is a good semi-automatic way for the
+developer of a B<DBD> module to write the C<get_info()> and C<type_info()>
+functions quickly and accurately.
+
+=head3 Generating the get_info method
+
+Prior to B<DBI> v1.33, this existed as the method C<write_getinfo_pm()>
+in the B<DBI::DBD> module. From B<DBI> v1.33, it exists as the method
+C<write_getinfo_pm()> in the B<DBI::DBD::Metadata> module. This
+discussion assumes you have B<DBI> v1.33 or later.
+
+You examine the documentation for C<write_getinfo_pm()> using:
+
+ perldoc DBI::DBD::Metadata
+
+To use it, you need a Perl B<DBI> driver for your database which implements
+the C<get_info()> method. In practice, this means you need to install
+B<DBD::ODBC>, an ODBC driver manager, and an ODBC driver for your
+database.
+
+With the pre-requisites in place, you might type:
+
+ perl -MDBI::DBD::Metadata -we \
+ "write_getinfo_pm (qw{ dbi:ODBC:foo_db username password Driver })"
+
+The procedure writes to standard output the code that should be added to
+your F<Driver.pm> file and the code that should be written to
+F<lib/DBD/Driver/GetInfo.pm>.
+
+You should review the output to ensure that it is sensible.
+
+=head3 Generating the type_info method
+
+Given the idea of the C<write_getinfo_pm()> method, it was not hard
+to devise a parallel method, C<write_typeinfo_pm()>, which does the
+analogous job for the B<DBI> C<type_info_all()> metadata method. The
+C<write_typeinfo_pm()> method was added to B<DBI> v1.33.
+
+You examine the documentation for C<write_typeinfo_pm()> using:
+
+ perldoc DBI::DBD::Metadata
+
+The setup is exactly analogous to the mechanism described in
+L</Generating the get_info method>.
+
+With the pre-requisites in place, you might type:
+
+ perl -MDBI::DBD::Metadata -we \
+ "write_typeinfo (qw{ dbi:ODBC:foo_db username password Driver })"
+
+The procedure writes to standard output the code that should be added to
+your F<Driver.pm> file and the code that should be written to
+F<lib/DBD/Driver/TypeInfo.pm>.
+
+You should review the output to ensure that it is sensible.
+
+=head2 Writing DBD::Driver::db::get_info
+
+If you use the B<DBI::DBD::Metadata> module, then the code you need is
+generated for you.
+
+If you decide not to use the B<DBI::DBD::Metadata> module, you
+should probably borrow the code from a driver that has done so (eg
+B<DBD::Informix> from version 1.05 onwards) and crib the code from
+there, or look at the code that generates that module and follow
+that. The method in F<Driver.pm> will be very simple; the method in
+F<lib/DBD/Driver/GetInfo.pm> is not very much more complex unless your
+DBMS itself is much more complex.
+
+Note that some of the B<DBI> utility methods rely on information from the
+C<get_info()> method to perform their operations correctly. See, for
+example, the C<quote_identifier()> and quote methods, discussed below.
+
+=head2 Writing DBD::Driver::db::type_info_all
+
+If you use the C<DBI::DBD::Metadata> module, then the code you need is
+generated for you.
+
+If you decide not to use the C<DBI::DBD::Metadata> module, you
+should probably borrow the code from a driver that has done so (eg
+C<DBD::Informix> from version 1.05 onwards) and crib the code from
+there, or look at the code that generates that module and follow
+that. The method in F<Driver.pm> will be very simple; the method in
+F<lib/DBD/Driver/TypeInfo.pm> is not very much more complex unless your
+DBMS itself is much more complex.
+
+=head2 Writing DBD::Driver::db::type_info
+
+The guidelines on writing this method are still not really clear.
+No sample implementation is available.
+
+=head2 Writing DBD::Driver::db::table_info
+
+ *FIX ME* The guidelines on writing this method have not been written yet.
+ No sample implementation is available.
+
+=head2 Writing DBD::Driver::db::column_info
+
+ *FIX ME* The guidelines on writing this method have not been written yet.
+ No sample implementation is available.
+
+=head2 Writing DBD::Driver::db::primary_key_info
+
+ *FIX ME* The guidelines on writing this method have not been written yet.
+ No sample implementation is available.
+
+=head2 Writing DBD::Driver::db::primary_key
+
+ *FIX ME* The guidelines on writing this method have not been written yet.
+ No sample implementation is available.
+
+=head2 Writing DBD::Driver::db::foreign_key_info
+
+ *FIX ME* The guidelines on writing this method have not been written yet.
+ No sample implementation is available.
+
+=head2 Writing DBD::Driver::db::tables
+
+This method generates an array of names in a format suitable for being
+embedded in SQL statements in places where a table name is expected.
+
+If your database hews close enough to the SQL standard or if you have
+implemented an appropriate C<table_info()> function and the appropriate
+C<quote_identifier()> function, then the B<DBI> default version of this method
+will work for your driver too.
+
+Otherwise, you have to write a function yourself, such as:
+
+ sub tables
+ {
+ my($dbh, $cat, $sch, $tab, $typ) = @_;
+ my(@res);
+ my($sth) = $dbh->table_info($cat, $sch, $tab, $typ);
+ my(@arr);
+ while (@arr = $sth->fetchrow_array)
+ {
+ push @res, $dbh->quote_identifier($arr[0], $arr[1], $arr[2]);
+ }
+ return @res;
+ }
+
+See also the default implementation in F<DBI.pm>.
+
+=head2 Writing DBD::Driver::db::quote
+
+This method takes a value and converts it into a string suitable for
+embedding in an SQL statement as a string literal.
+
+If your DBMS accepts the SQL standard notation for strings (single
+quotes around the string as a whole with any embedded single quotes
+doubled up), then you do not need to write this method as B<DBI> provides a
+default method that does it for you.
+
+If your DBMS uses an alternative notation or escape mechanism, then you
+need to provide an equivalent function. For example, suppose your DBMS
+used C notation with double quotes around the string and backslashes
+escaping both double quotes and backslashes themselves. Then you might
+write the function as:
+
+ sub quote
+ {
+ my($dbh, $str) = @_;
+ $str =~ s/["\\]/\\$&/gmo;
+ return qq{"$str"};
+ }
+
+Handling newlines and other control characters is left as an exercise
+for the reader.
+
+This sample method ignores the I<$data_type> indicator which is the
+optional second argument to the method.
+
+=head2 Writing DBD::Driver::db::quote_identifier
+
+This method is called to ensure that the name of the given table (or
+other database object) can be embedded into an SQL statement without
+danger of misinterpretation. The result string should be usable in the
+text of an SQL statement as the identifier for a table.
+
+If your DBMS accepts the SQL standard notation for quoted identifiers
+(which uses double quotes around the identifier as a whole, with any
+embedded double quotes doubled up) and accepts I<"schema"."identifier">
+(and I<"catalog"."schema"."identifier"> when a catalog is specified), then
+you do not need to write this method as B<DBI> provides a default method
+that does it for you.
+
+In fact, even if your DBMS does not handle exactly that notation but
+you have implemented the C<get_info()> method and it gives the correct
+responses, then it will work for you. If your database is fussier, then
+you need to implement your own version of the function.
+
+For example, B<DBD::Informix> has to deal with an environment variable
+I<DELIMIDENT>. If it is not set, then the DBMS treats names enclosed in
+double quotes as strings rather than names, which is usually a syntax
+error. Additionally, the catalog portion of the name is separated from
+the schema and table by a different delimiter (colon instead of dot),
+and the catalog portion is never enclosed in quotes. (Fortunately,
+valid strings for the catalog will never contain weird characters that
+might need to be escaped, unless you count dots, dashes, slashes and
+at-signs as weird.) Finally, an Informix database can contain objects
+that cannot be accessed because they were created by a user with the
+I<DELIMIDENT> environment variable set, but the current user does not
+have it set. By design choice, the C<quote_identifier()> method encloses
+those identifiers in double quotes anyway, which generally triggers a
+syntax error, and the metadata methods which generate lists of tables
+etc omit those identifiers from the result sets.
+
+ sub quote_identifier
+ {
+ my($dbh, $cat, $sch, $obj) = @_;
+ my($rv) = "";
+ my($qq) = (defined $ENV{DELIMIDENT}) ? '"' : '';
+ $rv .= qq{$cat:} if (defined $cat);
+ if (defined $sch)
+ {
+ if ($sch !~ m/^\w+$/o)
+ {
+ $qq = '"';
+ $sch =~ s/$qq/$qq$qq/gm;
+ }
+ $rv .= qq{$qq$sch$qq.};
+ }
+ if (defined $obj)
+ {
+ if ($obj !~ m/^\w+$/o)
+ {
+ $qq = '"';
+ $obj =~ s/$qq/$qq$qq/gm;
+ }
+ $rv .= qq{$qq$obj$qq};
+ }
+ return $rv;
+ }
+
+Handling newlines and other control characters is left as an exercise
+for the reader.
+
+Note that there is an optional fourth parameter to this function which
+is a reference to a hash of attributes; this sample implementation
+ignores that.
+
+This sample implementation also ignores the single-argument variant of
+the method.
+
+=head1 TRACING
+
+Tracing in DBI is controlled with a combination of a trace level and a
+set of flags which together are known as the trace settings. The trace
+settings are stored in a single integer and divided into levels and
+flags by a set of masks (C<DBIc_TRACE_LEVEL_MASK> and
+C<DBIc_TRACE_FLAGS_MASK>).
+
+Each handle has its own trace settings and so does the DBI. When you
+call a method the DBI merges the handle's settings into its own for the
+duration of the call: the trace flags of the handle are OR'd into the
+trace flags of the DBI, and if the handle has a higher trace level
+then the DBI trace level is raised to match it. The previous DBI trace
+settings are restored when the called method returns.
+
+=head2 Trace Level
+
+The trace level is the first 4 bits of the trace settings (masked by
+C<DBIc_TRACE_LEVEL_MASK>) and represents trace levels of 1 to 15. Do
+not output anything at trace levels less than 3 as they are reserved
+for DBI.
+
+For advice on what to output at each level see "Trace Levels" in
+L<DBI>.
+
+To test for a trace level you can use the C<DBIc_TRACE_LEVEL> macro
+like this:
+
+ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) {
+ PerlIO_printf(DBIc_LOGPIO(imp_xxh), "foobar");
+ }
+
+Also B<note> the use of PerlIO_printf which you should always use for
+tracing and never the C C<stdio.h> I/O functions.
+
+=head2 Trace Flags
+
+Trace flags are used to enable tracing of specific activities within
+the DBI and drivers. The DBI defines some trace flags and drivers can
+define others. DBI trace flag names begin with a capital letter and
+driver specific names begin with a lowercase letter. For a list of DBI
+defined trace flags see "Trace Flags" in L<DBI>.
+
+If you want to use private trace flags you'll probably want to be able
+to set them by name. Drivers are expected to override the
+parse_trace_flag (note the singular) and check if $trace_flag_name is
+a driver specific trace flag and, if not, then call the DBI's default
+parse_trace_flag(). To do that you'll need to define a
+parse_trace_flag() method like this:
+
+ sub parse_trace_flag {
+ my ($h, $name) = @_;
+ return 0x01000000 if $name eq 'foo';
+ return 0x02000000 if $name eq 'bar';
+ return 0x04000000 if $name eq 'baz';
+ return 0x08000000 if $name eq 'boo';
+ return 0x10000000 if $name eq 'bop';
+ return $h->SUPER::parse_trace_flag($name);
+ }
+
+All private flag names must be lowercase, and all private flags must
+be in the top 8 of the 32 bits of C<DBIc_TRACE_FLAGS(imp)> i.e.,
+0xFF000000.
+
+If you've defined a parse_trace_flag() method in ::db you'll also want
+it in ::st, so just alias it in:
+
+  *parse_trace_flag = \&DBD::foo::db::parse_trace_flag;
+
+You may want to act on the current 'SQL' trace flag that DBI defines
+to output SQL prepared/executed as DBI currently does not do SQL
+tracing.
+
+=head2 Trace Macros
+
+Access to the trace level and trace flags is via a set of macros.
+
+ DBIc_TRACE_SETTINGS(imp) returns the trace settings
+ DBIc_TRACE_LEVEL(imp) returns the trace level
+ DBIc_TRACE_FLAGS(imp) returns the trace flags
+ DBIc_TRACE(imp, flags, flaglevel, level)
+
+ e.g.,
+
+ DBIc_TRACE(imp, 0, 0, 4)
+ if level >= 4
+
+ DBIc_TRACE(imp, DBDtf_FOO, 2, 4)
+ if tracing DBDtf_FOO & level>=2 or level>=4
+
+ DBIc_TRACE(imp, DBDtf_FOO, 2, 0)
+ as above but never trace just due to level
+
+=head1 WRITING AN EMULATION LAYER FOR AN OLD PERL INTERFACE
+
+Study F<Oraperl.pm> (supplied with B<DBD::Oracle>) and F<Ingperl.pm> (supplied
+with B<DBD::Ingres>) and the corresponding I<dbdimp.c> files for ideas.
+
+Note that the emulation code sets C<$dbh-E<gt>{CompatMode} = 1;> for each
+connection so that the internals of the driver can implement behaviour
+compatible with the old interface when dealing with those handles.
+
+=head2 Setting emulation perl variables
+
+For example, ingperl has a I<$sql_rowcount> variable. Rather than try
+to manually update this in F<Ingperl.pm> it can be done faster in C code.
+In C<dbd_init()>:
+
+ sql_rowcount = perl_get_sv("Ingperl::sql_rowcount", GV_ADDMULTI);
+
+In the relevant places do:
+
+ if (DBIc_COMPAT(imp_sth)) /* only do this for compatibility mode handles */
+ sv_setiv(sql_rowcount, the_row_count);
+
+=head1 OTHER MISCELLANEOUS INFORMATION
+
+=head2 The imp_xyz_t types
+
+Any handle has a corresponding C structure filled with private data.
+Some of this data is reserved for use by B<DBI> (except for using the
+DBIc macros below), some is for you. See the description of the
+F<dbdimp.h> file above for examples. Most functions in F<dbdimp.c>
+are passed both the handle C<xyz> and a pointer to C<imp_xyz>. In
+rare cases, however, you may use the following macros:
+
+=over 4
+
+=item D_imp_dbh(dbh)
+
+Given a function argument I<dbh>, declare a variable I<imp_dbh> and
+initialize it with a pointer to the handle's private data. Note: This
+must be a part of the function header, because it declares a variable.
+
+=item D_imp_sth(sth)
+
+Likewise for statement handles.
+
+=item D_imp_xxx(h)
+
+Given any handle, declare a variable I<imp_xxx> and initialize it
+with a pointer to the handles private data. It is safe, for example,
+to cast I<imp_xxx> to C<imp_dbh_t*>, if C<DBIc_TYPE(imp_xxx) == DBIt_DB>.
+(You can also call C<sv_derived_from(h, "DBI::db")>, but that's much
+slower.)
+
+=item D_imp_dbh_from_sth
+
+Given a I<imp_sth>, declare a variable I<imp_dbh> and initialize it with a
+pointer to the parent database handle's implementors structure.
+
+=back
+
+=head2 Using DBIc_IMPSET_on
+
+The driver code which initializes a handle should use C<DBIc_IMPSET_on()>
+as soon as its state is such that the cleanup code must be called.
+When this happens is determined by your driver code.
+
+B<Failure to call this can lead to corruption of data structures.>
+
+For example, B<DBD::Informix> maintains a linked list of database
+handles in the driver, and within each handle, a linked list of
+statements. Once a statement is added to the linked list, it is crucial
+that it is cleaned up (removed from the list). When I<DBIc_IMPSET_on()>
+was being called too late, it was able to cause all sorts of problems.
+
+=head2 Using DBIc_is(), DBIc_has(), DBIc_on() and DBIc_off()
+
+Once upon a long time ago, the only way of handling the internal B<DBI>
+boolean flags/attributes was through macros such as:
+
+ DBIc_WARN DBIc_WARN_on DBIc_WARN_off
+ DBIc_COMPAT DBIc_COMPAT_on DBIc_COMPAT_off
+
+Each of these took an I<imp_xxh> pointer as an argument.
+
+Since then, new attributes have been added such as I<ChopBlanks>,
+I<RaiseError> and I<PrintError>, and these do not have the full set of
+macros. The approved method for handling these is now the four macros:
+
+ DBIc_is(imp, flag)
+ DBIc_has(imp, flag) an alias for DBIc_is
+ DBIc_on(imp, flag)
+ DBIc_off(imp, flag)
+ DBIc_set(imp, flag, on) set if on is true, else clear
+
+Consequently, the C<DBIc_XXXXX> family of macros is now mostly deprecated
+and new drivers should avoid using them, even though the older drivers
+will probably continue to do so for quite a while yet. However...
+
+There is an I<important exception> to that. The I<ACTIVE> and I<IMPSET>
+flags should be set via the C<DBIc_ACTIVE_on()> and C<DBIc_IMPSET_on()> macros,
+and unset via the C<DBIc_ACTIVE_off()> and C<DBIc_IMPSET_off()> macros.
+
+=head2 Using the get_fbav() method
+
+B<THIS IS CRITICAL for C/XS drivers>.
+
+The C<$sth-E<gt>bind_col()> and C<$sth-E<gt>bind_columns()> documented
+in the B<DBI> specification do not have to be implemented by the driver
+writer because B<DBI> takes care of the details for you.
+
+However, the key to ensuring that bound columns work is to call the
+function C<DBIc_DBISTATE(imp_xxh)-E<gt>get_fbav()> in the code which
+fetches a row of data.
+
+This returns an C<AV>, and each element of the C<AV> contains the C<SV> which
+should be set to contain the returned data.
+
+The pure Perl equivalent is the C<$sth-E<gt>_set_fbav($data)> method, as
+described in the part on pure Perl drivers.
+
+=head2 Casting strings to Perl types based on a SQL type
+
+DBI from 1.611 (and DBIXS_REVISION 13606) defines the
+sql_type_cast_svpv method which may be used to cast a string
+representation of a value to a more specific Perl type based on a SQL
+type. You should consider using this method when processing bound
+column data as it provides some support for the TYPE bind_col
+attribute which is rarely used in drivers.
+
+ int sql_type_cast_svpv(pTHX_ SV *sv, int sql_type, U32 flags, void *v)
+
+C<sv> is what you would like cast, C<sql_type> is one of the DBI defined
+SQL types (e.g., C<SQL_INTEGER>) and C<flags> is a bitmask as follows:
+
+=over
+
+=item DBIstcf_STRICT
+
+If set this indicates you want an error state returned if the cast
+cannot be performed.
+
+=item DBIstcf_DISCARD_STRING
+
+If set and the pv portion of the C<sv> is cast then this will cause
+sv's pv to be freed up.
+
+=back
+
+sql_type_cast_svpv returns the following states:
+
+ -2 sql_type is not handled - sv not changed
+ -1 sv is undef, sv not changed
+ 0 sv could not be cast cleanly and DBIstcf_STRICT was specified
+  1 sv could not be cast cleanly and DBIstcf_STRICT was not specified
+ 2 sv was cast ok
+
+The current implementation of sql_type_cast_svpv supports
+C<SQL_INTEGER>, C<SQL_DOUBLE> and C<SQL_NUMERIC>. C<SQL_INTEGER> uses
+sv_2iv and hence may set IV, UV or NV depending on the
+number. C<SQL_DOUBLE> uses sv_2nv so may set NV and C<SQL_NUMERIC>
+will set IV or UV or NV.
+
+DBIstcf_STRICT should be implemented as the StrictlyTyped attribute
+and DBIstcf_DISCARD_STRING implemented as the DiscardString attribute
+to the bind_col method and both default to off.
+
+See DBD::Oracle for an example of how this is used.
+
+=head1 SUBCLASSING DBI DRIVERS
+
+This is definitely an open subject. It can be done, as demonstrated by
+the B<DBD::File> driver, but it is not as simple as one might think.
+
+(Note that this topic is different from subclassing the B<DBI>. For an
+example of that, see the F<t/subclass.t> file supplied with the B<DBI>.)
+
+The main problem is that the I<dbh>'s and I<sth>'s that your C<connect()> and
+C<prepare()> methods return are not instances of your B<DBD::Driver::db>
+or B<DBD::Driver::st> packages, they are not even derived from it.
+Instead they are instances of the B<DBI::db> or B<DBI::st> classes or
+a derived subclass. Thus, if you write a method C<mymethod()> and do a
+
+ $dbh->mymethod()
+
+then the autoloader will search for that method in the package B<DBI::db>.
Of course you can instead do a
+
+ $dbh->func('mymethod')
+
+and that will indeed work, even if C<mymethod()> is inherited, but not
+without additional work. Setting I<@ISA> is not sufficient.
+
+=head2 Overwriting methods
+
+The first problem is, that the C<connect()> method has no idea of
+subclasses. For example, you cannot implement base class and subclass
+in the same file: The C<install_driver()> method wants to do a
+
+ require DBD::Driver;
+
+In particular, your subclass B<has> to be a separate driver, from
+the view of B<DBI>, and you cannot share driver handles.
+
+Of course that's not much of a problem. You should even be able
+to inherit the base classes C<connect()> method. But you cannot
+simply overwrite the method, unless you do something like this,
+quoted from B<DBD::CSV>:
+
+ sub connect ($$;$$$) {
+ my ($drh, $dbname, $user, $auth, $attr) = @_;
+
+ my $this = $drh->DBD::File::dr::connect($dbname, $user, $auth, $attr);
+ if (!exists($this->{csv_tables})) {
+ $this->{csv_tables} = {};
+ }
+
+ $this;
+ }
+
+Note that we cannot do a
+
+ $drh->SUPER::connect($dbname, $user, $auth, $attr);
+
as we would usually do in an OO environment, because I<$drh> is an instance
+of B<DBI::dr>. And note, that the C<connect()> method of B<DBD::File> is
+able to handle subclass attributes. See the description of Pure Perl
+drivers above.
+
It is essential that you always call the superclass method in the above
+manner. However, that should do.
+
+=head2 Attribute handling
+
+Fortunately the B<DBI> specifications allow a simple, but still
+performant way of handling attributes. The idea is based on the
+convention that any driver uses a prefix I<driver_> for its private
+methods. Thus it's always clear whether to pass attributes to the super
+class or not. For example, consider this C<STORE()> method from the
+B<DBD::CSV> class:
+
+ sub STORE {
+ my ($dbh, $attr, $val) = @_;
+ if ($attr !~ /^driver_/) {
+ return $dbh->DBD::File::db::STORE($attr, $val);
+ }
+ if ($attr eq 'driver_foo') {
+ ...
+ }
+
+=cut
+
+use Exporter ();
+use Config qw(%Config);
+use Carp;
+use Cwd;
+use File::Spec;
+use strict;
+use vars qw(
+ @ISA @EXPORT
+ $is_dbi
+);
+
+BEGIN {
+ if ($^O eq 'VMS') {
+ require vmsish;
+ import vmsish;
+ require VMS::Filespec;
+ import VMS::Filespec;
+ }
+ else {
+ *vmsify = sub { return $_[0] };
+ *unixify = sub { return $_[0] };
+ }
+}
+
+@ISA = qw(Exporter);
+
+@EXPORT = qw(
+ dbd_dbi_dir
+ dbd_dbi_arch_dir
+ dbd_edit_mm_attribs
+ dbd_postamble
+);
+
+BEGIN {
+ $is_dbi = (-r 'DBI.pm' && -r 'DBI.xs' && -r 'DBIXS.h');
+ require DBI unless $is_dbi;
+}
+
+my $done_inst_checks;
+
sub _inst_checks {
    # Sanity-check the build environment before any of the public dbd_*
    # helpers do real work.  Warns (and pauses briefly so the warning is
    # seen) about current-directory paths likely to break builds, and dies
    # early when a Microsoft compiler is selected but not configured.
    # Runs at most once per process ($done_inst_checks guard).
    return if $done_inst_checks++;
    my $cwd = cwd();
    # A path-separator character embedded in the build path confuses tools
    # that split PATH-like strings on it.
    if ($cwd =~ /\Q$Config{path_sep}/) {
        warn "*** Warning: Path separator characters (`$Config{path_sep}') ",
            "in the current directory path ($cwd) may cause problems\a\n\n";
        sleep 2;
    }
    if ($cwd =~ /\s/) {
        warn "*** Warning: whitespace characters ",
            "in the current directory path ($cwd) may cause problems\a\n\n";
        sleep 2;
    }
    # MSVC ('cl') needs LIB and INCLUDE set (normally by vcvars32.bat).
    if ( $^O eq 'MSWin32'
      && $Config{cc} eq 'cl'
      && !(exists $ENV{'LIB'} && exists $ENV{'INCLUDE'}))
    {
        die <<EOT;
*** You're using Microsoft Visual C++ compiler or similar but
    the LIB and INCLUDE environment variables are not both set.

    You need to run the VCVARS32.BAT batch file that was supplied
    with the compiler before you can use it.

    A copy of vcvars32.bat can typically be found in the following
    directories under your Visual Studio install directory:
        Visual C++ 6.0:     vc98\\bin
        Visual Studio .NET: vc7\\bin

    Find it, run it, then retry this.

    If you think this error is not correct then just set the LIB and
    INCLUDE environment variables to some value to disable the check.
EOT
    }
}
+
sub dbd_edit_mm_attribs {
    # Edit the MakeMaker attribute hash in-place to generate wrapper test
    # scripts (t/zv*_*.t) for the requested test variants (DBI::PurePerl,
    # DBD::Gofer, DBI::SQL::Nano and their combinations), and return the
    # flattened attribute list for WriteMakefile().
    #
    # $mm_attr  - hash ref of MakeMaker attributes (edited in-place)
    # $dbd_attr - optional hash ref of options: create_pp_tests,
    #             create_nano_tests, create_gap_tests
    my $mm_attr = shift;
    my $dbd_attr = shift || {};
    croak "dbd_edit_mm_attribs( \%makemaker [, \%other ]): too many parameters"
        if @_;
    _inst_checks();

    # what can be done
    my %test_variants = (
        p => { name => "DBI::PurePerl",
               match => qr/^\d/,
               add => [ '$ENV{DBI_PUREPERL} = 2',
                        'END { delete $ENV{DBI_PUREPERL}; }' ],
        },
        g => { name => "DBD::Gofer",
               match => qr/^\d/,
               add => [ q{$ENV{DBI_AUTOPROXY} = 'dbi:Gofer:transport=null;policy=pedantic'},
                        q|END { delete $ENV{DBI_AUTOPROXY}; }| ],
        },
        n => { name => "DBI::SQL::Nano",
               match => qr/^(?:48dbi_dbd_sqlengine|49dbd_file|5\ddbm_\w+|85gofer)\.t$/,
               add => [ q{$ENV{DBI_SQL_NANO} = 1},
                        q|END { delete $ENV{DBI_SQL_NANO}; }| ],
        },
        # mx => { name => "DBD::Multiplex",
        #        add => [ q{local $ENV{DBI_AUTOPROXY} = 'dbi:Multiplex:';} ],
        # }
        # px => { name => "DBD::Proxy",
        #        # need mechanism for starting/stopping the proxy server
        #        add => [ q{local $ENV{DBI_AUTOPROXY} = 'dbi:Proxy:XXX';} ],
        # }
    );

    # decide what needs doing
    $dbd_attr->{create_pp_tests} or delete $test_variants{p};
    $dbd_attr->{create_nano_tests} or delete $test_variants{n};
    $dbd_attr->{create_gap_tests} or delete $test_variants{g};

    # expand for all combinations (every multi-letter combination of the
    # single-letter variant keys, each letter appearing at most once)
    my @all_keys = my @tv_keys = sort keys %test_variants;
    while( @tv_keys ) {
        my $cur_key = shift @tv_keys;
        last if( 1 < length $cur_key );
        my @new_keys;
        foreach my $remain (@tv_keys) {
            push @new_keys, $cur_key . $remain unless $remain =~ /$cur_key/;
        }
        push @tv_keys, @new_keys;
        push @all_keys, @new_keys;
    }

    # normalize each combination to sorted letter order and deduplicate
    my %uniq_keys;
    foreach my $key (@all_keys) {
        @tv_keys = sort split //, $key;
        my $ordered = join( '', @tv_keys );
        $uniq_keys{$ordered} = 1;
    }
    @all_keys = sort { length $a <=> length $b or $a cmp $b } keys %uniq_keys;

    # do whatever needs doing
    if( keys %test_variants ) {
        # XXX need to convert this to work within the generated Makefile
        # so 'make' creates them and 'make clean' deletes them
        opendir my $tdir, 't' or die "Can't read 't' directory: $!";
        my @tests = grep { /\.t$/ } readdir $tdir;
        closedir $tdir;

        foreach my $test_combo (@all_keys) {
            @tv_keys = split //, $test_combo;
            my @test_names = map { $test_variants{$_}->{name} } @tv_keys;
            # print, not printf: the joined names are data, not a format
            # string, so an embedded '%' must not be interpreted
            print "Creating test wrappers for " . join( " + ", @test_names ) . ":\n";
            my @test_matches = map { $test_variants{$_}->{match} } @tv_keys;
            my @test_adds;
            foreach my $test_add ( map { $test_variants{$_}->{add} } @tv_keys) {
                push @test_adds, @$test_add;
            }
            my $v_type = $test_combo;
            $v_type = 'x' . $v_type if length( $v_type ) > 1;

          TEST:
            foreach my $test (sort @tests) {
                # wrap a test only if it matches every selected variant
                foreach my $match (@test_matches) {
                    next TEST if $test !~ $match;
                }
                my $usethr = ($test =~ /(\d+|\b)thr/ && $] >= 5.008 && $Config{useithreads});
                my $v_test = "t/zv${v_type}_$test";
                my $v_perl = ($test =~ /taint/) ? "perl -wT" : "perl -w";
                printf "%s %s\n", $v_test, ($usethr) ? "(use threads)" : "";
                # lexical handle + 3-arg open; skip this wrapper (instead of
                # printing to a dead handle) when the file can't be created
                open my $wrapper, '>', $v_test or do {
                    warn "Can't create $v_test: $!";
                    next TEST;
                };
                print {$wrapper} "#!$v_perl\n";
                print {$wrapper} "use threads;\n" if $usethr;
                print {$wrapper} "$_;\n" foreach @test_adds;
                print {$wrapper} "require './t/$test'; # or warn \$!;\n";
                close $wrapper or warn "Error writing $v_test: $!";
            }
        }
    }
    return %$mm_attr;
}
+
sub dbd_dbi_dir {
    # Return the library directory holding DBI.pm: '.' when building DBI
    # itself, otherwise the directory the installed DBI was loaded from.
    _inst_checks();
    return '.' if $is_dbi;
    my $path = $INC{'DBI.pm'} or die "DBI.pm not in %INC!";
    $path =~ s{/DBI\.pm$}{};
    return $path;
}
+
sub dbd_dbi_arch_dir {
    # Locate the architecture-specific directory where the installed DBI
    # keeps Driver.xst (the template compiled drivers are built from).
    # When building DBI itself, just return the MakeMaker macro.
    _inst_checks();
    return '$(INST_ARCHAUTODIR)' if $is_dbi;
    my $dbidir = dbd_dbi_dir();
    my %seen;
    # vmsify/unixify are identity functions except on VMS (see BEGIN above)
    my @try = grep { not $seen{$_}++ } map { vmsify( unixify($_) . "/auto/DBI/" ) } @INC;
    my @xst = grep { -f vmsify( unixify($_) . "/Driver.xst" ) } @try;
    Carp::croak("Unable to locate Driver.xst in @try") unless @xst;
    Carp::carp( "Multiple copies of Driver.xst found in: @xst") if @xst > 1;
    print "Using DBI $DBI::VERSION (for perl $] on $Config{archname}) installed in $xst[0]\n";
    return File::Spec->canonpath($xst[0]);
}
+
sub dbd_postamble {
    # Return the MakeMaker "postamble" fragment that teaches the generated
    # Makefile to build $(BASEEXT).xsi from DBI's Driver.xst template
    # (substituting ~DRIVER~ with the driver name).  $self is the
    # MakeMaker object when called as a method; it is not used.
    my $self = shift;
    _inst_checks();
    my $dbi_instarch_dir = ($is_dbi) ? "." : dbd_dbi_arch_dir();
    my $dbi_driver_xst= File::Spec->catfile($dbi_instarch_dir, 'Driver.xst');
    my $xstf_h = File::Spec->catfile($dbi_instarch_dir, 'Driver_xst.h');

    # we must be careful of quotes, especially for Win32 here.
    return '
# --- This section was generated by DBI::DBD::dbd_postamble()
DBI_INSTARCH_DIR='.$dbi_instarch_dir.'
DBI_DRIVER_XST='.$dbi_driver_xst.'

# The main dependancy (technically correct but probably not used)
$(BASEEXT).c: $(BASEEXT).xsi

# This dependancy is needed since MakeMaker uses the .xs.o rule
$(BASEEXT)$(OBJ_EXT): $(BASEEXT).xsi

$(BASEEXT).xsi: $(DBI_DRIVER_XST) '.$xstf_h.'
	$(PERL) -p -e "s/~DRIVER~/$(BASEEXT)/g" $(DBI_DRIVER_XST) > $(BASEEXT).xsi

# ---
';
}
+
+package DBDI; # just to reserve it via PAUSE for the future
+
+1;
+
+__END__
+
+=head1 AUTHORS
+
+Jonathan Leffler <jleffler@us.ibm.com> (previously <jleffler@informix.com>),
+Jochen Wiedmann <joe@ispsoft.de>,
+Steffen Goeldner <sgoeldner@cpan.org>,
+and Tim Bunce <dbi-users@perl.org>.
+
+=cut
diff --git a/lib/DBI/DBD/Metadata.pm b/lib/DBI/DBD/Metadata.pm
new file mode 100644
index 0000000..75f5b89
--- /dev/null
+++ b/lib/DBI/DBD/Metadata.pm
@@ -0,0 +1,493 @@
+package DBI::DBD::Metadata;
+
+# $Id: Metadata.pm 14213 2010-06-30 19:29:18Z mjevans $
+#
+# Copyright (c) 1997-2003 Jonathan Leffler, Jochen Wiedmann,
+# Steffen Goeldner and Tim Bunce
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use Exporter ();
+use Carp;
+
+use DBI;
+use DBI::Const::GetInfoType qw(%GetInfoType);
+
+# Perl 5.005_03 does not recognize 'our'
+@ISA = qw(Exporter);
+@EXPORT = qw(write_getinfo_pm write_typeinfo_pm);
+
+$VERSION = sprintf("2.%06d", q$Revision: 14213 $ =~ /(\d+)/o);
+
+
+use strict;
+
+=head1 NAME
+
+DBI::DBD::Metadata - Generate the code and data for some DBI metadata methods
+
+=head1 SYNOPSIS
+
+The idea is to extract metadata information from a good quality
+ODBC driver and use it to generate code and data to use in your own
+DBI driver for the same database.
+
+To generate code to support the get_info method:
+
+ perl -MDBI::DBD::Metadata -e "write_getinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')"
+
+ perl -MDBI::DBD::Metadata -e write_getinfo_pm dbi:ODBC:foo_db username password Driver
+
+To generate code to support the type_info method:
+
+ perl -MDBI::DBD::Metadata -e "write_typeinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')"
+
+ perl -MDBI::DBD::Metadata -e write_typeinfo_pm dbi:ODBC:dsn-name user pass Driver
+
+Where C<dbi:ODBC:dsn-name> is the connection to use to extract the
+data, and C<Driver> is the name of the driver you want the code
+generated for (the driver name gets embedded into the output in
+numerous places).
+
+=head1 Generating a GetInfo package for a driver
+
+The C<write_getinfo_pm> in the DBI::DBD::Metadata module generates a
+DBD::Driver::GetInfo package on standard output.
+
+This method generates a DBD::Driver::GetInfo package from the data
+source you specified in the parameter list or in the environment
+variable DBI_DSN.
+DBD::Driver::GetInfo should help a DBD author implement the DBI
+get_info() method.
+Because you are just creating this package, it is very unlikely that
+DBD::Driver already provides a good implementation for get_info().
+Thus you will probably connect via DBD::ODBC.
+
+Once you are sure that it is producing reasonably sane data, you should
+typically redirect the standard output to lib/DBD/Driver/GetInfo.pm, and
+then hand edit the result.
+Do not forget to update your Makefile.PL and MANIFEST to include this as
+an extra PM file that should be installed.
+
If you connect via DBD::ODBC, you should use version 0.38 or greater.
+
+Please take a critical look at the data returned!
+ODBC drivers vary dramatically in their quality.
+
+The generator assumes that most values are static and places these
+values directly in the %info hash.
+A few examples show the use of CODE references and the implementation
+via subroutines.
+It is very likely that you will have to write additional subroutines for
+values depending on the session state or server version, e.g.
+SQL_DBMS_VER.
+
+A possible implementation of DBD::Driver::db::get_info() may look like:
+
+ sub get_info {
+ my($dbh, $info_type) = @_;
+ require DBD::Driver::GetInfo;
+ my $v = $DBD::Driver::GetInfo::info{int($info_type)};
+ $v = $v->($dbh) if ref $v eq 'CODE';
+ return $v;
+ }
+
+Please replace Driver (or "<foo>") with the name of your driver.
+Note that this stub function is generated for you by write_getinfo_pm
+function, but you must manually transfer the code to Driver.pm.
+
+=cut
+
sub write_getinfo_pm
{
    # Connect to the given data source (typically via DBD::ODBC), probe it
    # for every value in %GetInfoType, and print a ready-to-edit
    # DBD::$driver::GetInfo package (plus a get_info() stub for the
    # driver's ::db class) to standard output.
    #
    # ($dsn, $user, $pass, $driver) come from @_ when called as a
    # function, or from @ARGV when run from the command line.
    my ($dsn, $user, $pass, $driver) = @_ ? @_ : @ARGV;
    my $dbh = DBI->connect($dsn, $user, $pass, {RaiseError=>1});
    $driver = "<foo>" unless defined $driver;

    # Interpolating heredoc: \$ and \@ below keep sigils literal in the
    # generated code while ${driver} and $VERSION interpolate now.
    print <<PERL;

# Transfer this to ${driver}.pm

# The get_info function was automatically generated by
# DBI::DBD::Metadata::write_getinfo_pm v$DBI::DBD::Metadata::VERSION.

package DBD::${driver}::db; # This line can be removed once transferred.

    sub get_info {
        my(\$dbh, \$info_type) = \@_;
        require DBD::${driver}::GetInfo;
        my \$v = \$DBD::${driver}::GetInfo::info{int(\$info_type)};
        \$v = \$v->(\$dbh) if ref \$v eq 'CODE';
        return \$v;
    }

# Transfer this to lib/DBD/${driver}/GetInfo.pm

# The \%info hash was automatically generated by
# DBI::DBD::Metadata::write_getinfo_pm v$DBI::DBD::Metadata::VERSION.

package DBD::${driver}::GetInfo;

use strict;
use DBD::${driver};

# Beware: not officially documented interfaces...
# use DBI::Const::GetInfoType qw(\%GetInfoType);
# use DBI::Const::GetInfoReturn qw(\%GetInfoReturnTypes \%GetInfoReturnValues);

my \$sql_driver = '${driver}';
my \$sql_ver_fmt = '%02d.%02d.%04d'; # ODBC version string: ##.##.#####
my \$sql_driver_ver = sprintf \$sql_ver_fmt, split (/\\./, \$DBD::${driver}::VERSION);
PERL

# Emit a sql_keywords() helper only when the source reports SQL_KEYWORDS.
my $kw_map = 0;
{
# Informix CLI (ODBC) v3.81.0000 does not return a list of keywords.
    local $\ = "\n";
    local $, = "\n";
    my ($kw) = $dbh->get_info($GetInfoType{SQL_KEYWORDS});
    if ($kw)
    {
        print "\nmy \@Keywords = qw(\n";
        print sort split /,/, $kw;
        print ");\n\n";
        print "sub sql_keywords {\n";
        print q% return join ',', @Keywords;%;
        print "\n}\n\n";
        $kw_map = 1;
    }
}

    # Non-interpolating heredoc: emitted verbatim into the generated file.
    print <<'PERL';

sub sql_data_source_name {
    my $dbh = shift;
    return "dbi:$sql_driver:" . $dbh->{Name};
}

sub sql_user_name {
    my $dbh = shift;
    # CURRENT_USER is a non-standard attribute, probably undef
    # Username is a standard DBI attribute
    return $dbh->{CURRENT_USER} || $dbh->{Username};
}

PERL

    # Probe every known info type; a handful of dynamic values are mapped
    # to the helper subs emitted above instead of the probed constant.
    print "\nour \%info = (\n";
    foreach my $key (sort keys %GetInfoType)
    {
        my $num = $GetInfoType{$key};
        my $val = eval { $dbh->get_info($num); };
        if ($key eq 'SQL_DATA_SOURCE_NAME') {
            $val = '\&sql_data_source_name';
        }
        elsif ($key eq 'SQL_KEYWORDS') {
            $val = ($kw_map) ? '\&sql_keywords' : 'undef';
        }
        elsif ($key eq 'SQL_DRIVER_NAME') {
            $val = "\$INC{'DBD/$driver.pm'}";
        }
        elsif ($key eq 'SQL_DRIVER_VER') {
            $val = '$sql_driver_ver';
        }
        elsif ($key eq 'SQL_USER_NAME') {
            $val = '\&sql_user_name';
        }
        elsif (not defined $val) {
            $val = 'undef';
        }
        elsif ($val eq '') {
            $val = "''";
        }
        elsif ($val =~ /\D/) {
            # quote string values, escaping backslashes and single quotes
            $val =~ s/\\/\\\\/g;
            $val =~ s/'/\\'/g;
            $val = "'$val'";
        }
        # unknown entries are emitted commented out ('#' in column one)
        printf "%s %5d => %-30s # %s\n", (($val eq 'undef') ? '#' : ' '), $num, "$val,", $key;
    }
    print ");\n\n1;\n\n__END__\n";
}
+
+
+
+=head1 Generating a TypeInfo package for a driver
+
+The C<write_typeinfo_pm> function in the DBI::DBD::Metadata module generates
+on standard output the data needed for a driver's type_info_all method.
+It also provides default implementations of the type_info_all
+method for inclusion in the driver's main implementation file.
+
+The driver parameter is the name of the driver for which the methods
+will be generated; for the sake of examples, this will be "Driver".
+Typically, the dsn parameter will be of the form "dbi:ODBC:odbc_dsn",
+where the odbc_dsn is a DSN for one of the driver's databases.
+The user and pass parameters are the other optional connection
+parameters that will be provided to the DBI connect method.
+
+Once you are sure that it is producing reasonably sane data, you should
+typically redirect the standard output to lib/DBD/Driver/TypeInfo.pm,
+and then hand edit the result if necessary.
+Do not forget to update your Makefile.PL and MANIFEST to include this as
+an extra PM file that should be installed.
+
+Please take a critical look at the data returned!
+ODBC drivers vary dramatically in their quality.
+
+The generator assumes that all the values are static and places these
+values directly in the %info hash.
+
+A possible implementation of DBD::Driver::type_info_all() may look like:
+
+ sub type_info_all {
+ my ($dbh) = @_;
+ require DBD::Driver::TypeInfo;
+ return [ @$DBD::Driver::TypeInfo::type_info_all ];
+ }
+
+Please replace Driver (or "<foo>") with the name of your driver.
+Note that this stub function is generated for you by the write_typeinfo_pm
+function, but you must manually transfer the code to Driver.pm.
+
+=cut
+
+
+# These two are used by fmt_value...
+my %dbi_inv;
+my %sql_type_inv;
+
+#-DEBUGGING-#
+#sub print_hash
+#{
+# my ($name, %hash) = @_;
+# print "Hash: $name\n";
+# foreach my $key (keys %hash)
+# {
+# print "$key => $hash{$key}\n";
+# }
+#}
+#-DEBUGGING-#
+
sub inverse_hash
{
    # Return the inverse of the given hash (value => key).  Dies when any
    # value appears more than once, since the mapping must be one-to-one.
    my %forward = @_;
    my %backward;
    while ( my ($key, $value) = each %forward )
    {
        die "Double mapping for key value $value ($backward{$value}, $key)!"
            if defined $backward{$value};
        $backward{$value} = $key;
    }
    return %backward;
}
+
sub fmt_value
{
    # Format one type_info_all value for code generation:
    #   undef                      -> the literal string "undef"
    #   non-integer                -> double-quoted (embedded '"' escaped)
    #   integer in a DATA_TYPE or SQL_DATA_TYPE column
    #                              -> symbolic SQL type name when known
    # $num is the column index (looked up in %dbi_inv), $val the raw value.
    my ($num, $val) = @_;

    return "undef" unless defined $val;

    if ($val =~ m/^[-+]?\d+$/)
    {
        # All the numbers in type_info_all are integers; map type codes
        # in (SQL_)DATA_TYPE columns back to their symbolic constants.
        if (   $dbi_inv{$num} =~ m/^(SQL_)?DATA_TYPE$/
            && defined $sql_type_inv{$val})
        {
            $val = $sql_type_inv{$val};
        }
        return $val;
    }

    # Anything that isn't an integer is a string; make sure no embedded
    # double quotes screw things up.
    $val =~ s/"/\\"/g if $val =~ m/"/;
    return qq{"$val"};
}
+
sub write_typeinfo_pm
{
    # Connect to the given data source, fetch its type_info_all data and
    # print a ready-to-edit DBD::$driver::TypeInfo package (plus a
    # type_info_all() stub for the driver's ::db class) to standard output.
    #
    # ($dsn, $user, $pass, $driver) come from @_ when called as a
    # function, or from @ARGV when run from the command line.
    my ($dsn, $user, $pass, $driver) = @_ ? @_ : @ARGV;
    my $dbh = DBI->connect($dsn, $user, $pass, {AutoCommit=>1, RaiseError=>1});
    $driver = "<foo>" unless defined $driver;

    print <<PERL;

# Transfer this to ${driver}.pm

# The type_info_all function was automatically generated by
# DBI::DBD::Metadata::write_typeinfo_pm v$DBI::DBD::Metadata::VERSION.

package DBD::${driver}::db; # This line can be removed once transferred.

    sub type_info_all
    {
        my (\$dbh) = \@_;
        require DBD::${driver}::TypeInfo;
        return [ \@\$DBD::${driver}::TypeInfo::type_info_all ];
    }

# Transfer this to lib/DBD/${driver}/TypeInfo.pm.
# Don't forget to add version and intellectual property control information.

# The \%type_info_all hash was automatically generated by
# DBI::DBD::Metadata::write_typeinfo_pm v$DBI::DBD::Metadata::VERSION.

package DBD::${driver}::TypeInfo;

{
    require Exporter;
    require DynaLoader;
    \@ISA = qw(Exporter DynaLoader);
    \@EXPORT = qw(type_info_all);
    use DBI qw(:sql_types);

PERL

    # Generate SQL type name mapping hashes.
    # See code fragment in DBI specification.
    my %sql_type_map;
    foreach (@{$DBI::EXPORT_TAGS{sql_types}})
    {
        no strict 'refs';
        $sql_type_map{$_} = &{"DBI::$_"}();
        $sql_type_inv{$sql_type_map{$_}} = $_;
    }
    #-DEBUG-# print_hash("sql_type_map", %sql_type_map);
    #-DEBUG-# print_hash("sql_type_inv", %sql_type_inv);

    # Canonical DBI column-name => index mapping for type_info_all.
    my %dbi_map =
       (
          TYPE_NAME          =>  0,
          DATA_TYPE          =>  1,
          COLUMN_SIZE        =>  2,
          LITERAL_PREFIX     =>  3,
          LITERAL_SUFFIX     =>  4,
          CREATE_PARAMS      =>  5,
          NULLABLE           =>  6,
          CASE_SENSITIVE     =>  7,
          SEARCHABLE         =>  8,
          UNSIGNED_ATTRIBUTE =>  9,
          FIXED_PREC_SCALE   => 10,
          AUTO_UNIQUE_VALUE  => 11,
          LOCAL_TYPE_NAME    => 12,
          MINIMUM_SCALE      => 13,
          MAXIMUM_SCALE      => 14,
          SQL_DATA_TYPE      => 15,
          SQL_DATETIME_SUB   => 16,
          NUM_PREC_RADIX     => 17,
          INTERVAL_PRECISION => 18,
       );

    #-DEBUG-# print_hash("dbi_map", %dbi_map);

    %dbi_inv = inverse_hash(%dbi_map);

    #-DEBUG-# print_hash("dbi_inv", %dbi_inv);

    my $maxlen = 0;
    foreach my $key (keys %dbi_map)
    {
        $maxlen = length($key) if length($key) > $maxlen;
    }

    # Print the name/value mapping entry in the type_info_all array;
    my $fmt = "        \%-${maxlen}s => \%2d,\n";
    my $numkey = 0;
    my $maxkey = 0;
    print "    \$type_info_all = [\n    {\n";
    foreach my $i (sort { $a <=> $b } keys %dbi_inv)
    {
        printf($fmt, $dbi_inv{$i}, $i);
        $numkey++;
        $maxkey = $i;
    }
    print "    },\n";

    # BUGFIX: numeric comparison (==), not assignment (=); the original
    # assignment was always true so the warning could never fire.
    print STDERR "### WARNING - Non-dense set of keys ($numkey keys, $maxkey max key)\n"
        unless $numkey == $maxkey + 1;

    my $h = $dbh->type_info_all;
    my @tia = @$h;
    my %odbc_map = map { uc $_ => $tia[0]->{$_} } keys %{$tia[0]};
    shift @tia;    # Remove the mapping reference.
    my $numtyp = $#tia;

    #-DEBUG-# print_hash("odbc_map", %odbc_map);

    # In theory, the key/number mapping sequence for %dbi_map
    # should be the same as the one from the ODBC driver. However, to
    # prevent the possibility of mismatches, and to deal with older
    # missing attributes or unexpected new ones, we chase back through
    # the %dbi_inv and %odbc_map hashes, generating @dbi_to_odbc
    # to map our new key number to the old one.
    # Report if @dbi_to_odbc is not an identity mapping.
    my @dbi_to_odbc;
    foreach my $num (sort { $a <=> $b } keys %dbi_inv)
    {
        # Find the name in %dbi_inv that matches this index number.
        my $dbi_key = $dbi_inv{$num};
        #-DEBUG-# print "dbi_key = $dbi_key\n";
        #-DEBUG-# print "odbc_key = $odbc_map{$dbi_key}\n";
        # Find the index in %odbc_map that has this key.
        $dbi_to_odbc[$num] = (defined $odbc_map{$dbi_key}) ? $odbc_map{$dbi_key} : undef;
    }

    # Determine the length of the longest formatted value in each field
    my @len;
    for (my $i = 0; $i <= $numtyp; $i++)
    {
        my @odbc_val = @{$tia[$i]};
        for (my $num = 0; $num <= $maxkey; $num++)
        {
            # Find the value of the entry in the @odbc_val array.
            my $val = (defined $dbi_to_odbc[$num]) ? $odbc_val[$dbi_to_odbc[$num]] : undef;
            $val = fmt_value($num, $val);
            #-DEBUG-# print "val = $val\n";
            $val = "$val,";
            $len[$num] = length($val) if !defined $len[$num] || length($val) > $len[$num];
        }
    }

    # Generate format strings to left justify each string in maximum field width.
    my @fmt;
    for (my $i = 0; $i <= $maxkey; $i++)
    {
        $fmt[$i] = "%-$len[$i]s";
        #-DEBUG-# print "fmt[$i] = $fmt[$i]\n";
    }

    # Format the data from type_info_all
    for (my $i = 0; $i <= $numtyp; $i++)
    {
        my @odbc_val = @{$tia[$i]};
        print "    [ ";
        for (my $num = 0; $num <= $maxkey; $num++)
        {
            # Find the value of the entry in the @odbc_val array.
            my $val = (defined $dbi_to_odbc[$num]) ? $odbc_val[$dbi_to_odbc[$num]] : undef;
            $val = fmt_value($num, $val);
            printf $fmt[$num], "$val,";
        }
        print " ],\n";
    }

    print "    ];\n\n    1;\n}\n\n__END__\n";

}
+
+1;
+
+__END__
+
+=head1 AUTHORS
+
+Jonathan Leffler <jleffler@us.ibm.com> (previously <jleffler@informix.com>),
+Jochen Wiedmann <joe@ispsoft.de>,
+Steffen Goeldner <sgoeldner@cpan.org>,
+and Tim Bunce <dbi-users@perl.org>.
+
+=cut
diff --git a/lib/DBI/DBD/SqlEngine.pm b/lib/DBI/DBD/SqlEngine.pm
new file mode 100644
index 0000000..ae5c115
--- /dev/null
+++ b/lib/DBI/DBD/SqlEngine.pm
@@ -0,0 +1,1232 @@
+# -*- perl -*-
+#
+# DBI::DBD::SqlEngine - A base class for implementing DBI drivers that
+# have not an own SQL engine
+#
+# This module is currently maintained by
+#
+# H.Merijn Brand & Jens Rehsack
+#
+# The original author is Jochen Wiedmann.
+#
+# Copyright (C) 2009,2010 by H.Merijn Brand & Jens Rehsack
+# Copyright (C) 2004 by Jeff Zucker
+# Copyright (C) 1998 by Jochen Wiedmann
+#
+# All rights reserved.
+#
+# You may distribute this module under the terms of either the GNU
+# General Public License or the Artistic License, as specified in
+# the Perl README file.
+
+require 5.008;
+
+use strict;
+
+use DBI ();
+require DBI::SQL::Nano;
+
+package DBI::DBD::SqlEngine;
+
+use strict;
+
+use Carp;
+use vars qw( @ISA $VERSION $drh %methods_installed);
+
+$VERSION = "0.03";
+
+$drh = undef; # holds driver handle(s) once initialized
+
+DBI->setup_driver("DBI::DBD::SqlEngine"); # only needed once but harmless to repeat
+
+my %accessors = ( versions => "get_driver_versions", );
+
sub driver ($;$)
{
    # Create (or return the cached) driver handle for $class.  $attr may
    # supply driver attributes; Attribution, Version and Name are filled
    # in from the subclass's package globals when absent.
    my ( $class, $attr ) = @_;

    # Drivers typically use a singleton object for the $drh
    # We use a hash here to have one singleton per subclass.
    # (Otherwise DBD::CSV and DBD::DBM, for example, would
    # share the same driver object which would cause problems.)
    # An alternative would be not to cache the $drh here at all
    # and require that subclasses do that. Subclasses should do
    # their own caching, so caching here just provides extra safety.
    $drh->{$class} and return $drh->{$class};

    $attr ||= {};
    {
        no strict "refs";
        unless ( $attr->{Attribution} )
        {
            $class eq "DBI::DBD::SqlEngine"
              and $attr->{Attribution} = "$class by Jens Rehsack";
            $attr->{Attribution} ||= ${ $class . "::ATTRIBUTION" }
              || "oops the author of $class forgot to define this";
        }
        $attr->{Version} ||= ${ $class . "::VERSION" };
        # default Name is the class name with the leading "DBD::" removed
        $attr->{Name} or ( $attr->{Name} = $class ) =~ s/^DBD\:\://;
    }

    $drh->{$class} = DBI::_new_drh( $class . "::dr", $attr );
    $drh->{$class}->STORE( ShowErrorStatement => 1 );

    # For drivers registered with a private prefix, inject prefixed
    # accessor methods (e.g. "<prefix>versions") into the ::db class,
    # each forwarding to the implementation named in %accessors.
    my $prefix = DBI->driver_prefix($class);
    if ($prefix)
    {
        my $dbclass = $class . "::db";
        while ( my ( $accessor, $funcname ) = each %accessors )
        {
            my $method = $prefix . $accessor;
            $dbclass->can($method) and next;    # driver already provides it
            my $inject = sprintf <<'EOI', $dbclass, $method, $dbclass, $funcname;
sub %s::%s
{
    my $func = %s->can (q{%s});
    goto &$func;
    }
EOI
            eval $inject;
            $dbclass->install_method($method);
        }
    }

    # XXX inject DBD::XXX::Statement unless exists

    # install sql_get_colnames on the ::st class exactly once per subclass
    my $stclass = $class . "::st";
    $stclass->install_method("sql_get_colnames") unless ( $methods_installed{$class}++ );

    return $drh->{$class};
}    # driver
+
sub CLONE
{
    # Called by perl on thread creation: drop the cached driver handles so
    # each thread builds its own via driver().
    undef $drh;
}    # CLONE
+
+# ====== DRIVER ================================================================
+
+package DBI::DBD::SqlEngine::dr;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+$imp_data_size = 0;
+
sub connect ($$;$$$)
{
    # Create and initialize a database handle for $dbname.  The DSN string
    # is parsed for ";"-separated "var=val" pairs (stored via STORE) and
    # "var=>val" pairs (val is evaluated and passed to the $var method).
    my ( $drh, $dbname, $user, $auth, $attr ) = @_;

    # create a 'blank' dbh
    my $dbh = DBI::_new_dbh(
        $drh,
        {
            Name         => $dbname,
            USER         => $user,
            CURRENT_USER => $user,
        }
    );

    if ($dbh)
    {
        # must be done first, because setting flags implicitly calls $dbdname::db->STORE
        $dbh->func( 0, "init_default_attributes" );
        # drivers supporting two-phase init expose sql_init_phase;
        # attributes whose STORE fails in phase one are retried later
        my $two_phased_init;
        defined $dbh->{sql_init_phase} and $two_phased_init = ++$dbh->{sql_init_phase};
        my %second_phase_attrs;

        my ( $var, $val );
        while ( length $dbname )
        {
            # split off the next ";"-separated chunk ("\" escapes any char)
            if ( $dbname =~ s/^((?:[^\\;]|\\.)*?);//s )
            {
                $var = $1;
            }
            else
            {
                $var = $dbname;
                $dbname = "";
            }
            if ( $var =~ m/^(.+?)=(.*)/s )
            {
                $var = $1;
                ( $val = $2 ) =~ s/\\(.)/$1/g;    # unescape
                if ($two_phased_init)
                {
                    eval { $dbh->STORE( $var, $val ); };
                    $@ and $second_phase_attrs{$var} = $val;
                }
                else
                {
                    $dbh->STORE( $var, $val );
                }
            }
            elsif ( $var =~ m/^(.+?)=>(.*)/s )
            {
                # "name=>code": code is evaluated and the result passed to
                # the method $dbh->$var() (string eval of DSN content)
                $var = $1;
                ( $val = $2 ) =~ s/\\(.)/$1/g;
                my $ref = eval $val;
                $dbh->$var($ref);
            }
        }

        if ($two_phased_init)
        {
            foreach $a (qw(Profile RaiseError PrintError AutoCommit))
            {    # do these first
                exists $attr->{$a} or next;
                eval {
                    $dbh->{$a} = $attr->{$a};
                    delete $attr->{$a};
                };
                $@ and $second_phase_attrs{$a} = delete $attr->{$a};
            }
            while ( my ( $a, $v ) = each %$attr )
            {
                eval { $dbh->{$a} = $v };
                $@ and $second_phase_attrs{$a} = $v;
            }

            # attributes that could not be applied yet are handed back to
            # DBI->connect via %$attr for application after this phase
            $dbh->func( 1, "init_default_attributes" );
            %$attr = %second_phase_attrs;
        }

        $dbh->func("init_done");

        $dbh->STORE( Active => 1 );
    }

    return $dbh;
}    # connect
+
sub disconnect_all
{
    # nothing driver-global to tear down; present so DBI need not autoload it
}    # disconnect_all
+
sub DESTROY
{
    # driver-handle destruction needs no cleanup
    undef;
}    # DESTROY
+
+# ====== DATABASE ==============================================================
+
+package DBI::DBD::SqlEngine::db;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+use Carp;
+
+if ( eval { require Clone; } )
+{
+ Clone->import("clone");
+}
+else
+{
+ require Storable; # in CORE since 5.7.3
+ *clone = \&Storable::dclone;
+}
+
+$imp_data_size = 0;
+
sub ping
{
    # A handle is considered alive exactly when its Active flag is true.
    my ($dbh) = @_;
    return $dbh->FETCH("Active") ? 1 : 0;
}    # ping
+
sub prepare ($$;@)
{
    # Parse $statement and return a statement handle, or undef (after
    # set_err) when parsing fails.  The statement class is the driver's
    # ::Statement package, derived from the ::st implementor class name.
    my ( $dbh, $statement, @attribs ) = @_;

    # create a 'blank' sth
    my $sth = DBI::_new_sth( $dbh, { Statement => $statement } );

    if ($sth)
    {
        my $class = $sth->FETCH("ImplementorClass");
        $class =~ s/::st$/::Statement/;
        my $stmt;

        # if using SQL::Statement version > 1
        # cache the parser object if the DBD supports parser caching
        # SQL::Nano and older SQL::Statements don't support this

        if ( $class->isa("SQL::Statement") )
        {
            my $parser = $dbh->{sql_parser_object};
            $parser ||= eval { $dbh->func("sql_parser_object") };
            if ($@)
            {
                # no parser object available; let the class build its own
                $stmt = eval { $class->new($statement) };
            }
            else
            {
                $stmt = eval { $class->new( $statement, $parser ) };
            }
        }
        else
        {
            $stmt = eval { $class->new($statement) };
        }
        if ($@ || $stmt->{errstr})
        {
            # parse error: record it on the handle and return undef
            $dbh->set_err( $DBI::stderr, $@ || $stmt->{errstr} );
            undef $sth;
        }
        else
        {
            # expose parameter and column metadata on the handle
            $sth->STORE( "sql_stmt", $stmt );
            $sth->STORE( "sql_params", [] );
            $sth->STORE( "NUM_OF_PARAMS", scalar( $stmt->params() ) );
            my @colnames = $sth->sql_get_colnames();
            $sth->STORE( "NUM_OF_FIELDS", scalar @colnames );
        }
    }
    return $sth;
}    # prepare
+
sub set_versions
{
    # Record the versions of the SQL engine components on the handle and
    # choose the SQL handler: SQL::Statement when one was detected by
    # DBI::SQL::Nano, plain Nano otherwise.  Returns the handle.
    my ($dbh) = @_;

    $dbh->{sql_engine_version} = $DBI::DBD::SqlEngine::VERSION;
    foreach my $component (qw( nano_version statement_version ))
    {
        my $version = $DBI::SQL::Nano::versions->{$component};
        defined $version and $dbh->{"sql_$component"} = $version;
    }
    $dbh->{sql_handler} = $dbh->{sql_statement_version}
        ? "SQL::Statement"
        : "DBI::SQL::Nano";

    return $dbh;
}    # set_versions
+
sub init_valid_attributes
{
    # Register the sql_* attributes this engine understands and mark the
    # subset that callers may not modify once the handle is set up.
    # Returns the handle.
    my ($dbh) = @_;

    my %valid = (
        sql_engine_version         => 1,    # DBI::DBD::SqlEngine version
        sql_handler                => 1,    # Nano or S:S
        sql_nano_version           => 1,    # Nano version
        sql_statement_version      => 1,    # S:S version
        sql_flags                  => 1,    # flags for SQL::Parser
        sql_dialect                => 1,    # dialect for SQL::Parser
        sql_quoted_identifier_case => 1,    # case for quoted identifiers
        sql_identifier_case        => 1,    # case for non-quoted identifiers
        sql_parser_object          => 1,    # SQL::Parser instance
        sql_sponge_driver          => 1,    # Sponge driver for table_info ()
        sql_valid_attrs            => 1,    # SQL valid attributes
        sql_readonly_attrs         => 1,    # SQL readonly attributes
        sql_init_phase             => 1,    # Only during initialization
    );

    # read-only attributes are the valid ones minus the user-tunable set
    my %readonly = %valid;
    delete @readonly{qw(sql_flags sql_dialect sql_identifier_case sql_init_phase)};

    $dbh->{sql_valid_attrs}    = \%valid;
    $dbh->{sql_readonly_attrs} = \%readonly;

    return $dbh;
}    # init_valid_attributes
+
# Initialize the default attributes of a database handle.
#
# $phase selects the initialization stage: 0 runs the one-time setup
# (valid-attribute registration, version detection, identifier-case and
# dialect defaults, derived driver attributes); any other phase is a
# no-op here.  Drivers that do not pass a phase ("old" drivers) fall back
# to the handle's sql_init_phase attribute.  Returns the database handle.
sub init_default_attributes
{
    my ( $dbh, $phase ) = @_;
    my $given_phase = $phase;

    unless ( defined($phase) )
    {
        # we have an "old" driver here
        # sql_init_phase is only present during initialization (see init_done)
        $phase = defined $dbh->{sql_init_phase};
        $phase and $phase = $dbh->{sql_init_phase};
    }

    if ( 0 == $phase )
    {
        # must be done first, because setting flags implicitly calls $dbdname::db->STORE
        $dbh->func("init_valid_attributes");

        $dbh->func("set_versions");

        $dbh->{sql_identifier_case} = 2; # SQL_IC_LOWER
        $dbh->{sql_quoted_identifier_case} = 3; # SQL_IC_SENSITIVE

        $dbh->{sql_dialect} = "CSV";

        # remember the phase the driver passed in (undef for old drivers)
        $dbh->{sql_init_phase} = $given_phase;

        # complete derived attributes, if required
        ( my $drv_class = $dbh->{ImplementorClass} ) =~ s/::db$//;
        my $drv_prefix = DBI->driver_prefix($drv_class);
        my $valid_attrs = $drv_prefix . "valid_attrs";
        my $ro_attrs = $drv_prefix . "readonly_attrs";

        # when the derived driver declares its own valid/readonly attribute
        # lists, ensure the standard derived attributes are registered there
        my @comp_attrs = qw(valid_attrs version readonly_attrs);

        foreach my $comp_attr (@comp_attrs)
        {
            my $attr = $drv_prefix . $comp_attr;
            defined $dbh->{$valid_attrs}
              and !defined $dbh->{$valid_attrs}{$attr}
              and $dbh->{$valid_attrs}{$attr} = 1;
            defined $dbh->{$ro_attrs}
              and !defined $dbh->{$ro_attrs}{$attr}
              and $dbh->{$ro_attrs}{$attr} = 1;
        }
    }

    return $dbh;
} # init_default_attributes
+
# Mark the handle initialization as finished: drop the transient
# sql_init_phase attribute and withdraw it from the valid-attribute list.
# Returns nothing.
sub init_done
{
    my ($dbh) = @_;
    delete $dbh->{sql_init_phase} if defined $dbh->{sql_init_phase};
    delete $dbh->{sql_valid_attrs}{sql_init_phase};
    return;
} # init_done
+
# Build (and cache on the handle) the SQL::Parser instance used with
# SQL::Statement.  The parser inherits the handle's RaiseError/PrintError
# settings, any sql_flags (which may override those two), and the
# configured sql_dialect (default "CSV").  Returns the parser.
sub sql_parser_object
{
    my ($dbh) = @_;

    my %flags = (
        RaiseError => $dbh->FETCH("RaiseError"),
        PrintError => $dbh->FETCH("PrintError"),
        %{ $dbh->FETCH("sql_flags") || {} },
    );
    my $parser = SQL::Parser->new( $dbh->{sql_dialect} || "CSV", \%flags );

    return $dbh->{sql_parser_object} = $parser;
} # sql_parser_object
+
# Return the DBD::Sponge handle used to fabricate statement handles for
# table_info(), connecting (and caching the handle in sql_sponge_driver)
# on first use.  On connection failure the DBI error is propagated via
# set_err and undef is returned.
sub sql_sponge_driver
{
    my ($dbh) = @_;

    my $dbh2 = $dbh->{sql_sponge_driver};
    unless ($dbh2)
    {
        $dbh2 = $dbh->{sql_sponge_driver} = DBI->connect("DBI:Sponge:");
        unless ($dbh2)
        {
            $dbh->set_err( $DBI::stderr, $DBI::errstr );
            return;
        }
    }

    # Fix: return the handle explicitly.  The previous code fell off the
    # end of the conditional and only delivered the handle by accident of
    # Perl's implicit return of the last evaluated expression.
    return $dbh2;
} # sql_sponge_driver
+
# Mark the handle as no longer connected.  SqlEngine has no real network
# connection, so this only clears the Active flag.  Always returns 1.
sub disconnect ($)
{
    my ($dbh) = @_;
    $dbh->STORE( Active => 0 );
    return 1;
} # disconnect
+
# Hook for derived drivers: given a private attribute name requested via
# FETCH, return the (possibly aliased) attribute to actually read, or a
# false value to veto the fetch.  The base implementation accepts every
# attribute unchanged.
sub validate_FETCH_attr
{
    # $_[0] is the handle (unused here), $_[1] the attribute name
    return $_[1];
}
+
# Fetch a database handle attribute.
#
# AutoCommit is hard-wired to 1.  All-lowercase names are treated as
# driver-private attributes: the request is first validated (and possibly
# aliased) by validate_FETCH_attr, qualified with the driver prefix when
# it carries none, checked against the ${prefix}valid_attrs list and -
# when the attribute is readonly and holds a reference - returned as a
# copy so callers cannot modify the handle's internal value.  Anything
# else is delegated to DBI's own FETCH.
sub FETCH ($$)
{
    my ( $dbh, $attrib ) = @_;
    $attrib eq "AutoCommit"
      and return 1;

    # Driver private attributes are lower cased
    if ( $attrib eq ( lc $attrib ) )
    {
        # first let the implementation deliver an alias for the attribute to fetch
        # after it validates the legitimation of the fetch request
        $attrib = $dbh->func( $attrib, "validate_FETCH_attr" ) or return;

        my $attr_prefix;
        $attrib =~ m/^([a-z]+_)/ and $attr_prefix = $1;
        unless ($attr_prefix)
        {
            # no prefix: qualify with the driver's registered prefix
            ( my $drv_class = $dbh->{ImplementorClass} ) =~ s/::db$//;
            $attr_prefix = DBI->driver_prefix($drv_class);
            $attrib = $attr_prefix . $attrib;
        }
        my $valid_attrs = $attr_prefix . "valid_attrs";
        my $ro_attrs = $attr_prefix . "readonly_attrs";

        # reject names not in the valid-attribute list (when a list exists)
        exists $dbh->{$valid_attrs}
          and ( $dbh->{$valid_attrs}{$attrib}
            or return $dbh->set_err( $DBI::stderr, "Invalid attribute '$attrib'" ) );
        # readonly reference values are handed out as a copy
        # (refaddr/clone presumably come from Scalar::Util/Storable
        # imported at file level - not visible in this chunk; confirm)
        exists $dbh->{$ro_attrs}
          and $dbh->{$ro_attrs}{$attrib}
          and defined $dbh->{$attrib}
          and refaddr( $dbh->{$attrib} )
          and return clone( $dbh->{$attrib} );

        return $dbh->{$attrib};
    }
    # else pass up to DBI to handle
    return $dbh->SUPER::FETCH($attrib);
} # FETCH
+
# Hook for derived drivers: validate (and possibly rewrite) a private
# attribute/value pair before STORE applies it.  The base implementation
# only range-checks the two identifier-case attributes (valid values are
# 1 .. 4, i.e. SQL_IC_UPPER .. SQL_IC_MIXED) and croaks on violation.
# Returns the (attribute, value) pair to store.
sub validate_STORE_attr
{
    my ( $dbh, $attrib, $value ) = @_;

    my $is_case_attr =
      $attrib eq "sql_identifier_case" || $attrib eq "sql_quoted_identifier_case";

    if ( $is_case_attr && ( $value < 1 || $value > 4 ) )
    {
        # XXX correctly a remap of all entries in f_meta/f_meta_map is required here
        croak "attribute '$attrib' must have a value from 1 .. 4 (SQL_IC_UPPER .. SQL_IC_MIXED)";
    }

    return ( $attrib, $value );
}
+
+# the ::db::STORE method is what gets called when you set
+# a lower-cased database handle attribute such as $dbh->{somekey}=$someval;
+#
+# STORE should check to make sure that "somekey" is a valid attribute name
+# but only if it is really one of our attributes (starts with dbm_ or foo_)
+# You can also check for valid values for the attributes if needed
+# and/or perform other operations
+#
# the ::db::STORE method is what gets called when you set
# a lower-cased database handle attribute such as $dbh->{somekey}=$someval;
#
# STORE should check to make sure that "somekey" is a valid attribute name
# but only if it is really one of our attributes (starts with dbm_ or foo_)
# You can also check for valid values for the attributes if needed
# and/or perform other operations
#
# Returns 1 on success; propagates errors through set_err (invalid or
# readonly attributes) or croaks (disabling AutoCommit).  Mixed- and
# upper-case attributes are delegated to DBI's own STORE.
sub STORE ($$$)
{
    my ( $dbh, $attrib, $value ) = @_;

    # AutoCommit cannot be switched off - SqlEngine commits instantly
    if ( $attrib eq "AutoCommit" )
    {
        $value and return 1; # is already set
        croak "Can't disable AutoCommit";
    }

    if ( $attrib eq lc $attrib )
    {
        # Driver private attributes are lower cased

        # qualify the attribute with the driver prefix when it has none
        my $attr_prefix;
        $attrib =~ m/^([a-z]+_)/ and $attr_prefix = $1;
        unless ($attr_prefix)
        {
            ( my $drv_class = $dbh->{ImplementorClass} ) =~ s/::db$//;
            $attr_prefix = DBI->driver_prefix($drv_class);
            $attrib = $attr_prefix . $attrib;
        }
        my $valid_attrs = $attr_prefix . "valid_attrs";
        my $ro_attrs = $attr_prefix . "readonly_attrs";

        # let the derived driver validate/rewrite the pair;
        # a false attribute name vetoes the store
        ( $attrib, $value ) = $dbh->func( $attrib, $value, "validate_STORE_attr" );
        $attrib or return;

        # reject names not in the valid-attribute list (when a list exists)
        exists $dbh->{$valid_attrs}
          and ( $dbh->{$valid_attrs}{$attrib}
            or return $dbh->set_err( $DBI::stderr, "Invalid attribute '$attrib'" ) );
        # readonly attributes may only be set while they are still undef
        exists $dbh->{$ro_attrs}
          and $dbh->{$ro_attrs}{$attrib}
          and defined $dbh->{$attrib}
          and return $dbh->set_err( $DBI::stderr,
            "attribute '$attrib' is readonly and must not be modified" );

        $dbh->{$attrib} = $value;
        return 1;
    }

    return $dbh->SUPER::STORE( $attrib, $value );
} # STORE
+
# Collect version information about the OS, Perl, DBI, the SQL engine and
# every driver class between the instantiated driver and
# DBI::DBD::SqlEngine::db in the inheritance chain.  Derived drivers can
# contribute their own entry via a get_${prefix}versions method or a
# ${prefix}version attribute.  $table is passed through to those hooks
# (unused here).  Returns the formatted lines as a list in list context,
# joined with newlines otherwise.
sub get_driver_versions
{
    my ( $dbh, $table ) = @_;
    my %vsn = (
        OS => "$^O ($Config::Config{osvers})",
        Perl => "$] ($Config::Config{archname})",
        DBI => $DBI::VERSION,
    );
    my %vmp;    # maps class name to an indented display name

    my $sql_engine_verinfo =
      join " ",
      $dbh->{sql_engine_version}, "using", $dbh->{sql_handler},
      $dbh->{sql_handler} eq "SQL::Statement"
      ? $dbh->{sql_statement_version}
      : $dbh->{sql_nano_version};

    # walk the ISA chain from the implementor class down to
    # DBI::DBD::SqlEngine::db, indenting one level per inheritance step
    my $indent = 0;
    my @deriveds = ( $dbh->{ImplementorClass} );
    while (@deriveds)
    {
        my $derived = shift @deriveds;
        $derived eq "DBI::DBD::SqlEngine::db" and last;
        $derived->isa("DBI::DBD::SqlEngine::db") or next;
        # string eval sidesteps strict refs for the symbolic @ISA access
        #no strict 'refs';
        eval "push \@deriveds, \@${derived}::ISA";
        #use strict;
        ( my $drv_class = $derived ) =~ s/::db$//;
        my $drv_prefix = DBI->driver_prefix($drv_class);
        my $ddgv = $dbh->{ImplementorClass}->can("get_${drv_prefix}versions");
        my $drv_version = $ddgv ? &$ddgv( $dbh, $table ) : $dbh->{ $drv_prefix . "version" };
        $drv_version ||= eval { $derived->VERSION() }; # XXX access $drv_class::VERSION via symbol table
        $vsn{$drv_class} = $drv_version;
        $indent and $vmp{$drv_class} = " " x $indent . $drv_class;
        $indent += 2;
    }

    $vsn{"DBI::DBD::SqlEngine"} = $sql_engine_verinfo;
    $indent and $vmp{"DBI::DBD::SqlEngine"} = " " x $indent . "DBI::DBD::SqlEngine";

    $DBI::PurePerl and $vsn{"DBI::PurePerl"} = $DBI::PurePerl::VERSION;

    # render one "name version" line per entry; base classes sort last
    $indent += 20;
    my @versions = map { sprintf "%-${indent}s %s", $vmp{$_} || $_, $vsn{$_} }
      sort {
        $a->isa($b) and return -1;
        $b->isa($a) and return 1;
        $a->isa("DBI::DBD::SqlEngine") and return -1;
        $b->isa("DBI::DBD::SqlEngine") and return 1;
        return $a cmp $b;
      } keys %vsn;

    return wantarray ? @versions : join "\n", @versions;
} # get_driver_versions
+
# Destructor: disconnect when the handle is still Active and release the
# cached SQL::Parser instance.
sub DESTROY ($)
{
    my ($dbh) = @_;
    $dbh->disconnect if $dbh->SUPER::FETCH("Active");
    undef $dbh->{sql_parser_object};
} # DESTROY
+
# Return the DBI type_info_all structure: a hash mapping column names to
# their row index, followed by one row per SQL type supported by the
# engine (see "type_info_all" in the DBI documentation).
sub type_info_all ($)
{
    [
        {
            TYPE_NAME => 0,
            DATA_TYPE => 1,
            PRECISION => 2,
            LITERAL_PREFIX => 3,
            LITERAL_SUFFIX => 4,
            CREATE_PARAMS => 5,
            NULLABLE => 6,
            CASE_SENSITIVE => 7,
            SEARCHABLE => 8,
            UNSIGNED_ATTRIBUTE => 9,
            MONEY => 10,
            AUTO_INCREMENT => 11,
            LOCAL_TYPE_NAME => 12,
            MINIMUM_SCALE => 13,
            MAXIMUM_SCALE => 14,
        },
        [ "VARCHAR", DBI::SQL_VARCHAR(), undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, ],
        [ "CHAR", DBI::SQL_CHAR(), undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, ],
        [ "INTEGER", DBI::SQL_INTEGER(), undef, "", "", undef, 0, 0, 1, 0, 0, 0, undef, 0, 0, ],
        [ "REAL", DBI::SQL_REAL(), undef, "", "", undef, 0, 0, 1, 0, 0, 0, undef, 0, 0, ],
        # NOTE(review): the BLOB row is listed twice - looks like a
        # copy/paste duplicate, but it is left untouched here since
        # consumers may index these rows positionally.
        [ "BLOB", DBI::SQL_LONGVARBINARY(), undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, ],
        [ "BLOB", DBI::SQL_LONGVARBINARY(), undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, ],
        [ "TEXT", DBI::SQL_LONGVARCHAR(), undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, ],
    ];
} # type_info_all
+
# Return the list of tables this engine knows about.  The base class only
# knows SQL::Statement's in-memory (RAM) tables; file or database backed
# drivers override this.  Each entry is a table_info-style row:
# [ qualifier, owner, name, type, remarks ].
sub get_avail_tables
{
    my ($dbh) = @_;

    $dbh->{sql_handler} eq "SQL::Statement" and $dbh->{sql_ram_tables}
      or return ();

    return map { [ undef, undef, $_, "TABLE", "TEMP" ] }
      keys %{ $dbh->{sql_ram_tables} };
} # get_avail_tables
+
{
    # column names of the result set delivered by table_info, shared by
    # all invocations via this closure
    my $names = [qw( TABLE_QUALIFIER TABLE_OWNER TABLE_NAME TABLE_TYPE REMARKS )];

    # Return a statement handle delivering one row per known table, as
    # required by the DBI table_info API.  The rows come from
    # get_avail_tables and are turned into a result set through a
    # DBD::Sponge handle.  Returns nothing when no tables are available;
    # on Sponge failure set_err is called and undef is returned.
    sub table_info ($)
    {
        my $dbh = shift;

        my @tables = $dbh->func("get_avail_tables");

        # Temporary kludge: DBD::Sponge dies if @tables is empty. :-(
        @tables or return;

        my $dbh2 = $dbh->func("sql_sponge_driver");
        my $sth = $dbh2->prepare(
            "TABLE_INFO",
            {
                rows => \@tables,
                NAMES => $names,
            }
        );
        # propagate the Sponge error onto our own handle on failure
        $sth or $dbh->set_err( $DBI::stderr, $dbh2->errstr );
        return $sth;
    } # table_info
}
+
# Return the plain names of all known tables (the TABLE_NAME column of
# the get_avail_tables rows).  Returns the empty list when no tables are
# available.
sub list_tables ($)
{
    my $dbh = shift;

    # rt69260 and rt67223 - the same issue in 2 different queues
    my @avail = $dbh->func("get_avail_tables") or return;

    return map { $_->[2] } @avail;
} # list_tables
+
# Quote a value for literal inclusion in an SQL statement.  undef becomes
# NULL; values bound with a numeric SQL type are passed through unquoted;
# everything else is returned single-quoted with backslash escapes for
# backslash, NUL, single quote, newline and carriage return.
sub quote ($$;$)
{
    my ( $self, $str, $type ) = @_;

    defined $str or return "NULL";

    # numeric SQL types need no quoting at all
    defined $type
      and grep { $type == $_ } (
        DBI::SQL_NUMERIC(),  DBI::SQL_DECIMAL(), DBI::SQL_INTEGER(),
        DBI::SQL_SMALLINT(), DBI::SQL_FLOAT(),   DBI::SQL_REAL(),
        DBI::SQL_DOUBLE(),   DBI::SQL_TINYINT(),
      )
      and return $str;

    for ($str)
    {
        s/\\/\\\\/sg;
        s/\0/\\0/sg;
        s/\'/\\\'/sg;
        s/\n/\\n/sg;
        s/\r/\\r/sg;
    }
    return "'$str'";
} # quote
+
# SqlEngine drivers commit every statement instantly, so an explicit
# commit is a no-op.  Warns (when Warn is enabled) and returns 1.
sub commit ($)
{
    my ($dbh) = @_;
    carp "Commit ineffective while AutoCommit is on", -1
      if $dbh->FETCH("Warn");
    return 1;
} # commit
+
# Rollback is not supported - every action was already committed.
# Warns (when Warn is enabled) and returns 0 to signal failure.
sub rollback ($)
{
    my ($dbh) = @_;
    carp "Rollback ineffective while AutoCommit is on", -1
      if $dbh->FETCH("Warn");
    return 0;
} # rollback
+
+# ====== STATEMENT =============================================================
+
+package DBI::DBD::SqlEngine::st;
+
+use strict;
+use warnings;
+
+use vars qw(@ISA $imp_data_size);
+
+$imp_data_size = 0;
+
# Bind a value to placeholder number $pNum (1-based).  When a SQL type is
# given (directly or as {TYPE => ...}), the value is coerced accordingly:
# integer types numify, decimal/float types numify as floating point,
# anything else stringifies.  Always returns 1.
sub bind_param ($$$;$)
{
    my ( $sth, $pNum, $val, $attr ) = @_;

    if ( $attr && defined $val )
    {
        my $type = ref $attr eq "HASH" ? $attr->{TYPE} : $attr;

        my @int_types = (
            DBI::SQL_BIGINT(), DBI::SQL_INTEGER(),
            DBI::SQL_SMALLINT(), DBI::SQL_TINYINT(),
        );
        my @num_types = (
            DBI::SQL_DECIMAL(), DBI::SQL_DOUBLE(), DBI::SQL_FLOAT(),
            DBI::SQL_NUMERIC(), DBI::SQL_REAL(),
        );

        if ( grep { $type == $_ } @int_types )
        {
            $val += 0;        # force integer numification
        }
        elsif ( grep { $type == $_ } @num_types )
        {
            $val += 0.;       # force floating point numification
        }
        else
        {
            $val = "$val";    # force stringification
        }
    }

    $sth->{sql_params}[ $pNum - 1 ] = $val;
    return 1;
} # bind_param
+
# Execute the prepared statement.  Any arguments replace the currently
# bound parameters.  Returns whatever the SQL engine's execute returns,
# or undef after set_err on failure (wrong parameter count, engine error
# or exception).  For SELECT statements the handle is marked Active and
# NUM_OF_FIELDS is filled in when not already set.
sub execute
{
    my $sth = shift;
    my $params = @_ ? ( $sth->{sql_params} = [@_] ) : $sth->{sql_params};

    # discard any previous result set before re-executing
    $sth->finish;
    my $stmt = $sth->{sql_stmt};

    # check the parameter count on the first execute only:
    unless ( $sth->{sql_params_checked}++ )
    {
        # bug in SQL::Statement 1.20 and below causes breakage
        # on all but the first call
        unless ( ( my $req_prm = $stmt->params() ) == ( my $nparm = @$params ) )
        {
            my $msg = "You passed $nparm parameters where $req_prm required";
            $sth->set_err( $DBI::stderr, $msg );
            return;
        }
    }

    # run the statement, collecting warnings so they can serve as the
    # error text when execute fails without setting $@ or errstr
    my @err;
    my $result;
    eval {
        local $SIG{__WARN__} = sub { push @err, @_ };
        $result = $stmt->execute( $sth, $params );
    };
    unless ( defined $result )
    {
        $sth->set_err( $DBI::stderr, $@ || $stmt->{errstr} || $err[0] );
        return;
    }

    if ( $stmt->{NUM_OF_FIELDS} )
    { # is a SELECT statement
        $sth->STORE( Active => 1 );
        $sth->FETCH("NUM_OF_FIELDS")
          or $sth->STORE( "NUM_OF_FIELDS", $stmt->{NUM_OF_FIELDS} );
    }
    return $result;
} # execute
+
# Release the result set of an executed statement: mark the handle
# inactive and drop the buffered rows.  The compiled statement itself is
# kept, so the handle can be executed again.  Always returns 1.
sub finish
{
    my ($sth) = @_;
    $sth->SUPER::STORE( Active => 0 );
    delete $sth->{sql_stmt}{data};
    return 1;
} # finish
+
# Fetch the next row of the result set as an array ref (also aliased as
# fetchrow_arrayref).  Returns undef after finishing the handle when the
# rows are exhausted, or undef after set_err when no preceding execute
# produced data (e.g. fetch on a non-SELECT statement).  With ChopBlanks
# set, trailing spaces are stripped from every defined column value.
sub fetch ($)
{
    my $sth = $_[0];
    my $data = $sth->{sql_stmt}{data};
    if ( !$data || ref $data ne "ARRAY" )
    {
        # fix: user-visible message previously misspelled "preceeding"
        $sth->set_err(
            $DBI::stderr,
            "Attempt to fetch row without a preceding execute () call or from a non-SELECT statement"
        );
        return;
    }
    my $dav = shift @$data;
    unless ($dav)
    {
        $sth->finish;
        return;
    }
    if ( $sth->FETCH("ChopBlanks") ) # XXX: (TODO) Only chop on CHAR fields,
    {                                # not on VARCHAR or NUMERIC (see DBI docs)
        $_ && $_ =~ s/ +$// for @$dav;
    }
    return $sth->_set_fbav($dav);
} # fetch
+
# DBI callers use fetchrow_arrayref; it is the same operation as fetch
# here, so alias the glob instead of adding a dispatch layer.  The 'once'
# warning for the one-time glob assignment is silenced locally.
no warnings 'once';
*fetchrow_arrayref = \&fetch;

use warnings;
+
# Determine the column names of the current statement, trying (in order)
# the statement's NAME attribute, SQL::Statement's column_defs, and
# finally column_names().  Returns the empty list when any name contains
# a "*" wildcard, as the real column list is unknown before execution.
sub sql_get_colnames
{
    my $sth = $_[0];
    # Being a bit dirty here, as neither SQL::Statement::Structure nor
    # DBI::SQL::Nano::Statement offers an interface to the required data
    my @colnames;
    if ( $sth->{sql_stmt}->{NAME} and "ARRAY" eq ref( $sth->{sql_stmt}->{NAME} ) )
    {
        @colnames = @{ $sth->{sql_stmt}->{NAME} };
    }
    elsif ( $sth->{sql_stmt}->isa('SQL::Statement') )
    {
        # column_defs entries carry either a name or a literal value
        my $stmt = $sth->{sql_stmt} || {};
        my @coldefs = @{ $stmt->{column_defs} || [] };
        @colnames = map { $_->{name} || $_->{value} } @coldefs;
    }
    @colnames = $sth->{sql_stmt}->column_names() unless (@colnames);

    # a wildcard means the real column list is not known yet
    @colnames = () if ( grep { m/\*/ } @colnames );

    return @colnames;
}
+
# Fetch a statement handle attribute.  The column-describing attributes
# (NAME, TYPE, TYPE_NAME, PRECISION, NULLABLE) are synthesized from the
# column names - every column is reported as a nullable VARCHAR with
# precision 0.  All-lowercase names are returned as driver-private
# attributes; everything else is delegated to DBI.
sub FETCH ($$)
{
    my ( $sth, $attrib ) = @_;

    $attrib eq "NAME" and return [ $sth->sql_get_colnames() ];

    if (   $attrib eq "TYPE"
        or $attrib eq "TYPE_NAME"
        or $attrib eq "PRECISION"
        or $attrib eq "NULLABLE" )
    {
        my $ncols = $sth->sql_get_colnames();    # scalar context: column count
        my $fill =
            $attrib eq "TYPE"      ? DBI::SQL_VARCHAR()
          : $attrib eq "TYPE_NAME" ? "VARCHAR"
          : $attrib eq "PRECISION" ? 0
          :                          1;          # NULLABLE
        return [ ($fill) x $ncols ];
    }

    # Private driver attributes are lower cased
    $attrib eq lc $attrib and return $sth->{$attrib};

    # else pass up to DBI to handle
    return $sth->SUPER::FETCH($attrib);
} # FETCH
+
# Store a statement handle attribute.  All-lowercase names are treated as
# driver-private and stored directly on the handle (returning 1);
# everything else is delegated to DBI.
sub STORE ($$$)
{
    my ( $sth, $key, $val ) = @_;

    # mixed/upper case: DBI common attribute
    return $sth->SUPER::STORE( $key, $val ) if $key ne lc $key;

    $sth->{$key} = $val;
    return 1;
} # STORE
+
# Destructor: finish the handle when still Active and drop the references
# to the compiled statement and the bound parameters.
sub DESTROY ($)
{
    my ($sth) = @_;
    $sth->finish if $sth->SUPER::FETCH("Active");
    undef $sth->{sql_stmt};
    undef $sth->{sql_params};
} # DESTROY
+
# Return the number of rows affected by the last execute, as recorded by
# the SQL engine in the statement's NUM_OF_ROWS.
sub rows ($)
{
    my ($sth) = @_;
    return $sth->{sql_stmt}{NUM_OF_ROWS};
} # rows
+
+# ====== SQL::STATEMENT ========================================================
+
# Statement engine class: inherits the complete implementation from
# DBI::SQL::Nano::Statement.  NOTE(review): when sql_handler is
# SQL::Statement the parent is presumably rewired outside this chunk -
# confirm before relying on the static ISA below.
package DBI::DBD::SqlEngine::Statement;

use strict;
use warnings;

use Carp;

@DBI::DBD::SqlEngine::Statement::ISA = qw(DBI::SQL::Nano::Statement);
+
+# ====== SQL::TABLE ============================================================
+
# Table class: inherits the complete implementation from
# DBI::SQL::Nano::Table.  Derived drivers subclass this to provide their
# own storage backend (see the POD below).
package DBI::DBD::SqlEngine::Table;

use strict;
use warnings;

@DBI::DBD::SqlEngine::Table::ISA = qw(DBI::SQL::Nano::Table);
+
+=pod
+
+=head1 NAME
+
+DBI::DBD::SqlEngine - Base class for DBI drivers without their own SQL engine
+
+=head1 SYNOPSIS
+
+ package DBD::myDriver;
+
+ use base qw(DBI::DBD::SqlEngine);
+
+ sub driver
+ {
+ ...
+ my $drh = $proto->SUPER::driver($attr);
+ ...
+ return $drh->{class};
+ }
+
+ package DBD::myDriver::dr;
+
+ @ISA = qw(DBI::DBD::SqlEngine::dr);
+
+ sub data_sources { ... }
+ ...
+
+ package DBD::myDriver::db;
+
+ @ISA = qw(DBI::DBD::SqlEngine::db);
+
+ sub init_valid_attributes { ... }
+ sub init_default_attributes { ... }
+ sub set_versions { ... }
+ sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
+ sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
+ sub get_myd_versions { ... }
+ sub get_avail_tables { ... }
+
+ package DBD::myDriver::st;
+
+ @ISA = qw(DBI::DBD::SqlEngine::st);
+
+ sub FETCH { ... }
+ sub STORE { ... }
+
+ package DBD::myDriver::Statement;
+
+ @ISA = qw(DBI::DBD::SqlEngine::Statement);
+
+ sub open_table { ... }
+
+ package DBD::myDriver::Table;
+
+ @ISA = qw(DBI::DBD::SqlEngine::Table);
+
+ sub new { ... }
+
+=head1 DESCRIPTION
+
+DBI::DBD::SqlEngine abstracts the usage of SQL engines from the
+DBD. DBD authors can concentrate on the data retrieval they want to
+provide.
+
It is strongly recommended that you read L<DBD::File::Developers> and
L<DBD::File::Roadmap>, because much of the DBD::File API is provided
by DBI::DBD::SqlEngine.
+
+Currently the API of DBI::DBD::SqlEngine is experimental and will
+likely change in the near future to provide the table meta data basics
+like DBD::File.
+
+=head2 Metadata
+
+The following attributes are handled by DBI itself and not by
+DBI::DBD::SqlEngine, thus they all work as expected:
+
+ Active
+ ActiveKids
+ CachedKids
+ CompatMode (Not used)
+ InactiveDestroy
+ AutoInactiveDestroy
+ Kids
+ PrintError
+ RaiseError
+ Warn (Not used)
+
+=head3 The following DBI attributes are handled by DBI::DBD::SqlEngine:
+
+=head4 AutoCommit
+
+Always on.
+
+=head4 ChopBlanks
+
+Works.
+
+=head4 NUM_OF_FIELDS
+
+Valid after C<< $sth->execute >>.
+
+=head4 NUM_OF_PARAMS
+
+Valid after C<< $sth->prepare >>.
+
+=head4 NAME
+
+Valid after C<< $sth->execute >>; probably undef for Non-Select statements.
+
+=head4 NULLABLE
+
+Not really working, always returns an array ref of ones, as DBD::CSV
+does not verify input data. Valid after C<< $sth->execute >>; undef for
+non-select statements.
+
+=head3 The following DBI attributes and methods are not supported:
+
+=over 4
+
+=item bind_param_inout
+
+=item CursorName
+
+=item LongReadLen
+
+=item LongTruncOk
+
+=back
+
+=head3 DBI::DBD::SqlEngine specific attributes
+
+In addition to the DBI attributes, you can use the following dbh
+attributes:
+
+=head4 sql_engine_version
+
+Contains the module version of this driver (B<readonly>)
+
+=head4 sql_nano_version
+
+Contains the module version of DBI::SQL::Nano (B<readonly>)
+
+=head4 sql_statement_version
+
+Contains the module version of SQL::Statement, if available (B<readonly>)
+
+=head4 sql_handler
+
+Contains the SQL Statement engine, either DBI::SQL::Nano or SQL::Statement
+(B<readonly>).
+
+=head4 sql_parser_object
+
+Contains an instantiated instance of SQL::Parser (B<readonly>).
+This is filled when used first time (only when used with SQL::Statement).
+
+=head4 sql_sponge_driver
+
+Contains an internally used DBD::Sponge handle (B<readonly>).
+
+=head4 sql_valid_attrs
+
+Contains the list of valid attributes for each DBI::DBD::SqlEngine based
+driver (B<readonly>).
+
+=head4 sql_readonly_attrs
+
+Contains the list of those attributes which are readonly (B<readonly>).
+
+=head4 sql_identifier_case
+
+Contains how DBI::DBD::SqlEngine deals with non-quoted SQL identifiers:
+
+ * SQL_IC_UPPER (1) means all identifiers are internally converted
+ into upper-cased pendants
+ * SQL_IC_LOWER (2) means all identifiers are internally converted
+ into lower-cased pendants
+ * SQL_IC_MIXED (4) means all identifiers are taken as they are
+
These conversions happen if (and only if) no existing identifier matches.
An identifier that already exists is used as-is.
+
The SQL statement execution classes don't have to care, so do not expect
C<sql_identifier_case> to affect column names in statements like
+
+ SELECT * FROM foo
+
+=head4 sql_quoted_identifier_case
+
Contains how DBI::DBD::SqlEngine deals with quoted SQL identifiers
(B<readonly>). It is fixed to SQL_IC_SENSITIVE (3), which is interpreted
as SQL_IC_MIXED.
+
+=head4 sql_flags
+
+Contains additional flags to instantiate an SQL::Parser. Because an
+SQL::Parser is instantiated only once, it's recommended to set this flag
+before any statement is executed.
+
+=head4 sql_dialect
+
+Controls the dialect understood by SQL::Parser. Possible values (delivery
+state of SQL::Statement):
+
+ * ANSI
+ * CSV
+ * AnyData
+
Defaults to "CSV". Because an SQL::Parser is instantiated only once and
SQL::Parser does not allow the dialect to be modified once instantiated,
it is strongly recommended to set this flag before any statement is
executed (the best place is the connect attribute hash).
+
+=head1 SUPPORT
+
+You can find documentation for this module with the perldoc command.
+
+ perldoc DBI::DBD::SqlEngine
+
+You can also look for information at:
+
+=over 4
+
+=item * RT: CPAN's request tracker
+
+L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=DBI>
+L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=SQL-Statement>
+
+=item * AnnoCPAN: Annotated CPAN documentation
+
+L<http://annocpan.org/dist/DBI>
+L<http://annocpan.org/dist/SQL-Statement>
+
+=item * CPAN Ratings
+
+L<http://cpanratings.perl.org/d/DBI>
+
+=item * Search CPAN
+
+L<http://search.cpan.org/dist/DBI/>
+
+=back
+
+=head2 Where can I go for more help?
+
+For questions about installation or usage, please ask on the
+dbi-dev@perl.org mailing list.
+
+If you have a bug report, patch or suggestion, please open
+a new report ticket on CPAN, if there is not already one for
+the issue you want to report. Of course, you can mail any of the
+module maintainers, but it is less likely to be missed if
+it is reported on RT.
+
+Report tickets should contain a detailed description of the bug or
+enhancement request you want to report and at least an easy way to
+verify/reproduce the issue and any supplied fix. Patches are always
+welcome, too.
+
+=head1 ACKNOWLEDGEMENTS
+
+Thanks to Tim Bunce, Martin Evans and H.Merijn Brand for their continued
+support while developing DBD::File, DBD::DBM and DBD::AnyData.
+Their support, hints and feedback helped to design and implement this
+module.
+
+=head1 AUTHOR
+
+This module is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+The original authors are Jochen Wiedmann and Jeff Zucker.
+
+=head1 COPYRIGHT AND LICENSE
+
+ Copyright (C) 2009-2010 by H.Merijn Brand & Jens Rehsack
+ Copyright (C) 2004-2009 by Jeff Zucker
+ Copyright (C) 1998-2004 by Jochen Wiedmann
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=head1 SEE ALSO
+
+L<DBI>, L<DBD::File>, L<DBD::AnyData> and L<DBD::Sys>.
+
+=cut
diff --git a/lib/DBI/DBD/SqlEngine/Developers.pod b/lib/DBI/DBD/SqlEngine/Developers.pod
new file mode 100644
index 0000000..2ee3a5f
--- /dev/null
+++ b/lib/DBI/DBD/SqlEngine/Developers.pod
@@ -0,0 +1,422 @@
+=head1 NAME
+
+DBI::DBD::SqlEngine::Developers - Developers documentation for DBI::DBD::SqlEngine
+
+=head1 SYNOPSIS
+
+ package DBD::myDriver;
+
+ use base qw(DBI::DBD::SqlEngine);
+
+ sub driver
+ {
+ ...
+ my $drh = $proto->SUPER::driver($attr);
+ ...
+ return $drh->{class};
+ }
+
+ sub CLONE { ... }
+
+ package DBD::myDriver::dr;
+
+ @ISA = qw(DBI::DBD::SqlEngine::dr);
+
+ sub data_sources { ... }
+ ...
+
+ package DBD::myDriver::db;
+
+ @ISA = qw(DBI::DBD::SqlEngine::db);
+
+ sub init_valid_attributes { ... }
+ sub init_default_attributes { ... }
+ sub set_versions { ... }
+ sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
+ sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
+ sub get_myd_versions { ... }
+ sub get_avail_tables { ... }
+
+ package DBD::myDriver::st;
+
+ @ISA = qw(DBI::DBD::SqlEngine::st);
+
+ sub FETCH { ... }
+ sub STORE { ... }
+
+ package DBD::myDriver::Statement;
+
+ @ISA = qw(DBI::DBD::SqlEngine::Statement);
+
+ sub open_table { ... }
+
+ package DBD::myDriver::Table;
+
+ @ISA = qw(DBI::DBD::SqlEngine::Table);
+
+ sub new { ... }
+
+ sub fetch_row { ... }
+ sub push_row { ... }
+ sub push_names { ... }
+ sub seek { ... }
+ sub truncate { ... }
+ sub drop { ... }
+
+ # optimize the SQL engine by add one or more of
+ sub update_current_row { ... }
+ # or
+ sub update_specific_row { ... }
+ # or
+ sub update_one_row { ... }
+ # or
+ sub insert_new_row { ... }
+ # or
+ sub delete_current_row { ... }
+ # or
+ sub delete_one_row { ... }
+
+=head1 DESCRIPTION
+
+This document describes the interface of DBI::DBD::SqlEngine for DBD
+developers who write DBI::DBD::SqlEngine based DBI drivers. It supplements
+L<DBI::DBD> and L<DBI::DBD::SqlEngine::HowTo>, which you should read first.
+
+=head1 CLASSES
+
+Each DBI driver must provide a package global C<< driver >> method and
+three DBI related classes:
+
+=over 4
+
+=item DBI::DBD::SqlEngine::dr
+
+Driver package, contains the methods DBI calls indirectly via DBI
+interface:
+
+ DBI->connect ('DBI:DBM:', undef, undef, {})
+
+ # invokes
+ package DBD::DBM::dr;
+ @DBD::DBM::dr::ISA = qw(DBI::DBD::SqlEngine::dr);
+
+ sub connect ($$;$$$)
+ {
+ ...
+ }
+
+Similar for C<< data_sources () >> and C<< disconnect_all() >>.
+
Pure Perl DBI drivers derived from DBI::DBD::SqlEngine do not usually need to
override any of the methods provided through the DBD::XXX::dr package;
however, if you need additional initialization in the connect method,
you may need to.
+
+=item DBI::DBD::SqlEngine::db
+
+Contains the methods which are called through DBI database handles
+(C<< $dbh >>). e.g.,
+
+ $sth = $dbh->prepare ("select * from foo");
+ # returns the f_encoding setting for table foo
+ $dbh->csv_get_meta ("foo", "f_encoding");
+
+DBI::DBD::SqlEngine provides the typical methods required here. Developers who
+write DBI drivers based on DBI::DBD::SqlEngine need to override the methods
+C<< set_versions >> and C<< init_valid_attributes >>.
+
+=item DBI::DBD::SqlEngine::st
+
+Contains the methods to deal with prepared statement handles. e.g.,
+
+ $sth->execute () or die $sth->errstr;
+
+=back
+
+=head2 DBI::DBD::SqlEngine
+
+This is the main package containing the routines to initialize
+DBI::DBD::SqlEngine based DBI drivers. Primarily the
+C<< DBI::DBD::SqlEngine::driver >> method is invoked, either directly
+from DBI when the driver is initialized or from the derived class.
+
+ package DBD::DBM;
+
+ use base qw( DBI::DBD::SqlEngine );
+
+ sub driver
+ {
+ my ( $class, $attr ) = @_;
+ ...
+ my $drh = $class->SUPER::driver( $attr );
+ ...
+ return $drh;
+ }
+
+It is not necessary to implement your own driver method as long as
+additional initialization (e.g. installing more private driver
+methods) is not required. You do not need to call C<< setup_driver >>
+as DBI::DBD::SqlEngine takes care of it.
+
+=head2 DBI::DBD::SqlEngine::dr
+
+The driver package contains the methods DBI calls indirectly via the DBI
+interface (see L<DBI/DBI Class Methods>).
+
+DBI::DBD::SqlEngine based DBI drivers usually do not need to implement anything here,
+it is enough to do the basic initialization:
+
    package DBD::XXX::dr;
+
+ @DBD::XXX::dr::ISA = qw (DBI::DBD::SqlEngine::dr);
+ $DBD::XXX::dr::imp_data_size = 0;
+ $DBD::XXX::dr::data_sources_attr = undef;
+ $DBD::XXX::ATTRIBUTION = "DBD::XXX $DBD::XXX::VERSION by Hans Mustermann";
+
+=head2 DBI::DBD::SqlEngine::db
+
+This package defines the database methods, which are called via the DBI
+database handle C<< $dbh >>.
+
+Methods provided by DBI::DBD::SqlEngine:
+
+=over 4
+
+=item ping
+
+Simply returns the content of the C<< Active >> attribute. Override
+when your driver needs more complicated actions here.
+
+=item prepare
+
Prepares a new SQL statement to execute. Returns a statement handle,
C<< $sth >> - an instance of DBD::XXX::st. It is neither required nor
recommended to override this method.
+
+=item FETCH
+
+Fetches an attribute of a DBI database object. Private handle attributes
+must have a prefix (this is mandatory). If a requested attribute is
+detected as a private attribute without a valid prefix, the driver prefix
+(written as C<$drv_prefix>) is added.
+
+The driver prefix is extracted from the attribute name and verified against
+C<< $dbh->{ $drv_prefix . "valid_attrs" } >> (when it exists). If the
+requested attribute value is not listed as a valid attribute, this method
+croaks. If the attribute is valid and readonly (listed in C<< $dbh->{
+$drv_prefix . "readonly_attrs" } >> when it exists), a real copy of the
+attribute value is returned. So it's not possible to modify
+C<f_valid_attrs> from outside of DBI::DBD::SqlEngine::db or a derived class.
+
+=item STORE
+
+Stores a database private attribute. Private handle attributes must have a
+prefix (this is mandatory). If a requested attribute is detected as a private
+attribute without a valid prefix, the driver prefix (written as
+C<$drv_prefix>) is added. If the database handle has an attribute
+C<${drv_prefix}_valid_attrs> - for attribute names which are not listed in
+that hash, this method croaks. If the database handle has an attribute
+C<${drv_prefix}_readonly_attrs>, only attributes which are not listed there
+can be stored (once they are initialized). Trying to overwrite such an
+immutable attribute forces this method to croak.
+
+An example of a valid attributes list can be found in
+C<< DBI::DBD::SqlEngine::db::init_valid_attributes >>.
+
+=item set_versions
+
+This method sets the attributes C<< f_version >>, C<< sql_nano_version >>,
+C<< sql_statement_version >> and (if not prohibited by a restrictive
+C<< ${prefix}_valid_attrs >>) C<< ${prefix}_version >>.
+
+This method is called at the end of the C<< connect () >> phase.
+
+When overriding this method, do not forget to invoke the superior one.
+
+=item init_valid_attributes
+
+This method is called after the database handle is instantiated as the
+first attribute initialization.
+
+C<< DBI::DBD::SqlEngine::db::init_valid_attributes >> initializes the
+attributes C<sql_valid_attrs> and C<sql_readonly_attrs>.
+
+When overriding this method, do not forget to invoke the superior one,
+preferably before doing anything else.
+
+=item init_default_attributes
+
+This method is called after the database handle is instantiated to
+initialize the default attributes.
+
+C<< DBI::DBD::SqlEngine::db::init_default_attributes >> initializes the
+attributes C<sql_identifier_case>, C<sql_quoted_identifier_case>,
+C<sql_handler>, C<sql_engine_version>, C<sql_nano_version> and
+C<sql_statement_version> when L<SQL::Statement> is available.
+
+When the derived implementor class provides the attribute to validate
+attributes (e.g. C<< $dbh->{dbm_valid_attrs} = {...}; >>) or the attribute
+containing the immutable attributes (e.g. C<< $dbh->{dbm_readonly_attrs}
+= {...}; >>), the attributes C<drv_valid_attrs>, C<drv_readonly_attrs> and
+C<drv_version> are added (when available) to the list of valid and
+immutable attributes (where C<drv_> is interpreted as the driver prefix).
+
+=item get_versions
+
+This method is called by the code injected into the instantiated driver to
+provide the user callable driver method C<< ${prefix}versions >> (e.g.
+C<< dbm_versions >>, C<< csv_versions >>, ...).
+
+The DBI::DBD::SqlEngine implementation returns all version information known by
+DBI::DBD::SqlEngine (e.g. DBI version, Perl version, DBI::DBD::SqlEngine version and
+the SQL handler version).
+
+C<get_versions> takes the C<$dbh> as the first argument and optionally a
+second argument containing a table name. The second argument is not
+evaluated in C<< DBI::DBD::SqlEngine::db::get_versions >> itself - but
+might be in the future.
+
+If the derived implementor class provides a method named
+C<get_${drv_prefix}versions>, this is invoked and the return value of
+it is associated to the derived driver name:
+
    if (my $dgv = $dbh->{ImplementorClass}->can ("get_" . $drv_prefix . "versions")) {
+ (my $derived_driver = $dbh->{ImplementorClass}) =~ s/::db$//;
+ $versions{$derived_driver} = &$dgv ($dbh, $table);
+ }
+
+Override it to add more version information about your module (e.g.
+some kind of parser version in case of DBD::CSV, ...), if one line is not
+enough room to provide all relevant information.
+
+=item sql_parser_object
+
+Returns a L<SQL::Parser> instance, when C<< sql_handler >> is set to
+"SQL::Statement". The parser instance is stored in C<< sql_parser_object >>.
+
+It is not recommended to override this method.
+
+=item disconnect
+
+Disconnects from a database. All local table information is discarded and
+the C<< Active >> attribute is set to 0.
+
+=item type_info_all
+
+Returns information about all the types supported by DBI::DBD::SqlEngine.
+
+=item table_info
+
+Returns a statement handle which is prepared to deliver information about
+all known tables.
+
+=item list_tables
+
+Returns a list of all known table names.
+
+=item quote
+
+Quotes a string for use in SQL statements.
+
+=item commit
+
+Warns about a useless call (if warnings enabled) and returns.
+DBI::DBD::SqlEngine is typically a driver which commits every action instantly when
+executed.
+
+=item rollback
+
+Warns about a useless call (if warnings enabled) and returns.
+DBI::DBD::SqlEngine is typically a driver which commits every action instantly when
+executed.
+
+=back
+
+=head2 DBI::DBD::SqlEngine::st
+
+Contains the methods to deal with prepared statement handles:
+
+=over 4
+
+=item bind_param
+
+Common routine to bind placeholders to a statement for execution. It
+is dangerous to override this method without detailed knowledge about
+the DBI::DBD::SqlEngine internal storage structure.
+
+=item execute
+
+Executes a previously prepared statement (with placeholders, if any).
+
+=item finish
+
+Finishes a statement handle, discards all buffered results. The prepared
+statement is not discarded so the statement can be executed again.
+
+=item fetch
+
+Fetches the next row from the result-set. This method may be rewritten
+in a later version and if it's overridden in a derived class, the
+derived implementation should not rely on the storage details.
+
+=item fetchrow_arrayref
+
+Alias for C<< fetch >>.
+
+=item FETCH
+
+Fetches statement handle attributes. Supported attributes (for full overview
+see L<DBI/Statement Handle Attributes>) are C<NAME>, C<TYPE>, C<PRECISION>
+and C<NULLABLE>. Each column is returned as C<NULLABLE> which might be wrong
+depending on the derived backend storage. If the statement handle has
+private attributes, they can be fetched using this method, too. B<Note> that
+statement attributes are not associated with any table used in this statement.
+
+This method usually requires extending in a derived implementation.
+See L<DBD::CSV> or L<DBD::DBM> for some example.
+
+=item STORE
+
+Allows storing of statement private attributes. No special handling is
+currently implemented here.
+
+=item rows
+
+Returns the number of rows affected by the last execute. This method might
+return C<undef>.
+
+=back
+
+=head2 DBI::DBD::SqlEngine::Statement
+
+Derives from DBI::SQL::Nano::Statement for unified naming when deriving
+new drivers. No additional feature is provided from here.
+
+=head2 DBI::DBD::SqlEngine::Table
+
+Derives from DBI::SQL::Nano::Table for unified naming when deriving
+new drivers. No additional feature is provided from here.
+
+You should consult the documentation of C<< SQL::Eval::Table >> (see
+L<SQL::Eval>) to get more information about the abstract methods of the
+table's base class you have to override and a description of the table
+meta information expected by the SQL engines.
+
+=head1 AUTHOR
+
+The module DBI::DBD::SqlEngine is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=cut
diff --git a/lib/DBI/DBD/SqlEngine/HowTo.pod b/lib/DBI/DBD/SqlEngine/HowTo.pod
new file mode 100644
index 0000000..764dd08
--- /dev/null
+++ b/lib/DBI/DBD/SqlEngine/HowTo.pod
@@ -0,0 +1,218 @@
+=head1 NAME
+
+DBI::DBD::SqlEngine::HowTo - Guide to create DBI::DBD::SqlEngine based driver
+
+=head1 SYNOPSIS
+
+ perldoc DBI::DBD::SqlEngine::HowTo
+ perldoc DBI
+ perldoc DBI::DBD
+ perldoc DBI::DBD::SqlEngine::Developers
+ perldoc SQL::Eval
+ perldoc DBI::DBD::SqlEngine
+ perldoc DBI::DBD::SqlEngine::HowTo
+ perldoc SQL::Statement::Embed
+
+=head1 DESCRIPTION
+
+This document provides a step-by-step guide, how to create a new
+C<DBI::DBD::SqlEngine> based DBD. It expects that you carefully read the
+L<DBI> documentation and that you're familiar with L<DBI::DBD> and had
+read and understood L<DBD::ExampleP>.
+
+This document addresses experienced developers who are really sure that
+they need to invest time when writing a new DBI Driver. Writing a DBI
+Driver is neither a weekend project nor an easy job for hobby coders
+after work. Expect one or two man-month of time for the first start.
+
+Those who are still reading, should be able to sing the rules of
+L<DBI::DBD/CREATING A NEW DRIVER>.
+
+=head1 CREATING DRIVER CLASSES
+
+Do you have an entry in DBI's DBD registry? For this guide, a prefix of
+C<foo_> is assumed.
+
+=head2 Sample Skeleton
+
+ package DBD::Foo;
+
+ use strict;
+ use warnings;
+ use vars qw($VERSION);
+ use base qw(DBI::DBD::SqlEngine);
+
+ use DBI ();
+
+ $VERSION = "0.001";
+
+ package DBD::Foo::dr;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBI::DBD::SqlEngine::dr);
+ $imp_data_size = 0;
+
+ package DBD::Foo::db;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBI::DBD::SqlEngine::db);
+ $imp_data_size = 0;
+
+ package DBD::Foo::st;
+
+ use vars qw(@ISA $imp_data_size);
+
+ @ISA = qw(DBI::DBD::SqlEngine::st);
+ $imp_data_size = 0;
+
+ package DBD::Foo::Statement;
+
+ use vars qw(@ISA);
+
+ @ISA = qw(DBI::DBD::SqlEngine::Statement);
+
+ package DBD::Foo::Table;
+
+ use vars qw(@ISA);
+
+ @ISA = qw(DBI::DBD::SqlEngine::Table);
+
+ 1;
+
+Tiny, eh? And all you have now is a DBD named foo which is able to
+deal with temporary tables, as long as you use L<SQL::Statement>. In
+L<DBI::SQL::Nano> environments, this DBD can do nothing.
+
+=head2 Deal with own attributes
+
+Before we start doing usable stuff with our DBI driver, we need to think
+about what we want to do and how we want to do it.
+
+Do we need tunable knobs accessible by users? Do we need status
+information? All this is handled in attributes of the database handles (be
+careful when your DBD is running "behind" a L<DBD::Gofer> proxy).
+
+How do the attributes get into the DBD and how are they fetchable by the
+user? Good question, but you should know because you've read the L<DBI>
+documentation.
+
+C<DBI::DBD::SqlEngine::db::FETCH> and C<DBI::DBD::SqlEngine::db::STORE>
+take care of this for you - all they need to know is which attribute names
+are valid and mutable or immutable. Tell them by adding
+C<init_valid_attributes> to your db class:
+
+ sub init_valid_attributes
+ {
+ my $dbh = $_[0];
+
+ $dbh->SUPER::init_valid_attributes ();
+
+ $dbh->{foo_valid_attrs} = {
+ foo_version => 1, # contains version of this driver
+ foo_valid_attrs => 1, # contains the valid attributes of foo drivers
+ foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
+ foo_bar => 1, # contains the bar attribute
+ foo_baz => 1, # contains the baz attribute
+ foo_manager => 1, # contains the manager of the driver instance
+ foo_manager_type => 1, # contains the manager class of the driver instance
+ };
+ $dbh->{foo_readonly_attrs} = {
+ foo_version => 1, # ensure no-one modifies the driver version
+ foo_valid_attrs => 1, # do not permit to add more valid attributes ...
+ foo_readonly_attrs => 1, # ... or make the immutable mutable
+ foo_manager => 1, # manager is set internally only
+ };
+
+ return $dbh;
+ }
+
+Woooho - but now the user cannot assign new managers? This is intended,
+override C<STORE> to handle it!
+
+ sub STORE ($$$)
+ {
+ my ( $dbh, $attrib, $value ) = @_;
+
+ $dbh->SUPER::STORE( $attrib, $value );
+
+ # we're still alive, so no exception is thrown ...
+ # by DBI::DBD::SqlEngine::db::STORE
+ if ( $attrib eq "foo_manager_type" )
+ {
+ $dbh->{foo_manager} = $dbh->{foo_manager_type}->new();
+ # ... probably correct some states based on the new
+ # foo_manager_type - see DBD::Sys for an example
+ }
+ }
+
+But ... my driver runs without a manager until someone first assigns
+a C<foo_manager_type>. Well, no - there are two places where you can
+initialize defaults:
+
+ sub init_default_attributes
+ {
+ my ($dbh, $phase) = @_;
+
+ $dbh->SUPER::init_default_attributes($phase);
+
+ if( 0 == $phase )
+ {
+ # init all attributes which have no knowledge about
+ # user settings from DSN or the attribute hash
+ $dbh->{foo_manager_type} = "DBD::Foo::Manager";
+ }
+ elsif( 1 == $phase )
+ {
+ # init phase with more knowledge from DSN or attribute
+ # hash
+ $dbh->{foo_manager} = $dbh->{foo_manager_type}->new();
+ }
+
+ return $dbh;
+ }
+
+So far we can prevent the users from using our database driver as data
+storage for anything and everything. We care only about the real important
+stuff for peace on earth and alike attributes. But in fact, the driver
+still can't do anything. It can do less than nothing - meanwhile it's
+not a stupid storage area anymore.
+
+=head2 Dealing with Tables
+
+Let's put some life into it - it's going to be time for it.
+
+This is a good point where a quick side step to L<SQL::Statement::Embed>
+will help to shorten the next paragraph. The documentation in
+SQL::Statement::Embed regarding embedding in own DBD's works pretty
+fine with SQL::Statement and DBI::SQL::Nano.
+
+=head2 Testing
+
+Now you should have your first own DBD. Was easy, wasn't it? But does
+it work well? Prove it by writing tests and remember to use
+dbd_edit_mm_attribs from L<DBI::DBD> to ensure testing even rare cases.
+
+=head1 AUTHOR
+
+This guide is written by Jens Rehsack. DBI::DBD::SqlEngine is written by
+Jens Rehsack using code from DBD::File originally written by Jochen
+Wiedmann and Jeff Zucker.
+
+The module DBI::DBD::SqlEngine is currently maintained by
+
+H.Merijn Brand < h.m.brand at xs4all.nl > and
+Jens Rehsack < rehsack at googlemail.com >
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
+
+All rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License, as
+specified in the Perl README file.
+
+=cut
diff --git a/lib/DBI/FAQ.pm b/lib/DBI/FAQ.pm
new file mode 100644
index 0000000..1ad760b
--- /dev/null
+++ b/lib/DBI/FAQ.pm
@@ -0,0 +1,966 @@
+###
+### $Id: FAQ.pm 14934 2011-09-14 10:02:25Z timbo $
+###
+### DBI Frequently Asked Questions POD
+###
+### Copyright section reproduced from below.
+###
+### This document is Copyright (c)1994-2000 Alligator Descartes, with portions
+### Copyright (c)1994-2000 their original authors. This module is released under
+### the 'Artistic' license which you can find in the perl distribution.
+###
+### This document is Copyright (c)1997-2000 Alligator Descartes. All rights reserved.
+### Permission to distribute this document, in full or in part, via email,
+### Usenet, ftp archives or http is granted providing that no charges are involved,
+### reasonable attempt is made to use the most current version and all credits
+### and copyright notices are retained ( the I<AUTHOR> and I<COPYRIGHT> sections ).
+### Requests for other distribution rights, including incorporation into
+### commercial products, such as books, magazine articles or CD-ROMs should be
+### made to Alligator Descartes.
+###
+
+package DBI::FAQ;
+
+our $VERSION = sprintf("1.%06d", q$Revision: 14934 $ =~ /(\d+)/o);
+
+
+=head1 NAME
+
+DBI::FAQ -- The Frequently Asked Questions for the Perl5 Database Interface
+
+=for html
+<BODY BGCOLOR="#ffffff" TEXT="#000000" LINK="#3a15ff" ALINK="#ff0000" VLINK="#ff282d">
+<!--#include virtual="/technology/perl/DBI/templatetop.html" -->
+<CENTER>
+<FONT SIZE="+2">
+DBI Frequently Asked Questions v.0.38
+</FONT>
+<BR>
+<FONT SIZE="-1">
+<I>Last updated: February 8th, 2000</I>
+</FONT>
+</CENTER>
+<P>
+
+=head1 SYNOPSIS
+
+ perldoc DBI::FAQ
+
+=head1 VERSION
+
+This document is currently at version I<0.38>, as of I<February 8th, 2000>.
+
+That's B<very> old. A newer FAQ can be found at L<http://faq.dbi-support.com/>
+
+Neither this document nor that web site are actively maintained.
+Volunteers are welcome.
+
+=head1 DESCRIPTION
+
+This document serves to answer the most frequently asked questions on both
+the DBI Mailing Lists and personally to members of the DBI development team.
+
+=head1 Basic Information & Information Sources
+
+=head2 1.1 What is DBI, DBperl, Oraperl and *perl?
+
+To quote Tim Bunce, the architect and author of DBI:
+
+ DBI is a database access Application Programming Interface (API)
+ for the Perl Language. The DBI API Specification defines a set
+ of functions, variables and conventions that provide a consistent
+ database interface independent of the actual database being used.
+
+In simple language, the DBI interface allows users to access multiple database
+types transparently. So, if you're connecting to an Oracle, Informix, mSQL, Sybase
+or whatever database, you don't need to know the underlying mechanics of the
+3GL layer. The API defined by DBI will work on I<all> these database types.
+
+A similar benefit is gained by the ability to connect to two I<different>
+databases of different vendors within the one perl script, I<ie>, I want
+to read data from an Oracle database and insert it back into an Informix
+database all within one program. The DBI layer allows you to do this simply
+and powerfully.
+
+
+=for html
+Here's a diagram that demonstrates the principle:
+<P>
+<CENTER>
+<IMG SRC="img/dbiarch.gif" WIDTH=451 HEIGHT=321 ALT="[ DBI Architecture ]">
+</CENTER>
+<P>
+
+I<DBperl> is the old name for the interface specification. It's usually
+now used to denote perlI<4> modules on database interfacing, such as,
+I<oraperl>, I<isqlperl>, I<ingperl> and so on. These interfaces
+didn't have a standard API and are generally I<not> supported.
+
+Here's a list of DBperl modules, their corresponding DBI counterparts and
+support information. I<Please note>, the authors listed here generally
+I<do not> maintain the DBI module for the same database. These email
+addresses are unverified and should only be used for queries concerning the
+perl4 modules listed below. DBI driver queries should be directed to the
+I<dbi-users> mailing list.
+
+ Module Name Database Required Author DBI
+ ----------- ----------------- ------ ---
+ Sybperl Sybase Michael Peppler DBD::Sybase
+ <mpeppler@itf.ch>
+ Oraperl Oracle 6 & 7 Kevin Stock DBD::Oracle
+ <dbi-users@perl.org>
+ Ingperl Ingres Tim Bunce & DBD::Ingres
+ Ted Lemon
+ <dbi-users@perl.org>
+ Interperl Interbase Buzz Moschetti DBD::Interbase
+ <buzz@bear.com>
+ Uniperl Unify 5.0 Rick Wargo None
+ <rickers@coe.drexel.edu>
+ Pgperl Postgres Igor Metz DBD::Pg
+ <metz@iam.unibe.ch>
+ Btreeperl NDBM John Conover SDBM?
+ <john@johncon.com>
+ Ctreeperl C-Tree John Conover None
+ <john@johncon.com>
+ Cisamperl Informix C-ISAM Mathias Koerber None
+ <mathias@unicorn.swi.com.sg>
+ Duaperl X.500 Directory Eric Douglas None
+ User Agent
+
+However, some DBI modules have DBperl emulation layers, so, I<DBD::Oracle>
+comes with an Oraperl emulation layer, which allows you to run legacy oraperl
+scripts without modification. The emulation layer translates the oraperl API
+calls into DBI calls and executes them through the DBI switch.
+
+Here's a table of emulation layer information:
+
+ Module Emulation Layer Status
+ ------ --------------- ------
+ DBD::Oracle Oraperl Complete
+ DBD::Informix Isqlperl Under development
+ DBD::Ingres Ingperl Complete?
+ DBD::Sybase Sybperl Working? ( Needs verification )
+ DBD::mSQL Msqlperl Experimentally released with
+ DBD::mSQL-0.61
+
+The I<Msqlperl> emulation is a special case. I<Msqlperl> is a perl5 driver
+for I<mSQL> databases, but does not conform to the DBI Specification. Its
+use is being deprecated in favour of I<DBD::mSQL>. I<Msqlperl> may be downloaded
+from CPAN I<via>:
+
+ http://www.perl.com/cgi-bin/cpan_mod?module=Msqlperl
+
+=head2 1.2. Where can I get it from?
+
+The Comprehensive Perl Archive Network
+resources should be used for retrieving up-to-date versions of the DBI
+and drivers. CPAN may be accessed I<via> Tom Christiansen's splendid
+I<CPAN multiplexer> program located at:
+
+ http://www.perl.com/CPAN/
+
+For more specific version information and exact URLs of drivers, please see
+the DBI drivers list and the DBI module pages which can be found on:
+
+ http://dbi.perl.org/
+
+This list is automatically generated on a nightly basis from CPAN and should
+be up-to-date.
+
+=head2 1.3. Where can I get more information?
+
+There are a few information sources on DBI.
+
+=over 4
+
+=item I<"Programming the Perl DBI">
+
+"Programming the Perl DBI" is the I<official> book on the DBI written by
+Alligator Descartes and Tim Bunce and published by O'Reilly & Associates.
+The book was released on February 9th, 2000.
+
+The table of contents is:
+
+ Preface
+ 1. Introduction
+ From Mainframes to Workstations
+ Perl
+ DBI in the Real World
+ A Historical Interlude and Standing Stones
+ 2. Basic Non-DBI Databases
+ Storage Managers and Layers
+ Query Languages and Data Functions
+ Standing Stones and the Sample Database
+ Flat-File Databases
+ Putting Complex Data into Flat Files
+ Concurrent Database Access and Locking
+ DBM Files and the Berkeley Database Manager
+ The MLDBM Module
+ Summary
+ 3. SQL and Relational Databases
+ The Relational Database Methodology
+ Datatypes and NULL Values
+ Querying Data
+ Modifying Data Within Tables
+ Creating and Destroying Tables
+ 4. Programming with the DBI
+ DBI Architecture
+ Handles
+ Data Source Names
+ Connection and Disconnection
+ Error Handling
+ Utility Methods and Functions
+ 5. Interacting with the Database
+ Issuing Simple Queries
+ Executing Non-SELECT Statements
+ Binding Parameters to Statements
+ Binding Output Columns
+ do() Versus prepare()
+ Atomic and Batch Fetching
+ 6. Advanced DBI
+ Handle Attributes and Metadata
+ Handling LONG/LOB Data
+ Transactions, Locking, and Isolation
+ 7. ODBC and the DBI
+ ODBC -- Embraced and Extended
+ DBI -- Thrashed and Mutated
+ The Nuts and Bolts of ODBC
+ ODBC from Perl
+ The Marriage of DBI and ODBC
+ Questions and Choices
+ Moving Between Win32::ODBC and the DBI
+ And What About ADO?
+ 8. DBI Shell and Database Proxying
+ dbish -- The DBI Shell
+ Database Proxying
+ A. DBI Specification
+ B. Driver and Database Characteristics
+ C. ASLaN Sacred Site Charter
+ Index
+
+The book should be available from all good bookshops and can be ordered online
+either I<via> O'Reilly & Associates
+
+ http://www.oreilly.com/catalog/perldbi
+
+or Amazon
+
+ http://www.amazon.com/exec/obidos/ASIN/1565926994/dbi
+
+=item I<POD documentation>
+
+I<POD>s are chunks of documentation usually embedded within perl programs
+that document the code ``I<in place>'', providing a useful resource for
+programmers and users of modules. POD for DBI and drivers is beginning to
+become more commonplace, and documentation for these modules can be read
+with the C<perldoc> program included with Perl.
+
+=over 4
+
+=item The DBI Specification
+
+The POD for the DBI Specification can be read with the:
+
+ perldoc DBI
+
+command. The Specification also forms Appendix A of "Programming the Perl
+DBI".
+
+=item Oraperl
+
+Users of the Oraperl emulation layer bundled with I<DBD::Oracle>, may read
+up on how to program with the Oraperl interface by typing:
+
+ perldoc Oraperl
+
+This will produce an updated copy of the original oraperl man page written by
+Kevin Stock for perl4. The oraperl API is fully listed and described there.
+
+=item Drivers
+
+Users of the DBD modules may read about some of the private functions
+and quirks of that driver by typing:
+
+ perldoc <driver>
+
+For example, the I<DBD::mSQL> driver is bundled with driver-specific
+documentation that can be accessed by typing
+
+ perldoc DBD::mSQL
+
+=item Frequently Asked Questions
+
+This document, the I<Frequently Asked Questions> is also available as POD
+documentation! You can read this on your own system by typing:
+
+ perldoc DBI::FAQ
+
+This may be more convenient to persons not permanently, or conveniently,
+connected to the Internet. The I<DBI::FAQ> module should be downloaded and
+installed for the more up-to-date version.
+
+The version of I<DBI::FAQ> shipped with the C<DBI> module may be slightly out
+of date.
+
+=item POD in general
+
+Information on writing POD, and on the philosophy of POD in general, can be
+read by typing:
+
+ perldoc perlpod
+
+Users with the Tk module installed may be interested to learn there is a
+Tk-based POD reader available called C<tkpod>, which formats POD in a convenient
+and readable way. This is available I<via> CPAN as the module called
+I<Tk::POD> and is highly recommended.
+
+=back
+
+=item I<Driver and Database Characteristics>
+
+The driver summaries that were produced for Appendix B of "Programming the
+Perl DBI" are available online at:
+
+ http://dbi.perl.org/
+
+in the driver information table. These summaries contain standardised
+information on each driver and database which should aid you in selecting
+a database to use. It will also inform you quickly of any issues within
+drivers or whether a driver is not fully compliant with the DBI Specification.
+
+=item I<Rambles, Tidbits and Observations>
+
+ http://dbi.perl.org/tidbits
+
+There are a series of occasional rambles from various people on the
+DBI mailing lists who, in an attempt to clear up a simple point, end up
+drafting fairly comprehensive documents. These are quite often varying in
+quality, but do provide some insights into the workings of the interfaces.
+
+=item I<Articles>
+
+A list of articles discussing the DBI can be found on the DBI WWW page at:
+
+ http://dbi.perl.org/
+
+These articles are of varying quality and age, from the original Perl Journal
+article written by Alligator and Tim, to more recent debacles published online
+from about.com.
+
+=item I<README files>
+
+The I<README> files included with each driver occasionally contains
+some useful information ( no, really! ) that may be pertinent to the user.
+Please read them. It makes our worthless existences more bearable. These
+can all be read from the main DBI WWW page at:
+
+ http://dbi.perl.org/
+
+=item I<Mailing Lists>
+
+There are three mailing lists for DBI:
+
+ dbi-announce@perl.org -- for announcements, very low traffic
+ dbi-users@perl.org -- general user support
+ dbi-dev@perl.org -- for driver developers (no user support)
+
+For information on how to subscribe, set digest mode etc, and unsubscribe,
+send an email message (the content will be ignored) to:
+
+ dbi-announce-help@perl.org
+ dbi-users-help@perl.org
+ dbi-dev-help@perl.org
+
+=item I<Mailing List Archives>
+
+=over 4
+
+=item I<US Mailing List Archives>
+
+ http://outside.organic.com/mail-archives/dbi-users/
+
+Searchable hypermail archives of the three mailing lists, and some of the
+much older traffic have been set up for users to browse.
+
+=item I<European Mailing List Archives>
+
+ http://www.rosat.mpe-garching.mpg.de/mailing-lists/PerlDB-Interest
+
+As per the US archive above.
+
+=back
+
+=back
+
+=head1 Compilation Problems
+
+=head2 2.1. Compilation problems or "It fails the test!"
+
+First off, consult the README for that driver in case there is useful
+information about the problem. It may be a known problem for your given
+architecture and operating system or database. You can check the README
+files for each driver in advance online at:
+
+ http://dbi.perl.org/
+
+If it's a known problem, you'll probably have to wait till it gets fixed. If
+you're I<really> needing it fixed, try the following:
+
+=over 4
+
+=item I<Attempt to fix it yourself>
+
+This technique is generally I<not> recommended to the faint-hearted.
+If you do think you have managed to fix it, then, send a patch file
+( context diff ) to the author with an explanation of:
+
+=over 4
+
+=item *
+
+What the problem was, and test cases, if possible.
+
+=item *
+
+What you needed to do to fix it. Please make sure you mention everything.
+
+=item *
+
+Platform information, database version, perl version, module version and
+DBI version.
+
+=back
+
+=item I<Email the author> Do I<NOT> whinge!
+
+Please email the address listed in the WWW pages for whichever driver you
+are having problems with. Do I<not> directly email the author at a
+known address unless it corresponds with the one listed.
+
+We tend to have real jobs to do, and we do read the mailing lists for
+problems. Besides, we may not have access to <I<insert your
+favourite brain-damaged platform here>> and couldn't be of any
+assistance anyway! Apologies for sounding harsh, but that's the way of it!
+
+However, you might catch one of these creative genii at 3am when we're
+doing this sort of stuff anyway, and get a patch within 5 minutes. The
+atmosphere in the DBI circle is that we I<do> appreciate the users'
+problems, since we work in similar environments.
+
+If you are planning to email the author, please furnish as much information
+as possible, I<ie>:
+
+=over 4
+
+=item *
+
+I<ALL> the information asked for in the README file in
+the problematic module. And we mean I<ALL> of it. We don't
+put lines like that in documentation for the good of our health, or
+to meet obscure README file standards of length.
+
+=item *
+
+If you have a core dump, try the I<Devel::CoreStack> module for
+generating a stack trace from the core dump. Send us that too.
+I<Devel::CoreStack> can be found on CPAN at:
+
+ http://www.perl.com/cgi-bin/cpan_mod?module=Devel::CoreStack
+
+=item *
+
+Module versions, perl version, test cases, operating system versions
+and I<any other pertinent information>.
+
+=back
+
+Remember, the more information you send us, the quicker we can track
+problems down. If you send us no useful information, expect nothing back.
+
+Finally, please be aware that some authors, including Tim Bunce, specifically
+request that you do I<not> mail them directly. Please respect their wishes and
+use the email addresses listed in the appropriate module C<README> file.
+
+=item I<Email the dbi-users Mailing List>
+
+It's usually a fairly intelligent idea to I<cc> the mailing list
+anyway with problems. The authors all read the lists, so you lose nothing
+by mailing there.
+
+=back
+
+=head1 Platform and Driver Issues
+
+=head2 3.1 What's the difference between ODBC and DBI?
+
+In terms of architecture - not much: Both define programming
+interfaces. Both allow multiple drivers to be loaded to do the
+actual work.
+
+In terms of ease of use - much: The DBI is a 'high level' interface
+that, like Perl itself, strives to make the simple things easy while
+still making the hard things possible. The ODBC is a 'low level'
+interface. All nuts-bolts-knobs-and-dials.
+
+Now there's an ODBC driver for the DBI (DBD::ODBC) the "What's the
+difference" question is more usefully rephrased as:
+
+Chapter 7 of "Programming the Perl DBI" covers this topic in far more
+detail and should be consulted.
+
+=head2 3.2 What's the difference between Win32::ODBC and DBD::ODBC?
+
+The DBI, and thus DBD::ODBC, has a different philosophy from the
+Win32::ODBC module:
+
+The Win32::ODBC module is a 'thin' layer over the low-level ODBC API.
+The DBI defines a simpler 'higher level' interface.
+
+The Win32::ODBC module gives you access to more of the ODBC API.
+The DBI and DBD::ODBC give you access to only the essentials.
+(But, unlike Win32::ODBC, the DBI and DBD::ODBC do support parameter
+binding and multiple prepared statements which reduces the load on
+the database server and can dramatically increase performance.)
+
+The Win32::ODBC module only works on Win32 systems.
+The DBI and DBD::ODBC are very portable and work on Win32 and Unix.
+
+The DBI and DBD::ODBC modules are supplied as a standard part of the
+Perl 5.004 binary distribution for Win32 (they don't work with the
+older, non-standard, ActiveState port).
+
+Scripts written with the DBI and DBD::ODBC are faster than Win32::ODBC
+on Win32 and are trivially portable to other supported database types.
+
+The DBI offers optional automatic printing or die()ing on errors which
+makes applications simpler and more robust.
+
+The current DBD::ODBC driver version 0.16 is new and not yet fully stable.
+A new release is due soon [relative to the date of the next TPJ issue :-]
+and will be much improved and offer more ODBC functionality.
+
+To summarise: The Win32::ODBC module is your best choice if you need
+access to more of the ODBC API than the DBI gives you. Otherwise, the
+DBI and DBD::ODBC combination may be your best bet.
+
+Chapter 7 of "Programming the Perl DBI" covers this topic in far more
+detail and should be consulted.
+
+=head2 3.3 Is DBI supported under Windows 95 / NT platforms?
+
+Finally, yes! Jeff Urlwin has been working diligently on building
+I<DBI> and I<DBD::ODBC> under these platforms, and, with the
+advent of a stabler perl and a port of I<MakeMaker>, the project has
+come on by great leaps and bounds.
+
+The I<DBI> and I<DBD::Oracle> Win32 ports are now a standard part of DBI,
+so, downloading I<DBI> of version higher than I<0.81> should work fine as
+should using the most recent I<DBD::Oracle> version.
+
+=head2 3.4 Can I access Microsoft Access or SQL-Server databases with DBI?
+
+Yes, use the I<DBD::ODBC> driver.
+
+=head2 3.5 Is there a DBD for <I<insert favourite database here>>?
+
+First check if a driver is available on CPAN by searching for the name of the
+database (including common abbreviations and aliases).
+
+Here's a general query that'll match all distributions:
+
+ http://search.cpan.org/search?query=DBD&mode=dist
+
+If you can't find a driver that way, you could check if the database supports
+ODBC drivers. If so then you could probably use the DBD::ODBC driver:
+
+ http://search.cpan.org/dist/DBD-ODBC/
+
+If not, then try asking on the dbi-users mailing list.
+
+=head2 3.6 What's DBM? And why should I use DBI instead?
+
+Extracted from ``I<DBI - The Database Interface for Perl 5>'':
+
+ ``UNIX was originally blessed with simple file-based ``databases'', namely
+ the dbm system. dbm lets you store data in files, and retrieve
+ that data quickly. However, it also has serious drawbacks.
+
+ File Locking
+
+ The dbm systems did not allow particularly robust file locking
+ capabilities, nor any capability for correcting problems arising through
+ simultaneous writes [ to the database ].
+
+ Arbitrary Data Structures
+
+ The dbm systems only allows a single fixed data structure:
+ key-value pairs. That value could be a complex object, such as a
+ [ C ] struct, but the key had to be unique. This was a large
+ limitation on the usefulness of dbm systems.
+
+ However, dbm systems still provide a useful function for users with
+ simple datasets and limited resources, since they are fast, robust and
+ extremely well-tested. Perl modules to access dbm systems have now
+ been integrated into the core Perl distribution via the
+ AnyDBM_File module.''
+
+To sum up, DBM is a perfectly satisfactory solution for essentially read-only
+databases, or small and simple datasets. However, for more
+scaleable dataset handling, not to mention robust transactional locking,
+users are recommended to use a more powerful database engine I<via> I<DBI>.
+
+Chapter 2 of "Programming the Perl DBI" discusses DBM files in detail.
+
+=head2 3.7 What database do you recommend using?
+
+This is a particularly thorny area in which an objective answer is difficult
+to come by, since each dataset, proposed usage and system configuration
+differs from person to person.
+
+From the current author's point of view, if the dataset is relatively
+small, being tables of less than 1 million rows, and less than 1000 tables
+in a given database, then I<mSQL> is a perfectly acceptable solution
+to your problem. This database is extremely cheap, is wonderfully robust
+and has excellent support. More information is available on the Hughes
+Technology WWW site at:
+
+ http://www.hughes.com.au
+
+You may also wish to look at MySQL which is a more powerful database engine
+that has a similar feel to mSQL.
+
+ http://www.tcx.se
+
+If the dataset is larger than 1 million row tables or 1000 tables, or if you
+have either more money, or larger machines, I would recommend I<Oracle RDBMS>.
+Oracle's WWW site is an excellent source of more information.
+
+ http://www.oracle.com
+
+I<Informix> is another high-end RDBMS that is worth considering. There are
+several differences between Oracle and Informix which are too complex for
+this document to detail. Information on Informix can be found on their
+WWW site at:
+
+ http://www.informix.com
+
+In the case of WWW fronted applications, I<mSQL> may be a better option
+due to slow connection times between a CGI script and the Oracle RDBMS and
+also the amount of resource each Oracle connection will consume. I<mSQL>
+is lighter resource-wise and faster.
+
+These views are not necessarily representative of anyone else's opinions,
+and do not reflect any corporate sponsorship or views. They are provided
+I<as-is>.
+
+=head2 3.8 Is <I<insert feature here>> supported in DBI?
+
+Given that we're making the assumption that the feature you have requested
+is a non-standard database-specific feature, then the answer will be I<no>.
+
+DBI reflects a I<generic> API that will work for most databases, and has
+no database-specific functionality.
+
+However, driver authors may, if they so desire, include hooks to database-specific
+functionality through the C<func()> method defined in the DBI API.
+Script developers should note that use of functionality provided I<via>
+the C<func()> methods is very unlikely to be portable across databases.
+
+=head1 Programming Questions
+
+=head2 4.1 Is DBI any use for CGI programming?
+
+In a word, yes! DBI is hugely useful for CGI programming! In fact, I would
+tentatively say that CGI programming is one of two top uses for DBI.
+
+DBI confers the ability to CGI programmers to power WWW-fronted databases
+to their users, which provides users with vast quantities of ordered
+data to play with. DBI also provides the possibility that, if a site is
+receiving far too much traffic than their database server can cope with, they
+can upgrade the database server behind the scenes with no alterations to
+the CGI scripts.
+
+=head2 4.2 How do I get faster connection times with DBD::Oracle and CGI?
+
+ Contributed by John D. Groenveld
+
+The Apache C<httpd> maintains a pool of C<httpd> children to service client
+requests.
+
+Using the Apache I<mod_perl> module by I<Doug MacEachern>, the perl
+interpreter is embedded with the C<httpd> children. The CGI, DBI, and your
+other favorite modules can be loaded at the startup of each child. These
+modules will not be reloaded unless changed on disk.
+
+For more information on Apache, see the Apache Project's WWW site:
+
+ http://www.apache.org
+
+The I<mod_perl> module can be downloaded from CPAN I<via>:
+
+ http://www.perl.com/cgi-bin/cpan_mod?module=Apache
+
+=head2 4.3 How do I get persistent connections with DBI and CGI?
+
+ Contributed by John D. Groenveld
+
+Using Edmund Mergl's I<Apache::DBI> module, database logins are stored in a
+hash with each of these C<httpd> child. If your application is based on a
+single database user, this connection can be started with each child.
+Currently, database connections cannot be shared between C<httpd> children.
+
+I<Apache::DBI> can be downloaded from CPAN I<via>:
+
+ http://www.perl.com/cgi-bin/cpan_mod?module=Apache::DBI
+
+=head2 4.4 ``When I run a perl script from the command line, it works, but, when I run it under the C<httpd>, it fails!'' Why?
+
+Basically, there is a good chance this is occurring because the user
+you ran it as from the command line has a correctly configured set of
+environment variables, in the case of I<DBD::Oracle>, variables like
+C<ORACLE_HOME>, C<ORACLE_SID> or C<TWO_TASK>.
+
+The C<httpd> process usually runs under the user id of C<nobody>,
+which implies there is no configured environment. Any scripts attempting to
+execute in this situation will correctly fail.
+
+One way to solve this problem is to set the environment for your database in a
+C<BEGIN { }> block at the top of your script. Another technique is to configure
+your WWW server to pass-through certain environment variables to your CGI
+scripts.
+
+Similarly, you should check your C<httpd> error logfile for any clues,
+as well as the ``Idiot's Guide To Solving Perl / CGI Problems'' and
+``Perl CGI Programming FAQ'' for further information. It is
+unlikely the problem is DBI-related.
+
+The ``Idiot's Guide To Solving Perl / CGI Problems'' can be located at:
+
+ http://www.perl.com/perl/faq/index.html
+
+as can the ``Perl CGI Programming FAQ''. Read I<BOTH> these documents
+carefully!
+
+=head2 4.5 How do I get the number of rows returned from a C<SELECT> statement?
+
+Count them. Read the DBI docs for the C<rows()> method.
+
+=head1 Miscellaneous Questions
+
+=head2 5.1 Can I do multi-threading with DBI?
+
+Perl version 5.005 and later can be built to support multi-threading.
+The DBI, as of version 1.02, does not yet support multi-threading
+so it would be unsafe to let more than one thread enter the DBI at
+the same time.
+
+It is expected that some future version of the DBI will at least be
+thread-safe (but not thread-hot) by automatically blocking threads
+entering the DBI while it's already in use.
+
+=head2 5.2 How do I handle BLOB data with DBI?
+
+Handling BLOB data with the DBI is very straight-forward. BLOB columns are
+specified in a SELECT statement as per normal columns. However, you also
+need to specify a maximum BLOB size that the I<database handle> can
+fetch using the C<LongReadLen> attribute.
+
+For example:
+
+ ### $dbh is a connected database handle
+ $sth = $dbh->prepare( "SELECT blob_column FROM blobby_table" );
+ $sth->execute;
+
+would fail.
+
+ ### $dbh is a connected database handle
+ ### Set the maximum BLOB size...
+ $dbh->{LongReadLen} = 16384; ### 16Kb...Not much of a BLOB!
+
+ $sth = $dbh->prepare( "..." );
+
+would succeed I<provided no column values were larger than the specified
+value>.
+
+If the BLOB data is longer than the value of C<LongReadLen>, then an
+error will occur. However, the DBI provides an additional piece of
+functionality that will automatically truncate the fetched BLOB to the
+size of C<LongReadLen> if it is longer. This does not cause an error to
+occur, but may make your fetched BLOB data useless.
+
+This behaviour is regulated by the C<LongTruncOk> attribute which is
+set by default to a false value ( thus making overlong BLOB fetches fail ).
+
+ ### Set BLOB handling such that it's 16Kb and can be truncated
+ $dbh->{LongReadLen} = 16384;
+ $dbh->{LongTruncOk} = 1;
+
+Truncation of BLOB data may not be a big deal in cases where the BLOB
+contains run-length encoded data, but data containing checksums at the end,
+for example, a ZIP file, would be rendered useless.
+
+=head2 5.3 How can I invoke stored procedures with DBI?
+
+The DBI does not define a database-independent way of calling stored procedures.
+
+However, most databases that support them also provide a way to call
+them from SQL statements - and the DBI certainly supports that.
+
+So, assuming that you have created a stored procedure within the target
+database, I<eg>, an Oracle database, you can use C<$dbh>->C<do()> to
+immediately execute the procedure. For example,
+
+ $dbh->do( "BEGIN someProcedure; END;" ); # Oracle-specific
+
+You should also be able to C<prepare> and C<execute>, which is
+the recommended way if you'll be calling the procedure often.
+
+=head2 5.4 How can I get return values from stored procedures with DBI?
+
+ Contributed by Jeff Urlwin
+
+ $sth = $dbh->prepare( "BEGIN foo(:1, :2, :3); END;" );
+ $sth->bind_param(1, $a);
+ $sth->bind_param_inout(2, \$path, 2000);
+ $sth->bind_param_inout(3, \$success, 2000);
+ $sth->execute;
+
+Remember to perform error checking, though! ( Or use the C<RaiseError>
+attribute ).
+
+=head2 5.5 How can I create or drop a database with DBI?
+
+Database creation and deletion are concepts that are entirely too abstract
+to be adequately supported by DBI. For example, Oracle does not support the
+concept of dropping a database at all! Also, in Oracle, the database
+I<server> essentially I<is> the database, whereas in mSQL, the
+server process runs happily without any databases created in it. The
+problem is too disparate to attack in a worthwhile way.
+
+Some drivers, therefore, support database creation and deletion through
+the private C<func()> methods. You should check the documentation for
+the drivers you are using to see if they support this mechanism.
+
+=head2 5.6 How can I C<commit> or C<rollback> a statement with DBI?
+
+See the C<commit()> and C<rollback()> methods in the DBI Specification.
+
+Chapter 6 of "Programming the Perl DBI" discusses transaction handling within
+the context of DBI in more detail.
+
+=head2 5.7 How are C<NULL> values handled by DBI?
+
+C<NULL> values in DBI are specified to be treated as the value C<undef>.
+C<NULL>s can be inserted into databases as C<NULL>, for example:
+
+ $rv = $dbh->do( "INSERT INTO table VALUES( NULL )" );
+
+but when queried back, the C<NULL>s should be tested against C<undef>.
+This is standard across all drivers.
+
+=head2 5.8 What are these C<func()> methods all about?
+
+The C<func()> method is defined within DBI as being an entry point
+for database-specific functionality, I<eg>, the ability to create or
+drop databases. Invoking these driver-specific methods is simple, for example,
+to invoke a C<createDatabase> method that has one argument, we would
+write:
+
+ $rv = $dbh->func( 'argument', 'createDatabase' );
+
+Software developers should note that the C<func()> methods are
+non-portable between databases.
+
+=head2 5.9 Is DBI Year 2000 Compliant?
+
+DBI has no knowledge or understanding of what dates are. Therefore, DBI
+itself does not have a Year 2000 problem. Individual drivers may use date
+handling code internally and therefore be potentially susceptible to the
+Year 2000 problem, but this is unlikely.
+
+You may also wish to read the ``Does Perl have a Year 2000 problem?'' section
+of the Perl FAQ at:
+
+ http://www.perl.com/CPAN/doc/FAQs/FAQ/PerlFAQ.html
+
+=head1 Support and Training
+
+The Perl5 Database Interface is I<FREE> software. IT COMES WITHOUT WARRANTY
+OF ANY KIND. See the DBI README for more details.
+
+However, some organizations are providing either technical support or
+training programs on DBI. The present author has no knowledge as
+to the quality of these services. The links are included for reference
+purposes only and should not be regarded as recommendations in any way.
+I<Caveat emptor>.
+
+=head2 Commercial Support
+
+=over 4
+
+=item The Perl Clinic
+
+The Perl Clinic provides commercial support for I<Perl> and Perl
+related problems, including the I<DBI> and its drivers. Support is
+provided by the company with whom Tim Bunce, author of I<DBI> and
+I<DBD::Oracle>, works and ActiveState. For more information on their
+services, please see:
+
+ http://www.perlclinic.com
+
+=back
+
+=head2 Training
+
+=over 4
+
+=item Westlake Solutions
+
+A hands-on class for experienced Perl CGI developers that teaches
+how to write database-connected CGI scripts using Perl and DBI.pm. This
+course, along with four other courses on CGI scripting with Perl, is
+taught in Washington, DC; Arlington, Virginia; and on-site worldwide upon
+request.
+
+See:
+
+ http://www.westlake.com/training
+
+for more details.
+
+=back
+
+=head1 Other References
+
+In this section, we present some miscellaneous WWW links that may be of
+some interest to DBI users. These are not verified and may result in
+unknown sites or missing documents.
+
+ http://www-ccs.cs.umass.edu/db.html
+ http://www.odmg.org/odmg93/updates_dbarry.html
+ http://www.jcc.com/sql_stnd.html
+
+=head1 AUTHOR
+
+Alligator Descartes.
+Portions are Copyright their original stated authors.
+
+=head1 COPYRIGHT
+
+This document is Copyright (c)1994-2000 Alligator Descartes, with portions
+Copyright (c)1994-2000 their original authors. This module is released under
+the 'Artistic' license which you can find in the perl distribution.
+
+This document is Copyright (c)1997-2000 Alligator Descartes. All rights reserved.
+Permission to distribute this document, in full or in part, via email,
+Usenet, ftp archives or http is granted providing that no charges are involved,
+reasonable attempt is made to use the most current version and all credits
+and copyright notices are retained ( the I<AUTHOR> and I<COPYRIGHT> sections ).
+Requests for other distribution rights, including incorporation into
+commercial products, such as books, magazine articles or CD-ROMs should be
+made to Alligator Descartes.
+
+=for html
+<!--#include virtual="/technology/perl/DBI/templatebottom.html" -->
+</BODY>
+</HTML>
diff --git a/lib/DBI/Gofer/Execute.pm b/lib/DBI/Gofer/Execute.pm
new file mode 100644
index 0000000..7d75df2
--- /dev/null
+++ b/lib/DBI/Gofer/Execute.pm
@@ -0,0 +1,900 @@
+package DBI::Gofer::Execute;
+
+# $Id: Execute.pm 14282 2010-07-26 00:12:54Z theory $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use Carp;
+
+use DBI qw(dbi_time);
+use DBI::Gofer::Request;
+use DBI::Gofer::Response;
+
+use base qw(DBI::Util::_accessor);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 14282 $ =~ /(\d+)/o);
+
+our @all_dbh_methods = sort map { keys %$_ } $DBI::DBI_methods{db}, $DBI::DBI_methods{common};
+our %all_dbh_methods = map { $_ => (DBD::_::db->can($_)||undef) } @all_dbh_methods;
+
+our $local_log = $ENV{DBI_GOFER_LOCAL_LOG}; # do extra logging to stderr
+
+our $current_dbh; # the dbh we're using for this request
+
+
+# set trace for server-side gofer
+# Could use DBI_TRACE env var when it's an unrelated separate process
+# but using DBI_GOFER_TRACE makes testing easier for subprocesses (eg stream)
+DBI->trace(split /=/, $ENV{DBI_GOFER_TRACE}, 2) if $ENV{DBI_GOFER_TRACE};
+
+
+# define valid configuration attributes (args to new())
+# the values here indicate the basic type of values allowed
+# (1 = plain scalar, {} = hash ref expected, sub {} = code ref expected)
+my %configuration_attributes = (
+    gofer_execute_class => 1,
+    default_connect_dsn => 1,
+    forced_connect_dsn => 1,
+    default_connect_attributes => {},
+    forced_connect_attributes => {},
+    track_recent => 1,
+    check_request_sub => sub {},
+    check_response_sub => sub {},
+    forced_single_resultset => 1,
+    max_cached_dbh_per_drh => 1,
+    max_cached_sth_per_dbh => 1,
+    forced_response_attributes => {},
+    forced_gofer_random => 1,
+    stats => {},
+);
+
+# generate a read/write accessor method for each configuration attribute
+__PACKAGE__->mk_accessors(
+    keys %configuration_attributes
+);
+
+
+
+# Constructor. Takes a hash ref of configuration attributes (see
+# %configuration_attributes above), fills in defaults for a few of
+# them, then delegates object construction to the base class.
+sub new {
+    my ($self, $args) = @_;
+    $args->{default_connect_attributes} ||= {};
+    $args->{forced_connect_attributes} ||= {};
+    $args->{max_cached_sth_per_dbh} ||= 1000;
+    $args->{stats} ||= {};
+    return $self->SUPER::new($args);
+}
+
+
+# Return a shallow copy of the valid configuration attributes hash,
+# so callers can inspect it without being able to mutate the original.
+sub valid_configuration_attributes {
+    my $self = shift;
+    return { %configuration_attributes };
+}
+
+
+# Fallback lists of driver-private attribute names to report back to
+# clients, keyed by driver name then handle type ('dbh' or 'sth').
+my %extra_attr = (
+    # Only referenced if the driver doesn't support private_attribute_info method.
+    # What driver-specific attributes should be returned for the driver being used?
+    # keyed by $dbh->{Driver}{Name}
+    # XXX for sth should split into attr specific to resultsets (where NUM_OF_FIELDS > 0) and others
+    # which would reduce processing/traffic for non-select statements
+    mysql  => {
+        dbh => [qw(
+            mysql_errno mysql_error mysql_hostinfo mysql_info mysql_insertid
+            mysql_protoinfo mysql_serverinfo mysql_stat mysql_thread_id
+        )],
+        sth => [qw(
+            mysql_is_blob mysql_is_key mysql_is_num mysql_is_pri_key mysql_is_auto_increment
+            mysql_length mysql_max_length mysql_table mysql_type mysql_type_name mysql_insertid
+        )],
+        # XXX this dbh_after_sth stuff is a temporary, but important, hack.
+        # should be done via hash instead of arrays where the hash value contains
+        # flags that can indicate which attributes need to be handled in this way
+        dbh_after_sth => [qw(
+            mysql_insertid
+        )],
+    },
+    Pg  => {
+        dbh => [qw(
+            pg_protocol pg_lib_version pg_server_version
+            pg_db pg_host pg_port pg_default_port
+            pg_options pg_pid
+        )],
+        sth => [qw(
+            pg_size pg_type pg_oid_status pg_cmd_status
+        )],
+    },
+    Sybase => {
+        dbh => [qw(
+            syb_dynamic_supported syb_oc_version syb_server_version syb_server_version_string
+        )],
+        sth => [qw(
+            syb_types syb_proc_status syb_result_type
+        )],
+    },
+    SQLite => {
+        dbh => [qw(
+            sqlite_version
+        )],
+        sth => [qw(
+        )],
+    },
+    ExampleP => {
+        dbh => [qw(
+            examplep_private_dbh_attrib
+        )],
+        sth => [qw(
+            examplep_private_sth_attrib
+        )],
+        dbh_after_sth => [qw(
+            examplep_insertid
+        )],
+    },
+);
+
+
+# Resolve the DSN and attributes for a request and return a server-side
+# $dbh, normally via DBI->connect_cached. Also performs housekeeping:
+# every 1000 requests it discards cached dbh from drivers holding more
+# than max_cached_dbh_per_drh, and it clears this dbh's CachedKids
+# (cached sth) when over max_cached_sth_per_dbh.
+sub _connect {
+    my ($self, $request) = @_;
+
+    my $stats = $self->{stats};
+
+    # discard CachedKids from time to time
+    if (++$stats->{_requests_served} % 1000 == 0 # XXX config?
+        and my $max_cached_dbh_per_drh = $self->{max_cached_dbh_per_drh}
+    ) {
+        my %drivers = DBI->installed_drivers();
+        while ( my ($driver, $drh) = each %drivers ) {
+            next unless my $CK = $drh->{CachedKids};
+            next unless keys %$CK > $max_cached_dbh_per_drh;
+            next if $driver eq 'Gofer'; # ie transport=null when testing
+            # NOTE(review): the sprintf format has one %d but two arguments
+            # are supplied; the second (max_cached_dbh_per_drh) is ignored
+            DBI->trace_msg(sprintf "Clearing %d cached dbh from $driver",
+                scalar keys %$CK, $self->{max_cached_dbh_per_drh});
+            $_->{Active} && $_->disconnect for values %$CK;
+            %$CK = ();
+        }
+    }
+
+    # local $ENV{...} can leak, so only do it if required
+    local $ENV{DBI_AUTOPROXY} if $ENV{DBI_AUTOPROXY};
+
+    my ($connect_method, $dsn, $username, $password, $attr) = @{ $request->dbh_connect_call };
+    $connect_method ||= 'connect_cached';
+    $stats->{method_calls_dbh}->{$connect_method}++;
+
+    # delete attributes we don't want to affect the server-side
+    # (Could just do this on client-side and trust the client. DoS?)
+    delete @{$attr}{qw(Profile InactiveDestroy AutoInactiveDestroy HandleError HandleSetErr TraceLevel Taint TaintIn TaintOut)};
+
+    # forced_connect_dsn (if configured) overrides the requested dsn,
+    # which in turn overrides default_connect_dsn
+    $dsn = $self->forced_connect_dsn || $dsn || $self->default_connect_dsn
+        or die "No forced_connect_dsn, requested dsn, or default_connect_dsn for request";
+
+    my $random = $self->{forced_gofer_random} || $ENV{DBI_GOFER_RANDOM} || '';
+
+    my $connect_attr = {
+
+        # the configured default attributes, if any
+        %{ $self->default_connect_attributes },
+
+        # pass username and password as attributes
+        # then they can be overridden by forced_connect_attributes
+        Username => $username,
+        Password => $password,
+
+        # the requested attributes
+        %$attr,
+
+        # force some attributes the way we'd like them
+        PrintWarn => $local_log,
+        PrintError => $local_log,
+
+        # the configured default attributes, if any
+        %{ $self->forced_connect_attributes },
+
+        # RaiseError must be enabled
+        RaiseError => 1,
+
+        # reset Executed flag (of the cached handle) so we can use it to tell
+        # if errors happened before the main part of the request was executed
+        Executed => 0,
+
+        # ensure this connect_cached doesn't have the same args as the client
+        # because that causes subtle issues if in the same process (ie transport=null)
+        # include pid to avoid problems with forking (ie null transport in mod_perl)
+        # include gofer-random to avoid random behaviour leaking to other handles
+        dbi_go_execute_unique => join("|", __PACKAGE__, $$, $random),
+    };
+
+    # XXX implement our own private connect_cached method? (with rate-limited ping)
+    my $dbh = DBI->$connect_method($dsn, undef, undef, $connect_attr);
+
+    $dbh->{ShowErrorStatement} = 1 if $local_log;
+
+    # XXX should probably just be a Callbacks => arg to connect_cached
+    # with a cache of pre-built callback hooks (memoized, without $self)
+    if (my $random = $self->{forced_gofer_random} || $ENV{DBI_GOFER_RANDOM}) {
+        $self->_install_rand_callbacks($dbh, $random);
+    }
+
+    my $CK = $dbh->{CachedKids};
+    if ($CK && keys %$CK > $self->{max_cached_sth_per_dbh}) {
+        %$CK = (); # clear all statement handles
+    }
+
+    #$dbh->trace(0);
+    # remember the dbh so execute_request can build an error response
+    # against it even if later processing dies
+    $current_dbh = $dbh;
+    return $dbh;
+}
+
+
+# Clear any error state left on the dbh so it doesn't leak into the
+# next request served by this (cached) handle.
+sub reset_dbh {
+    my ($self, $dbh) = @_;
+    $dbh->set_err(undef, undef); # clear any error state
+}
+
+
+# Build a DBI::Gofer::Response for $rv, capturing the current DBI error
+# state ($DBI::err/errstr/state) and merging in $eval_error (the $@
+# from the caller's eval, if any).
+sub new_response_with_err {
+    my ($self, $rv, $eval_error, $dbh) = @_;
+    # this is the usual way to create a response for both success and failure
+    # capture err+errstr etc and merge in $eval_error ($@)
+
+    my ($err, $errstr, $state) = ($DBI::err, $DBI::errstr, $DBI::state);
+
+    if ($eval_error) {
+        $err ||= $DBI::stderr || 1; # ensure err is true
+        if ($errstr) {
+            # strip the errstr out of the eval error to avoid duplication
+            # (the trailing "if $errstr" is redundant inside this branch)
+            $eval_error =~ s/(?: : \s)? \Q$errstr//x if $errstr;
+            chomp $errstr;
+            $errstr .= "; $eval_error";
+        }
+        else {
+            $errstr = $eval_error;
+        }
+    }
+    chomp $errstr if $errstr;
+
+    my $flags;
+    # (XXX if we ever add transaction support then we'll need to take extra
+    # steps because the commit/rollback would reset Executed before we get here)
+    # NOTE(review): GOf_RESPONSE_EXECUTED is a bareword here — presumably
+    # exported by DBI::Gofer::Response; confirm it is imported into this package
+    $flags |= GOf_RESPONSE_EXECUTED if $dbh && $dbh->{Executed};
+
+    my $response = DBI::Gofer::Response->new({
+        rv => $rv,
+        err => $err,
+        errstr => $errstr,
+        state => $state,
+        flags => $flags,
+    });
+
+    return $response;
+}
+
+
+# Top-level entry point for servicing a single Gofer request.
+# Should never throw: exceptions are captured into the response via
+# new_response_with_err, and warnings raised during execution are
+# collected and attached to the response.
+sub execute_request {
+    my ($self, $request) = @_;
+    # should never throw an exception
+
+    DBI->trace_msg("-----> execute_request\n");
+
+    my @warnings;
+    local $SIG{__WARN__} = sub {
+        push @warnings, @_;
+        warn @_ if $local_log;
+    };
+
+    my $response = eval {
+
+        # optional hook to vet/rewrite the incoming request
+        if (my $check_request_sub = $self->check_request_sub) {
+            $request = $check_request_sub->($request, $self)
+                or die "check_request_sub failed";
+        }
+
+        my $version = $request->version || 0;
+        die ref($request)." version $version is not supported"
+            if $version < 0.009116 or $version >= 1;
+
+        ($request->is_sth_request)
+            ? $self->execute_sth_request($request)
+            : $self->execute_dbh_request($request);
+    };
+    # if the eval died, build an error response from $@ and the last dbh
+    $response ||= $self->new_response_with_err(undef, $@, $current_dbh);
+
+    if (my $check_response_sub = $self->check_response_sub) {
+        # not protected with an eval so it can choose to throw an exception
+        my $new = $check_response_sub->($response, $self, $request);
+        $response = $new if ref $new;
+    }
+
+    undef $current_dbh;
+
+    $response->warnings(\@warnings) if @warnings;
+    DBI->trace_msg("<----- execute_request\n");
+    return $response;
+}
+
+
+# Execute a dbh-level request: connect, invoke the single dbh method
+# call, then attach any requested dbh attributes, last_insert_id, and
+# (if the method returned an sth, eg table_info) its resultsets.
+sub execute_dbh_request {
+    my ($self, $request) = @_;
+    my $stats = $self->{stats};
+
+    my $dbh;
+    my $rv_ref = eval {
+        $dbh = $self->_connect($request);
+        my $args = $request->dbh_method_call; # [ wantarray, 'method_name', @args ]
+        my $wantarray = shift @$args;
+        my $meth = shift @$args;
+        $stats->{method_calls_dbh}->{$meth}++;
+        my @rv = ($wantarray)
+            ? $dbh->$meth(@$args)
+            : scalar $dbh->$meth(@$args);
+        \@rv;
+    } || []; # empty array ref if the eval died ($@ captured below)
+    my $response = $self->new_response_with_err($rv_ref, $@, $dbh);
+
+    # if _connect itself failed there's nothing more we can gather
+    return $response if not $dbh;
+
+    # does this request also want any dbh attributes returned?
+    if (my $dbh_attributes = $request->dbh_attributes) {
+        $response->dbh_attributes( $self->gather_dbh_attributes($dbh, $dbh_attributes) );
+    }
+
+    if ($rv_ref and my $lid_args = $request->dbh_last_insert_id_args) {
+        $stats->{method_calls_dbh}->{last_insert_id}++;
+        my $id = $dbh->last_insert_id( @$lid_args );
+        $response->last_insert_id( $id );
+    }
+
+    if ($rv_ref and UNIVERSAL::isa($rv_ref->[0],'DBI::st')) {
+        # dbh_method_call was probably a metadata method like table_info
+        # that returns a statement handle, so turn the $sth into resultset
+        my $sth = $rv_ref->[0];
+        $response->sth_resultsets( $self->gather_sth_resultsets($sth, $request, $response) );
+        $response->rv("(sth)"); # don't try to return actual sth
+    }
+
+    # we're finished with this dbh for this request
+    $self->reset_dbh($dbh);
+
+    return $response;
+}
+
+
+# Fetch the values of the requested dbh attributes and return them as a
+# hash ref. A leading '*' in the requested names is expanded to the
+# standard + driver-private attribute names for this handle.
+sub gather_dbh_attributes {
+    my ($self, $dbh, $dbh_attributes) = @_;
+    my @req_attr_names = @$dbh_attributes;
+    if ($req_attr_names[0] eq '*') { # auto include std + private
+        shift @req_attr_names;
+        push @req_attr_names, @{ $self->_std_response_attribute_names($dbh) };
+    }
+    my %dbh_attr_values;
+    @dbh_attr_values{@req_attr_names} = $dbh->FETCH_many(@req_attr_names);
+
+    # XXX piggyback installed_methods onto dbh_attributes for now
+    $dbh_attr_values{dbi_installed_methods} = { DBI->installed_methods };
+
+    # XXX piggyback default_methods onto dbh_attributes for now
+    $dbh_attr_values{dbi_default_methods} = _get_default_methods($dbh);
+
+    return \%dbh_attr_values;
+}
+
+
+# Return (and cache on the dbh) the list of attribute names to report
+# back to the client for handle $h (a dbh or an sth). Combines standard
+# names, the driver's private_attribute_info() (falling back to the
+# %extra_attr table), and any configured forced_response_attributes.
+sub _std_response_attribute_names {
+    my ($self, $h) = @_;
+    $h = tied(%$h) || $h; # switch to inner handle
+
+    # cache the private_attribute_info data for each handle
+    # XXX might be better to cache it in the executor
+    # as it's unlikely to change
+    # or perhaps at least cache it in the dbh even for sth
+    # as the sth are typically very short lived
+
+    my ($dbh, $h_type, $driver_name, @attr_names);
+
+    if ($dbh = $h->{Database}) { # is an sth
+
+        # does the dbh already have the answer cached?
+        return $dbh->{private_gofer_std_attr_names_sth} if $dbh->{private_gofer_std_attr_names_sth};
+
+        ($h_type, $driver_name) = ('sth', $dbh->{Driver}{Name});
+        push @attr_names, qw(NUM_OF_PARAMS NUM_OF_FIELDS NAME TYPE NULLABLE PRECISION SCALE);
+    }
+    else { # is a dbh
+        return $h->{private_gofer_std_attr_names_dbh} if $h->{private_gofer_std_attr_names_dbh};
+
+        ($h_type, $driver_name, $dbh) = ('dbh', $h->{Driver}{Name}, $h);
+        # explicitly add these because drivers may have different defaults
+        # add Name so the client gets the real Name of the connection
+        push @attr_names, qw(ChopBlanks LongReadLen LongTruncOk ReadOnly Name);
+    }
+
+    if (my $pai = $h->private_attribute_info) {
+        push @attr_names, keys %$pai;
+    }
+    else {
+        push @attr_names, @{ $extra_attr{ $driver_name }{$h_type} || []};
+    }
+    if (my $fra = $self->{forced_response_attributes}) {
+        push @attr_names, @{ $fra->{ $driver_name }{$h_type} || []}
+    }
+    $dbh->trace_msg("_std_response_attribute_names for $driver_name $h_type: @attr_names\n");
+
+    # cache into the dbh even for sth, as the dbh is usually longer lived
+    return $dbh->{"private_gofer_std_attr_names_$h_type"} = \@attr_names;
+}
+
+
+# Execute a statement-handle style request: connect, call the dbh
+# method that creates the sth (eg prepare), replay the queued sth
+# method calls (eg bind_param, execute), then gather resultsets and any
+# requested attributes into the response. Should not throw.
+sub execute_sth_request {
+    my ($self, $request) = @_;
+    my $dbh;
+    my $sth;
+    my $last_insert_id;
+    my $stats = $self->{stats};
+
+    my $rv = eval {
+        $dbh = $self->_connect($request);
+
+        my $args = $request->dbh_method_call; # [ wantarray, 'method_name', @args ]
+        shift @$args; # discard wantarray
+        my $meth = shift @$args;
+        $stats->{method_calls_sth}->{$meth}++;
+        $sth = $dbh->$meth(@$args);
+        my $last = '(sth)'; # a true value (don't try to return actual sth)
+
+        # execute methods on the sth, e.g., bind_param & execute
+        if (my $calls = $request->sth_method_calls) {
+            for my $meth_call (@$calls) {
+                my $method = shift @$meth_call;
+                $stats->{method_calls_sth}->{$method}++;
+                $last = $sth->$method(@$meth_call);
+            }
+        }
+
+        if (my $lid_args = $request->dbh_last_insert_id_args) {
+            $stats->{method_calls_sth}->{last_insert_id}++;
+            $last_insert_id = $dbh->last_insert_id( @$lid_args );
+        }
+
+        $last;
+    };
+    my $response = $self->new_response_with_err($rv, $@, $dbh);
+
+    # if _connect itself failed there's nothing more we can gather
+    return $response if not $dbh;
+
+    $response->last_insert_id( $last_insert_id )
+        if defined $last_insert_id;
+
+    # even if the eval failed we still want to try to gather attribute values
+    # (XXX would be nice to be able to support streaming of results.
+    # which would reduce memory usage and latency for large results)
+    if ($sth) {
+        $response->sth_resultsets( $self->gather_sth_resultsets($sth, $request, $response) );
+        $sth->finish;
+    }
+
+    # does this request also want any dbh attributes returned?
+    my $dbh_attr_set;
+    if (my $dbh_attributes = $request->dbh_attributes) {
+        $dbh_attr_set = $self->gather_dbh_attributes($dbh, $dbh_attributes);
+    }
+    # XXX needs to be integrated with private_attribute_info() etc
+    if (my $dbh_attr = $extra_attr{$dbh->{Driver}{Name}}{dbh_after_sth}) {
+        # fetched after the sth calls since eg mysql_insertid is set by execute
+        @{$dbh_attr_set}{@$dbh_attr} = $dbh->FETCH_many(@$dbh_attr);
+    }
+    $response->dbh_attributes($dbh_attr_set) if $dbh_attr_set && %$dbh_attr_set;
+
+    $self->reset_dbh($dbh);
+
+    return $response;
+}
+
+
+# Collect all result sets from the sth (looping over more_results where
+# available, unless forced_single_resultset is set) and update the
+# rows-returned stats. Any error is added to the response, and undef is
+# returned in that case.
+sub gather_sth_resultsets {
+    my ($self, $sth, $request, $response) = @_;
+    my $resultsets = eval {
+
+        my $attr_names = $self->_std_response_attribute_names($sth);
+        my $sth_attr = {};
+        $sth_attr->{$_} = 1 for @$attr_names;
+
+        # let the client add/remove sth attributes
+        if (my $sth_result_attr = $request->sth_result_attr) {
+            $sth_attr->{$_} = $sth_result_attr->{$_}
+                for keys %$sth_result_attr;
+        }
+        my @sth_attr = grep { $sth_attr->{$_} } keys %$sth_attr;
+
+        my $row_count = 0;
+        my $rs_list = [];
+        while (1) {
+            my $rs = $self->fetch_result_set($sth, \@sth_attr);
+            push @$rs_list, $rs;
+            if (my $rows = $rs->{rowset}) {
+                $row_count += @$rows;
+            }
+            last if $self->{forced_single_resultset};
+            last if !($sth->more_results || $sth->{syb_more_results});
+        }
+
+        my $stats = $self->{stats};
+        $stats->{rows_returned_total} += $row_count;
+        $stats->{rows_returned_max} = $row_count
+            if $row_count > ($stats->{rows_returned_max}||0);
+
+        $rs_list;
+    };
+    $response->add_err(1, $@) if $@;
+    return $resultsets;
+}
+
+
+# Fetch the requested attribute values and (for selects, detected via
+# NUM_OF_FIELDS > 0) all rows of the current result set. Errors are
+# captured into the returned meta hash (err/errstr/state) rather than
+# propagated.
+sub fetch_result_set {
+    my ($self, $sth, $sth_attr) = @_;
+    my %meta;
+    eval {
+        @meta{ @$sth_attr } = $sth->FETCH_many(@$sth_attr);
+        # we assume @$sth_attr contains NUM_OF_FIELDS
+        $meta{rowset} = $sth->fetchall_arrayref()
+            if (($meta{NUM_OF_FIELDS}||0) > 0); # is SELECT
+        # the fetchall_arrayref may fail with a 'not executed' kind of error
+        # because gather_sth_resultsets/fetch_result_set are called even if
+        # execute() failed, or even if there was no execute() call at all.
+        # The corresponding error goes into the resultset err, not the top-level
+        # response err, so in most cases this resultset err is never noticed.
+    };
+    if ($@) {
+        chomp $@;
+        $meta{err}    = $DBI::err    || 1;
+        $meta{errstr} = $DBI::errstr || $@;
+        $meta{state}  = $DBI::state;
+    }
+    return \%meta;
+}
+
+
+# Determine which dbh methods the driver has NOT overridden.
+sub _get_default_methods {
+    my ($dbh) = @_;
+    # returns a ref to a hash of dbh method names for methods which the driver
+    # hasn't overridden i.e., quote(). These don't need to be forwarded via gofer.
+    my $ImplementorClass = $dbh->{ImplementorClass} or die;
+    my %default_methods;
+    for my $method (@all_dbh_methods) {
+        # 42 is a defined sentinel so the numeric != below never compares undef
+        my $dbi_sub = $all_dbh_methods{$method} || 42;
+        my $imp_sub = $ImplementorClass->can($method) || 42;
+        next if $imp_sub != $dbi_sub;
+        #warn("default $method\n");
+        $default_methods{$method} = 1;
+    }
+    return \%default_methods;
+}
+
+
+# Parse a DBI_GOFER_RANDOM-style spec string (comma-separated items,
+# eg "fail=2,err=-1,delay0.5=50,methodname") and install dbh Callbacks
+# that randomly fail and/or delay the named dbh methods. This is a
+# fault-injection aid for testing.
+# XXX would be nice to make this a generic DBI module
+sub _install_rand_callbacks {
+    my ($self, $dbh, $dbi_gofer_random) = @_;
+
+    my $callbacks = $dbh->{Callbacks} || {};
+    my $prev = $dbh->{private_gofer_rand_fail_callbacks} || {};
+
+    # return if we've already setup this handle with callbacks for these specs
+    return if (($callbacks->{_dbi_gofer_random_spec}||'') eq $dbi_gofer_random);
+    #warn "$dbh # $callbacks->{_dbi_gofer_random_spec}";
+    $callbacks->{_dbi_gofer_random_spec} = $dbi_gofer_random;
+
+    my ($fail_percent, $fail_err, $delay_percent, $delay_duration, %spec_part, @spec_note);
+    my @specs = split /,/, $dbi_gofer_random;
+    for my $spec (@specs) {
+        # fail=N : percentage of calls that should fail (negative = modulo rate)
+        if ($spec =~ m/^fail=(-?[.\d]+)%?$/) {
+            $fail_percent = $1;
+            $spec_part{fail} = $spec;
+            next;
+        }
+        # err=N : the err value injected failures should use
+        if ($spec =~ m/^err=(-?\d+)$/) {
+            $fail_err = $1;
+            $spec_part{err} = $spec;
+            next;
+        }
+        # delayD=N : delay of D seconds applied to N percent of calls
+        if ($spec =~ m/^delay([.\d]+)=(-?[.\d]+)%?$/) {
+            $delay_duration = $1;
+            $delay_percent = $2;
+            $spec_part{delay} = $spec;
+            next;
+        }
+        elsif ($spec !~ m/^(\w+|\*)$/) {
+            warn "Ignored DBI_GOFER_RANDOM item '$spec' which isn't a config or a dbh method name";
+            next;
+        }
+
+        # anything else is the name of a dbh method to attach a callback to
+        my $method = $spec;
+        if ($callbacks->{$method} && $prev->{$method} && $callbacks->{$method} != $prev->{$method}) {
+            warn "Callback for $method method already installed so DBI_GOFER_RANDOM callback not installed\n";
+            next;
+        }
+        # NOTE(review): "preceeded" below is a typo in a runtime warning
+        # message; left unchanged here since it is program output
+        unless (defined $fail_percent or defined $delay_percent) {
+            warn "Ignored DBI_GOFER_RANDOM item '$spec' because not preceeded by 'fail=N' and/or 'delayN=N'";
+            next;
+        }
+
+        push @spec_note, join(",", values(%spec_part), $method);
+        $callbacks->{$method} = $self->_mk_rand_callback($method, $fail_percent, $delay_percent, $delay_duration, $fail_err);
+    }
+    warn "DBI_GOFER_RANDOM failures/delays enabled: @spec_note\n"
+        if @spec_note;
+    $dbh->{Callbacks} = $callbacks;
+    $dbh->{private_gofer_rand_fail_callbacks} = $callbacks;
+}
+
+# per-method call counters, used for deterministic (negative-percent)
+# failure/delay sequencing; shared across all handles in this process
+my %_mk_rand_callback_seqn;
+
+sub _mk_rand_callback {
+    my ($self, $method, $fail_percent, $delay_percent, $delay_duration, $fail_err) = @_;
+    # Returns a DBI callback closure that delays and/or fails calls to
+    # $method. A positive percent triggers randomly via rand(); a negative
+    # percent triggers deterministically every Nth call (see modrate below).
+    my ($fail_modrate, $delay_modrate);
+    $fail_percent ||= 0; $fail_modrate = int(1/(-$fail_percent )*100) if $fail_percent;
+    $delay_percent ||= 0; $delay_modrate = int(1/(-$delay_percent)*100) if $delay_percent;
+    # note that $method may be "*" but that's not recommended or documented or wise
+    return sub {
+        my ($h) = @_;
+        my $seqn = ++$_mk_rand_callback_seqn{$method};
+        my $delay = ($delay_percent > 0) ? rand(100) < $delay_percent :
+                    ($delay_percent < 0) ? !($seqn % $delay_modrate): 0;
+        my $fail = ($fail_percent > 0) ? rand(100) < $fail_percent :
+                   ($fail_percent < 0) ? !($seqn % $fail_modrate) : 0;
+        #no warnings 'uninitialized';
+        #warn "_mk_rand_callback($fail_percent:$fail_modrate, $delay_percent:$delay_modrate): seqn=$seqn fail=$fail delay=$delay";
+        if ($delay) {
+            my $msg = "DBI_GOFER_RANDOM delaying execution of $method() by $delay_duration seconds\n";
+            # Note what's happening in a trace message. If the delay percent is an odd
+            # number then use warn() instead so it's sent back to the client.
+            ($delay_percent % 2 == 1) ? warn($msg) : $h->trace_msg($msg);
+            select undef, undef, undef, $delay_duration; # allows floating point value
+        }
+        if ($fail) {
+            undef $_; # tell DBI to not call the method
+            # the "induced by DBI_GOFER_RANDOM" is special and must be included in errstr
+            # as it's checked for in a few places, such as the gofer retry logic
+            return $h->set_err($fail_err || $DBI::stderr,
+                "fake error from $method method induced by DBI_GOFER_RANDOM env var ($fail_percent%)");
+        }
+        return;
+    }
+}
+
+
+sub update_stats {
+    my ($self,
+        $request, $response,
+        $frozen_request, $frozen_response,
+        $time_received,
+        $store_meta, $other_meta,
+    ) = @_;
+
+    # Record size statistics for a completed request/response cycle and, if
+    # track_recent is configured, keep bounded queues of recent requests and
+    # recent errors for diagnostics. Returns the 'recent' record, if any.
+
+    # should always have a response object here
+    # (was 'unless $request', which contradicted the message and this comment)
+    carp("No response object provided") unless $response;
+
+    my $stats = $self->{stats};
+    # track high-water marks for frozen request/response sizes
+    $stats->{frozen_request_max_bytes} = length($frozen_request)
+        if $frozen_request
+        && length($frozen_request) > ($stats->{frozen_request_max_bytes}||0);
+    $stats->{frozen_response_max_bytes} = length($frozen_response)
+        if $frozen_response
+        && length($frozen_response) > ($stats->{frozen_response_max_bytes}||0);
+
+    my $recent;
+    if (my $track_recent = $self->{track_recent}) {
+        $recent = {
+            request => $frozen_request,
+            response => $frozen_response,
+            time_received => $time_received,
+            duration => dbi_time()-$time_received,
+            # for any other info
+            ($store_meta) ? (meta => $store_meta) : (),
+        };
+        # fall back to the live objects when frozen forms weren't supplied
+        $recent->{request_object} = $request
+            if !$frozen_request && $request;
+        $recent->{response_object} = $response
+            if !$frozen_response;
+        # errors go into both the recent_requests and recent_errors queues
+        my @queues = ($stats->{recent_requests} ||= []);
+        push @queues, ($stats->{recent_errors} ||= [])
+            if !$response or $response->err;
+        for my $queue (@queues) {
+            push @$queue, $recent;
+            shift @$queue if @$queue > $track_recent; # bound the queue length
+        }
+    }
+    return $recent;
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DBI::Gofer::Execute - Executes Gofer requests and returns Gofer responses
+
+=head1 SYNOPSIS
+
+ $executor = DBI::Gofer::Execute->new( { ...config... });
+
+ $response = $executor->execute_request( $request );
+
+=head1 DESCRIPTION
+
+Accepts a DBI::Gofer::Request object, executes the requested DBI method calls,
+and returns a DBI::Gofer::Response object.
+
+Any error, including any internal 'fatal' errors are caught and converted into
+a DBI::Gofer::Response object.
+
+This module is usually invoked by a 'server-side' Gofer transport module.
+They usually have names in the "C<DBI::Gofer::Transport::*>" namespace.
+Examples include: L<DBI::Gofer::Transport::stream> and L<DBI::Gofer::Transport::mod_perl>.
+
+=head1 CONFIGURATION
+
+=head2 check_request_sub
+
+If defined, it must be a reference to a subroutine that will 'check' the request.
+It is passed the request object and the executor as its only arguments.
+
+The subroutine can either return the original request object or die with a
+suitable error message (which will be turned into a Gofer response).
+
+It can also construct and return a new request that should be executed instead
+of the original request.
+
+=head2 check_response_sub
+
+If defined, it must be a reference to a subroutine that will 'check' the response.
+It is passed the response object, the executor, and the request object.
+The sub may alter the response object and return undef, or return a new response object.
+
+This mechanism can be used to, for example, terminate the service if specific
+database errors are seen.
+
+=head2 forced_connect_dsn
+
+If set, this DSN is always used instead of the one in the request.
+
+=head2 default_connect_dsn
+
+If set, this DSN is used if C<forced_connect_dsn> is not set and the request does not contain a DSN itself.
+
+=head2 forced_connect_attributes
+
+A reference to a hash of connect() attributes. Individual attributes in
+C<forced_connect_attributes> will take precedence over corresponding attributes
+in the request.
+
+=head2 default_connect_attributes
+
+A reference to a hash of connect() attributes. Individual attributes in the
+request take precedence over corresponding attributes in C<default_connect_attributes>.
+
+=head2 max_cached_dbh_per_drh
+
+If set, the loaded drivers will be checked to ensure they don't have more than
+this number of cached connections. There is no default value. This limit is not
+enforced for every request.
+
+=head2 max_cached_sth_per_dbh
+
+If set, all the cached statement handles will be cleared once the number of
+cached statement handles rises above this limit. The default is 1000.
+
+=head2 forced_single_resultset
+
+If true, then only the first result set will be fetched and returned in the response.
+
+=head2 forced_response_attributes
+
+A reference to a data structure that can specify extra attributes to be returned in responses.
+
+ forced_response_attributes => {
+ DriverName => {
+ dbh => [ qw(dbh_attrib_name) ],
+ sth => [ qw(sth_attrib_name) ],
+ },
+ },
+
+This can be useful in cases where the driver has not implemented the
+private_attribute_info() method and DBI::Gofer::Execute's own fallback list of
+private attributes doesn't include the driver or attributes you need.
+
+=head2 track_recent
+
+If set, specifies the number of recent requests and responses that should be
+kept by the update_stats() method for diagnostics. See L<DBI::Gofer::Transport::mod_perl>.
+
+Note that this setting can significantly increase memory use. Use with caution.
+
+=head2 forced_gofer_random
+
+Enable forced random failures and/or delays for testing. See L</DBI_GOFER_RANDOM> below.
+
+=head1 DRIVER-SPECIFIC ISSUES
+
+Gofer needs to know about any driver-private attributes that should have their
+values sent back to the client.
+
+If the driver doesn't support private_attribute_info() method, and very few do,
+then the module falls back to using some hard-coded details, if available, for
+the driver being used. Currently hard-coded details are available for the
+mysql, Pg, Sybase, and SQLite drivers.
+
+=head1 TESTING
+
+DBD::Gofer, DBD::Execute and related packages are well tested by executing the
+DBI test suite with DBI_AUTOPROXY configured to route all DBI calls via DBD::Gofer.
+
+Because Gofer includes timeout and 'retry on error' mechanisms there is a need
+for some way to trigger delays and/or errors. This can be done via the
+C<forced_gofer_random> configuration item, or else the DBI_GOFER_RANDOM environment
+variable.
+
+=head2 DBI_GOFER_RANDOM
+
+The value of the C<forced_gofer_random> configuration item (or else the
+DBI_GOFER_RANDOM environment variable) is treated as a series of tokens
+separated by commas.
+
+The tokens can be one of three types:
+
+=over 4
+
+=item fail=R%
+
+Set the current failure rate to R where R is a percentage.
+The value R can be floating point, e.g., C<fail=0.05%>.
+Negative values for R have special meaning, see below.
+
+=item err=N
+
+Sets the current failure err value to N (instead of the DBI's default 'standard
+err value' of 2000000000). This is useful when you want to simulate a
+specific error.
+
+=item delayN=R%
+
+Set the current random delay rate to R where R is a percentage, and set the
+current delay duration to N seconds. The values of R and N can be floating point,
+e.g., C<delay0.5=0.2%>. Negative values for R have special meaning, see below.
+
+If R is an odd number (R % 2 == 1) then a message is logged via warn() which
+will be returned to, and echoed at, the client.
+
+=item methodname
+
+Applies the current fail, err, and delay values to the named method.
+If neither a fail nor delay have been set yet then a warning is generated.
+
+=back
+
+For example:
+
+ $executor = DBI::Gofer::Execute->new( {
+ forced_gofer_random => "fail=0.01%,do,delay60=1%,execute",
+ });
+
+will cause the do() method to fail for 0.01% of calls, and the execute() method to
+fail 0.01% of calls and be delayed by 60 seconds on 1% of calls.
+
+If the percentage value (C<R>) is negative then instead of the failures being
+triggered randomly (via the rand() function) they are triggered via a sequence
+number. In other words "C<fail=-20%>" will mean every fifth call will fail.
+Each method has a distinct sequence number.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
diff --git a/lib/DBI/Gofer/Request.pm b/lib/DBI/Gofer/Request.pm
new file mode 100644
index 0000000..d6464a6
--- /dev/null
+++ b/lib/DBI/Gofer/Request.pm
@@ -0,0 +1,200 @@
+package DBI::Gofer::Request;
+
+# $Id: Request.pm 12536 2009-02-24 22:37:09Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+
+use DBI qw(neat neat_list);
+
+use base qw(DBI::Util::_accessor);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 12536 $ =~ /(\d+)/o);
+
+# request flag bits, exported for use by DBD::Gofer and DBI::Gofer::Execute
+use constant GOf_REQUEST_IDEMPOTENT => 0x0001;
+use constant GOf_REQUEST_READONLY => 0x0002;
+
+our @EXPORT = qw(GOf_REQUEST_IDEMPOTENT GOf_REQUEST_READONLY);
+
+
+# simple scalar accessors for the request payload fields
+__PACKAGE__->mk_accessors(qw(
+    version
+    flags
+    dbh_connect_call
+    dbh_method_call
+    dbh_attributes
+    dbh_last_insert_id_args
+    sth_method_calls
+    sth_result_attr
+));
+# 'meta' autovivifies to a hash ref on first access
+__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
+    meta
+));
+
+
+sub new {
+    my ($class, $args) = @_;
+    # Stamp the request with the current module version unless the caller
+    # supplied one, then delegate construction to the accessor base class.
+    $args->{version} = $VERSION unless $args->{version};
+    return $class->SUPER::new($args);
+}
+
+
+sub reset {
+    my ($self, $flags) = @_;
+    # Strip the request back to just its connect call and version,
+    # optionally recording new request flags.
+    my %kept = map { $_ => $self->{$_} } qw(version dbh_connect_call);
+    %$self = %kept;
+    $self->{flags} = $flags if $flags;
+}
+
+
+sub init_request {
+    my ($self, $method_and_args, $dbh) = @_;
+    # Wipe any previous per-request state (keeping the connect info and
+    # version) and record the dbh method call this request represents.
+    # GOf_REQUEST_READONLY is set when the handle is marked ReadOnly,
+    # which lets is_idempotent() treat the request as safely retryable.
+    $self->reset( $dbh->{ReadOnly} ? GOf_REQUEST_READONLY : 0 );
+    $self->dbh_method_call($method_and_args);
+}
+
+
+sub is_sth_request {
+    # True (returns the sth_result_attr hash ref) when this request carries
+    # statement-handle work; false/undef for plain dbh method calls.
+    my $self = shift;
+    return $self->{sth_result_attr};
+}
+
+
+sub statements {
+    my $self = shift;
+    # Return the list of SQL statement strings this request contains: the
+    # first argument of the dbh method call when that call is do() or
+    # prepare(); an empty list otherwise.
+    my @statements;
+    if (my $dbh_method_call = $self->dbh_method_call) {
+        my $statement_method_regex = qr/^(?:do|prepare)$/;
+        # dbh_method_call is [wantarray, method_name, @args]
+        my (undef, $method, $arg1) = @$dbh_method_call;
+        push @statements, $arg1 if $method && $method =~ $statement_method_regex;
+    }
+    return @statements;
+}
+
+
+sub is_idempotent {
+    my $self = shift;
+
+    # True when it's safe to retry this request: either the flags say so
+    # explicitly, or every statement is a plain SELECT without FOR UPDATE.
+    if (my $flags = $self->flags) {
+        return 1 if $flags & (GOf_REQUEST_IDEMPOTENT|GOf_REQUEST_READONLY);
+    }
+
+    # else check if all statements are SELECT statement that don't include FOR UPDATE
+    my @statements = $self->statements;
+    # XXX this is very minimal for now, doesn't even allow comments before the select
+    # (and can't ever work for "exec stored_procedure_name" kinds of statements)
+    # XXX it also doesn't deal with multiple statements: prepare("select foo; update bar")
+    return 1 if @statements == grep {
+                    m/^ \s* SELECT \b /xmsi && !m/ \b FOR \s+ UPDATE \b /xmsi
+                } @statements;
+
+    return 0;
+}
+
+
+sub summary_as_text {
+    my $self = shift;
+    my ($context) = @_;
+    # Multi-line human-readable rendering of the request (connect call,
+    # flags, dbh/sth method calls) for trace output. Passwords are masked.
+    my @s = '';
+
+    if ($context && %$context) {
+        my @keys = sort keys %$context;
+        push @s, join(", ", map { "$_=>".$context->{$_} } @keys);
+    }
+
+    my ($method, $dsn, $user, $pass, $attr) = @{ $self->dbh_connect_call };
+    $method ||= 'connect_cached';
+    $pass = '***' if defined $pass;
+    my $tmp = '';
+    if ($attr) {
+        $tmp = { %{$attr||{}} }; # copy so we can edit
+        $tmp->{Password} = '***' if exists $tmp->{Password};
+        $tmp = "{ ".neat_list([ %$tmp ])." }";
+    }
+    push @s, sprintf "dbh= $method(%s, %s)", neat_list([$dsn, $user, $pass]), $tmp;
+
+    if (my $flags = $self->flags) {
+        push @s, sprintf "flags: 0x%x", $flags;
+    }
+
+    if (my $dbh_attr = $self->dbh_attributes) {
+        # NOTE(review): sprintf has a single %s so only the first attribute
+        # name is shown; extra requested attributes are silently dropped from
+        # the summary -- confirm whether that's intended
+        push @s, sprintf "dbh->FETCH: %s", @$dbh_attr
+            if @$dbh_attr;
+    }
+
+    my ($wantarray, $meth, @args) = @{ $self->dbh_method_call };
+    my $args = neat_list(\@args);
+    $args =~ s/\n+/ /g;
+    push @s, sprintf "dbh->%s(%s)", $meth, $args;
+
+    if (my $lii_args = $self->dbh_last_insert_id_args) {
+        push @s, sprintf "dbh->last_insert_id(%s)", neat_list($lii_args);
+    }
+
+    for my $call (@{ $self->sth_method_calls || [] }) {
+        my ($meth, @args) = @$call;
+        ($args = neat_list(\@args)) =~ s/\n+/ /g;
+        push @s, sprintf "sth->%s(%s)", $meth, $args;
+    }
+
+    if (my $sth_attr = $self->sth_result_attr) {
+        push @s, sprintf "sth->FETCH: %s", %$sth_attr
+            if %$sth_attr;
+    }
+
+    return join("\n\t", @s) . "\n";
+}
+
+
+sub outline_as_text { # one-line version of summary_as_text
+    my $self = shift;
+    # Compact single-line rendering: flags, dbh method call, sth method
+    # calls, then the connect dsn last.
+    my @s = '';
+    my $neatlen = 80;   # truncation length passed to neat_list()
+
+    if (my $flags = $self->flags) {
+        push @s, sprintf "flags=0x%x", $flags;
+    }
+
+    my (undef, $meth, @args) = @{ $self->dbh_method_call };
+    push @s, sprintf "%s(%s)", $meth, neat_list(\@args, $neatlen);
+
+    for my $call (@{ $self->sth_method_calls || [] }) {
+        my ($meth, @args) = @$call;
+        push @s, sprintf "%s(%s)", $meth, neat_list(\@args, $neatlen);
+    }
+
+    my ($method, $dsn) = @{ $self->dbh_connect_call };
+    push @s, "$method($dsn,...)"; # dsn last as it's usually less interesting
+
+    (my $outline = join("; ", @s)) =~ s/\s+/ /g; # squish whitespace, incl newlines
+    return $outline;
+}
+
+1;
+
+=head1 NAME
+
+DBI::Gofer::Request - Encapsulate a request from DBD::Gofer to DBI::Gofer::Execute
+
+=head1 DESCRIPTION
+
+This is an internal class.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
diff --git a/lib/DBI/Gofer/Response.pm b/lib/DBI/Gofer/Response.pm
new file mode 100644
index 0000000..b09782e
--- /dev/null
+++ b/lib/DBI/Gofer/Response.pm
@@ -0,0 +1,218 @@
+package DBI::Gofer::Response;
+
+# $Id: Response.pm 11565 2008-07-22 20:17:33Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+
+use Carp;
+use DBI qw(neat neat_list);
+
+use base qw(DBI::Util::_accessor Exporter);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 11565 $ =~ /(\d+)/o);
+
+# response flag bit, exported for use by DBD::Gofer and DBI::Gofer::Execute
+use constant GOf_RESPONSE_EXECUTED => 0x0001;
+
+our @EXPORT = qw(GOf_RESPONSE_EXECUTED);
+
+
+# simple scalar accessors for the response payload fields
+__PACKAGE__->mk_accessors(qw(
+    version
+    rv
+    err
+    errstr
+    state
+    flags
+    last_insert_id
+    dbh_attributes
+    sth_resultsets
+    warnings
+));
+# 'meta' autovivifies to a hash ref on first access
+__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
+    meta
+));
+
+
+sub new {
+    my ($class, $args) = @_;
+    # Default the version stamp and strip any trailing newline from errstr
+    # before delegating construction to the accessor base class.
+    $args->{version} = $VERSION unless $args->{version};
+    chomp $args->{errstr} if $args->{errstr};
+    return $class->SUPER::new($args);
+}
+
+
+sub err_errstr_state {
+    # Return the (err, errstr, state) triple as a list.
+    my $self = shift;
+    return ($self->{err}, $self->{errstr}, $self->{state});
+}
+
+sub executed_flag_set {
+    # True when GOf_RESPONSE_EXECUTED is set in the response flags;
+    # returns 0 when no flags are present at all.
+    my $flags = shift->flags
+        or return 0;
+    return $flags & GOf_RESPONSE_EXECUTED;
+}
+
+
+sub add_err {
+    my ($self, $err, $errstr, $state, $trace) = @_;
+
+    # Merge an additional error into the response's err/errstr/state fields,
+    # preserving any existing error text and noting changed err/state values.
+    # acts like the DBI's set_err method.
+    # this code copied from DBI::PurePerl's set_err method.
+
+    chomp $errstr if $errstr;
+    $state ||= '';
+    carp ref($self)."->add_err($err, $errstr, $state)"
+        if $trace and defined($err) || $errstr;
+
+    my ($r_err, $r_errstr, $r_state) = ($self->{err}, $self->{errstr}, $self->{state});
+
+    if ($r_errstr) {
+        # append to the existing message, annotating any err/state changes
+        $r_errstr .= sprintf " [err was %s now %s]", $r_err, $err
+            if $r_err && $err && $r_err ne $err;
+        # NOTE(review): mixes low-precedence 'and' with '&&' -- parses as
+        # $r_state and ($r_state ne "S1000" && $state && $r_state ne $state);
+        # verify against DBI::PurePerl::set_err that this is the intent
+        $r_errstr .= sprintf " [state was %s now %s]", $r_state, $state
+            if $r_state and $r_state ne "S1000" && $state && $r_state ne $state;
+        $r_errstr .= "\n$errstr" if $r_errstr ne $errstr;
+    }
+    else {
+        $r_errstr = $errstr;
+    }
+
+    # assign if higher priority: err > "0" > "" > undef
+    my $err_changed;
+    if ($err                 # new error: so assign
+        or !defined $r_err   # no existing warn/info: so assign
+           # new warn ("0" len 1) > info ("" len 0): so assign
+        or defined $err && length($err) > length($r_err)
+    ) {
+        $r_err = $err;
+        ++$err_changed;
+    }
+
+    # "00000" means success, which is recorded as the empty string
+    $r_state = ($state eq "00000") ? "" : $state
+        if $state && $err_changed;
+
+    ($self->{err}, $self->{errstr}, $self->{state}) = ($r_err, $r_errstr, $r_state);
+
+    return undef;
+}
+
+
+sub summary_as_text {
+    my $self = shift;
+    my ($context) = @_;
+
+    # Multi-line human-readable summary of the response (rv/err status,
+    # fetched dbh attributes, and a per-resultset outline) for trace output.
+    my ($rv, $err, $errstr, $state) = ($self->{rv}, $self->{err}, $self->{errstr}, $self->{state});
+
+    my @s = sprintf("\trv=%s", (ref $rv) ? "[".neat_list($rv)."]" : neat($rv));
+    $s[-1] .= sprintf(", err=%s, errstr=%s", $err, neat($errstr))
+        if defined $err;
+    $s[-1] .= sprintf(", flags=0x%x", $self->{flags})
+        if defined $self->{flags};
+
+    # sprintf was missing here: the bare "last_insert_id=%s" format string
+    # and the value were pushed as two separate summary lines, so the output
+    # contained a literal '%s'
+    push @s, sprintf "last_insert_id=%s", $self->last_insert_id
+        if defined $self->last_insert_id;
+
+    if (my $dbh_attr = $self->dbh_attributes) {
+        my @keys = sort keys %$dbh_attr;
+        push @s, sprintf "dbh= { %s }", join(", ", map { "$_=>".neat($dbh_attr->{$_},100) } @keys)
+            if @keys;
+    }
+
+    for my $rs (@{$self->sth_resultsets || []}) {
+        my ($rowset, $err, $errstr, $state)
+            = @{$rs}{qw(rowset err errstr state)};
+        my $summary = "rowset: ";
+        my $NUM_OF_FIELDS = $rs->{NUM_OF_FIELDS} || 0;
+        my $rows = $rowset ? @$rowset : 0;
+        if ($rowset || $NUM_OF_FIELDS > 0) {
+            $summary .= sprintf "%d rows, %d columns", $rows, $NUM_OF_FIELDS;
+        }
+        $summary .= sprintf ", err=%s, errstr=%s", $err, neat($errstr) if defined $err;
+        if ($rows) {
+            my $NAME = $rs->{NAME};
+            # show the first row's values, column by column
+            my @colinfo = map { "$NAME->[$_]=".neat($rowset->[0][$_], 30) } 0..@{$NAME}-1;
+            $summary .= sprintf " [%s]", join ", ", @colinfo;
+            $summary .= ",..." if $rows > 1;
+            # we can be a little more helpful for Sybase/MSSQL users
+            $summary .= " syb_result_type=$rs->{syb_result_type}"
+                if $rs->{syb_result_type} and $rs->{syb_result_type} != 4040;
+        }
+        push @s, $summary;
+    }
+    for my $w (@{$self->warnings || []}) {
+        chomp $w;
+        push @s, "warning: $w";
+    }
+    if ($context && %$context) {
+        my @keys = sort keys %$context;
+        push @s, join(", ", map { "$_=>".$context->{$_} } @keys);
+    }
+    return join("\n\t", @s). "\n";
+}
+
+
+sub outline_as_text { # one-line version of summary_as_text
+    my $self = shift;
+    # ($context is accepted for interface symmetry but not used here)
+    my ($context) = @_;
+
+    my ($rv, $err, $errstr, $state) = ($self->{rv}, $self->{err}, $self->{errstr}, $self->{state});
+
+    my $s = sprintf("rv=%s", (ref $rv) ? "[".neat_list($rv)."]" : neat($rv));
+    $s .= sprintf(", err=%s %s", $err, neat($errstr))
+        if defined $err;
+    $s .= sprintf(", flags=0x%x", $self->{flags})
+        if $self->{flags};
+
+    if (my $sth_resultsets = $self->sth_resultsets) {
+        $s .= sprintf(", %d resultsets ", scalar @$sth_resultsets);
+
+        # one bracketed "[Nr x Mc, err ...]" fragment per resultset
+        my @rs;
+        for my $rs (@{$self->sth_resultsets || []}) {
+            my $summary = "";
+            my ($rowset, $err, $errstr)
+                = @{$rs}{qw(rowset err errstr)};
+            my $NUM_OF_FIELDS = $rs->{NUM_OF_FIELDS} || 0;
+            my $rows = $rowset ? @$rowset : 0;
+            if ($rowset || $NUM_OF_FIELDS > 0) {
+                $summary .= sprintf "%dr x %dc", $rows, $NUM_OF_FIELDS;
+            }
+            $summary .= sprintf "%serr %s %s", ($summary?", ":""), $err, neat($errstr)
+                if defined $err;
+            push @rs, $summary;
+        }
+        $s .= join "; ", map { "[$_]" } @rs;
+    }
+
+    return $s;
+}
+
+
+1;
+
+=head1 NAME
+
+DBI::Gofer::Response - Encapsulate a response from DBI::Gofer::Execute to DBD::Gofer
+
+=head1 DESCRIPTION
+
+This is an internal class.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBI/Gofer/Serializer/Base.pm b/lib/DBI/Gofer/Serializer/Base.pm
new file mode 100644
index 0000000..53fc7e7
--- /dev/null
+++ b/lib/DBI/Gofer/Serializer/Base.pm
@@ -0,0 +1,64 @@
+package DBI::Gofer::Serializer::Base;
+
+# $Id: Base.pm 9949 2007-09-18 09:38:15Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+=head1 NAME
+
+DBI::Gofer::Serializer::Base - base class for Gofer serialization
+
+=head1 SYNOPSIS
+
+ $serializer = $serializer_class->new();
+
+ $string = $serializer->serialize( $data );
+ ($string, $deserializer_class) = $serializer->serialize( $data );
+
+ $data = $serializer->deserialize( $string );
+
+=head1 DESCRIPTION
+
+DBI::Gofer::Serializer::* classes implement a very minimal subset of the L<Data::Serializer> API.
+
+Gofer serializers are expected to be very fast and are not required to deal
+with anything other than non-blessed references to arrays and hashes, and plain scalars.
+
+=cut
+
+
+use strict;
+use warnings;
+
+use Carp qw(croak);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 9949 $ =~ /(\d+)/o);
+
+
+sub new {
+    my $class = shift;
+    # Cache the deserializer class name at construction time so serialize()
+    # implementations can return it cheaply alongside the frozen string.
+    my $deserializer_class = $class->deserializer_class;
+    return bless { deserializer_class => $deserializer_class } => $class;
+}
+
+sub deserializer_class {
+    # Derive the short serializer name (e.g. "Storable") by stripping the
+    # common namespace prefix; usable as a class or instance method.
+    my $invocant = shift;
+    my $name = ref $invocant ? ref $invocant : $invocant;
+    $name =~ s{\ADBI::Gofer::Serializer::}{};
+    return $name;
+}
+
+sub serialize {
+    # Abstract: subclasses must override to turn a data structure into a string.
+    my $self = shift;
+    croak ref($self)." has not implemented the serialize method";
+}
+
+sub deserialize {
+    # Abstract: subclasses must override to turn a serialized string back
+    # into a data structure.
+    my $self = shift;
+    croak ref($self)." has not implemented the deserialize method";
+}
+
+1;
diff --git a/lib/DBI/Gofer/Serializer/DataDumper.pm b/lib/DBI/Gofer/Serializer/DataDumper.pm
new file mode 100644
index 0000000..c6fc3a1
--- /dev/null
+++ b/lib/DBI/Gofer/Serializer/DataDumper.pm
@@ -0,0 +1,53 @@
+package DBI::Gofer::Serializer::DataDumper;
+
+use strict;
+use warnings;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 9949 $ =~ /(\d+)/o);
+
+# $Id: DataDumper.pm 9949 2007-09-18 09:38:15Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+=head1 NAME
+
+DBI::Gofer::Serializer::DataDumper - Gofer serialization using DataDumper
+
+=head1 SYNOPSIS
+
+ $serializer = DBI::Gofer::Serializer::DataDumper->new();
+
+ $string = $serializer->serialize( $data );
+
+=head1 DESCRIPTION
+
+Uses DataDumper to serialize. Deserialization is not supported.
+The output of this class is only meant for human consumption.
+
+See also L<DBI::Gofer::Serializer::Base>.
+
+=cut
+
+use Data::Dumper;
+
+use base qw(DBI::Gofer::Serializer::Base);
+
+
+sub serialize {
+    my $self = shift;
+    # Dump the structure in a stable, human-readable form. All Data::Dumper
+    # knobs are localized so callers' global settings are unaffected.
+    local $Data::Dumper::Indent    = 1;
+    local $Data::Dumper::Terse     = 1;
+    local $Data::Dumper::Useqq     = 0; # enabling this disables xs
+    local $Data::Dumper::Sortkeys  = 1;
+    local $Data::Dumper::Quotekeys = 0;
+    local $Data::Dumper::Deparse   = 0;
+    local $Data::Dumper::Purity    = 0;
+    my $frozen = Data::Dumper::Dumper(shift);
+    return $frozen unless wantarray;
+    # in list context also name the class to deserialize with (though this
+    # serializer is one-way -- see the POD above)
+    return ($frozen, $self->{deserializer_class});
+}
+
+1;
diff --git a/lib/DBI/Gofer/Serializer/Storable.pm b/lib/DBI/Gofer/Serializer/Storable.pm
new file mode 100644
index 0000000..9a571bd
--- /dev/null
+++ b/lib/DBI/Gofer/Serializer/Storable.pm
@@ -0,0 +1,59 @@
+package DBI::Gofer::Serializer::Storable;
+
+use strict;
+use warnings;
+
+use base qw(DBI::Gofer::Serializer::Base);
+
+# $Id: Storable.pm 9949 2007-09-18 09:38:15Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+=head1 NAME
+
+DBI::Gofer::Serializer::Storable - Gofer serialization using Storable
+
+=head1 SYNOPSIS
+
+ $serializer = DBI::Gofer::Serializer::Storable->new();
+
+ $string = $serializer->serialize( $data );
+ ($string, $deserializer_class) = $serializer->serialize( $data );
+
+ $data = $serializer->deserialize( $string );
+
+=head1 DESCRIPTION
+
+Uses Storable::nfreeze() to serialize and Storable::thaw() to deserialize.
+
+The serialize() method sets local $Storable::forgive_me = 1; so it doesn't
+croak if it encounters any data types that can't be serialized, such as code refs.
+
+See also L<DBI::Gofer::Serializer::Base>.
+
+=cut
+
+use Storable qw(nfreeze thaw);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 9949 $ =~ /(\d+)/o);
+
+# NOTE(review): this 'use base' repeats the identical declaration near the
+# top of this file; it appears harmless but one of the two could be removed.
+use base qw(DBI::Gofer::Serializer::Base);
+
+
+sub serialize {
+    # Freeze a data structure with Storable's architecture-neutral nfreeze().
+    # In list context, also return the class name to deserialize with.
+    my ($self, $data) = @_;
+    local $Storable::forgive_me = 1; # don't croak on CODE refs etc
+    my $frozen = nfreeze($data);
+    return wantarray ? ($frozen, $self->{deserializer_class}) : $frozen;
+}
+
+sub deserialize {
+    # Inverse of serialize(): thaw a Storable-frozen string back into a structure.
+    my ($self, $frozen) = @_;
+    return thaw($frozen);
+}
+
+1;
diff --git a/lib/DBI/Gofer/Transport/Base.pm b/lib/DBI/Gofer/Transport/Base.pm
new file mode 100644
index 0000000..b688689
--- /dev/null
+++ b/lib/DBI/Gofer/Transport/Base.pm
@@ -0,0 +1,176 @@
+package DBI::Gofer::Transport::Base;
+
+# $Id: Base.pm 12536 2009-02-24 22:37:09Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use DBI;
+
+use base qw(DBI::Util::_accessor);
+
+use DBI::Gofer::Serializer::Storable;
+use DBI::Gofer::Serializer::DataDumper;
+
+
+our $VERSION = sprintf("0.%06d", q$Revision: 12536 $ =~ /(\d+)/o);
+
+
+# accessors for transport configuration (trace level, whether frozen copies
+# are stashed in meta, and the serializer object in use)
+__PACKAGE__->mk_accessors(qw(
+    trace
+    keep_meta_frozen
+    serializer_obj
+));
+
+
+# see also $ENV{DBI_GOFER_TRACE} in DBI::Gofer::Execute
+sub _init_trace {
+    # DBI_GOFER_TRACE may carry extra '='-separated fields; only the leading
+    # numeric trace level matters here (defaults to 0 when unset).
+    my @fields = split /=/, ($ENV{DBI_GOFER_TRACE} || 0);
+    return $fields[0];
+}
+
+
+sub new {
+    my ($class, $args) = @_;
+    # Apply defaults (trace level from DBI_GOFER_TRACE, Storable as the
+    # serializer) then set each arg via its accessor method of the same name.
+    $args->{trace} ||= $class->_init_trace;
+    $args->{serializer_obj} ||= DBI::Gofer::Serializer::Storable->new();
+    my $self = bless {}, $class;
+    $self->$_( $args->{$_} ) for keys %$args;
+    $self->trace_msg("$class->new({ @{[ %$args ]} })\n") if $self->trace;
+    return $self;
+}
+
+# every frozen packet is prefixed with a header carrying a protocol version
+# number so the receiving end can recognise (and reject) foreign data
+my $packet_header_text = "GoFER1:";
+my $packet_header_regex = qr/^GoFER(\d+):/;
+
+
+sub _freeze_data {
+    my ($self, $data, $serializer, $skip_trace) = @_;
+    # Serialize a request/response object into a header-prefixed string,
+    # dying with context if serialization fails. $serializer defaults to
+    # the transport's configured serializer_obj.
+    my $frozen = eval {
+        $self->_dump("freezing $self->{trace} ".ref($data), $data)
+            if !$skip_trace and $self->trace;
+
+        # 'local' temporarily empties the meta slot for the duration of this
+        # eval so meta is never part of the serialized form
+        local $data->{meta}; # don't include meta in serialization
+        $serializer ||= $self->{serializer_obj};
+        # note: this inner $data deliberately shadows the outer object
+        my ($data, $deserializer_class) = $serializer->serialize($data);
+
+        $packet_header_text . $data;
+    };
+    if ($@) {
+        chomp $@;
+        die "Error freezing ".ref($data)." object: $@";
+    }
+
+    # stash the frozen data into the data structure itself
+    # to make life easy for the client caching code in DBD::Gofer::Transport::Base
+    $data->{meta}{frozen} = $frozen if $self->keep_meta_frozen;
+
+    return $frozen;
+}
+# public aliases used by subclasses
+*freeze_request = \&_freeze_data;
+*freeze_response = \&_freeze_data;
+
+
+sub _thaw_data {
+    my ($self, $frozen_data, $serializer, $skip_trace) = @_;
+    # Inverse of _freeze_data: strip and validate the GoFER packet header,
+    # deserialize the payload, and record the transport protocol version.
+    my $data;
+    eval {
+        # check for and extract our gofer header and the info it contains
+        (my $frozen = $frozen_data) =~ s/$packet_header_regex//o
+            or die "does not have gofer header\n";
+        my ($t_version) = $1;
+        $serializer ||= $self->{serializer_obj};
+        $data = $serializer->deserialize($frozen);
+        die ref($serializer)."->deserialize didn't return a reference"
+            unless ref $data;
+        $data->{_transport}{version} = $t_version;
+
+        $data->{meta}{frozen} = $frozen_data if $self->keep_meta_frozen;
+    };
+    if ($@) {
+        chomp(my $err = $@);
+        # remove extra noise from Storable
+        $err =~ s{ at \S+?/Storable.pm \(autosplit into \S+?/Storable/thaw.al\) line \d+(, \S+ line \d+)?}{};
+        my $msg = sprintf "Error thawing: %s (data=%s)", $err, DBI::neat($frozen_data,50);
+        # NOTE(review): relies on Carp having been loaded elsewhere -- confirm
+        Carp::cluck("$msg, pid $$ stack trace follows:"); # XXX if $self->trace;
+        die $msg;
+    }
+    $self->_dump("thawing $self->{trace} ".ref($data), $data)
+        if !$skip_trace and $self->trace;
+
+    return $data;
+}
+# public aliases used by subclasses
+*thaw_request = \&_thaw_data;
+*thaw_response = \&_thaw_data;
+
+
+# this should probably live in the request and response classes
+# and the trace level passed in
+sub _dump {
+    my ($self, $label, $data) = @_;
+    # Emit a trace representation of $data; verbosity scales with the current
+    # trace level: >=4 full Data::Dumper dump, >=2 multi-line summary,
+    # otherwise a one-line outline.
+
+    # don't dump the binary
+    local $data->{meta}{frozen} if $data->{meta} && $data->{meta}{frozen};
+
+    my $trace_level = $self->trace;
+    my $summary;
+    if ($trace_level >= 4) {
+        require Data::Dumper;
+        local $Data::Dumper::Indent    = 1;
+        local $Data::Dumper::Terse     = 1;
+        local $Data::Dumper::Useqq     = 0;
+        local $Data::Dumper::Sortkeys  = 1;
+        local $Data::Dumper::Quotekeys = 0;
+        local $Data::Dumper::Deparse   = 0;
+        local $Data::Dumper::Purity    = 0;
+        $summary = Data::Dumper::Dumper($data);
+    }
+    elsif ($trace_level >= 2) {
+        $summary = eval { $data->summary_as_text } || $@ || "no summary available\n";
+    }
+    else {
+        $summary = eval { $data->outline_as_text."\n" } || $@ || "no summary available\n";
+    }
+    $self->trace_msg("$label: $summary");
+}
+
+
+sub trace_msg {
+    my ($self, $msg, $min_level) = @_;
+    $min_level = 1 unless defined $min_level;
+    # transport trace level can override DBI's trace level: once this
+    # transport's own trace level reaches $min_level, pass 0 so DBI emits
+    # the message regardless of the global DBI trace setting
+    $min_level = 0 if $self->trace >= $min_level;
+    return DBI->trace_msg("gofer ".$msg, $min_level);
+}
+
+1;
+
+=head1 NAME
+
+DBI::Gofer::Transport::Base - Base class for Gofer transports
+
+=head1 DESCRIPTION
+
+This is the base class for server-side Gofer transports.
+
+It's also the base class for the client-side base class L<DBD::Gofer::Transport::Base>.
+
+This is an internal class.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBI/Gofer/Transport/pipeone.pm b/lib/DBI/Gofer/Transport/pipeone.pm
new file mode 100644
index 0000000..d79c2eb
--- /dev/null
+++ b/lib/DBI/Gofer/Transport/pipeone.pm
@@ -0,0 +1,61 @@
+package DBI::Gofer::Transport::pipeone;
+
+# $Id: pipeone.pm 12536 2009-02-24 22:37:09Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use DBI::Gofer::Execute;
+
+use base qw(DBI::Gofer::Transport::Base Exporter);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 12536 $ =~ /(\d+)/o);
+
+our @EXPORT = qw(run_one_stdio);
+
+my $executor = DBI::Gofer::Execute->new();
+
+sub run_one_stdio {
+
+ my $transport = DBI::Gofer::Transport::pipeone->new();
+
+ my $frozen_request = do { local $/; <STDIN> };
+
+ my $response = $executor->execute_request( $transport->thaw_request($frozen_request) );
+
+ my $frozen_response = $transport->freeze_response($response);
+
+ print $frozen_response;
+
+ # no point calling $executor->update_stats(...) for pipeONE
+}
+
+1;
+__END__
+
+=head1 NAME
+
+DBI::Gofer::Transport::pipeone - DBD::Gofer server-side transport for pipeone
+
+=head1 SYNOPSIS
+
+See L<DBD::Gofer::Transport::pipeone>.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
+
diff --git a/lib/DBI/Gofer/Transport/stream.pm b/lib/DBI/Gofer/Transport/stream.pm
new file mode 100644
index 0000000..49de550
--- /dev/null
+++ b/lib/DBI/Gofer/Transport/stream.pm
@@ -0,0 +1,76 @@
+package DBI::Gofer::Transport::stream;
+
+# $Id: stream.pm 12536 2009-02-24 22:37:09Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+use DBI qw(dbi_time);
+use DBI::Gofer::Execute;
+
+use base qw(DBI::Gofer::Transport::pipeone Exporter);
+
+our $VERSION = sprintf("0.%06d", q$Revision: 12536 $ =~ /(\d+)/o);
+
+our @EXPORT = qw(run_stdio_hex);
+
+my $executor = DBI::Gofer::Execute->new();
+
+sub run_stdio_hex {
+
+ my $transport = DBI::Gofer::Transport::stream->new();
+ local $| = 1;
+
+ DBI->trace_msg("$0 started (pid $$)\n");
+
+ local $\; # OUTPUT_RECORD_SEPARATOR
+ local $/ = "\012"; # INPUT_RECORD_SEPARATOR
+ while ( defined( my $encoded_request = <STDIN> ) ) {
+ my $time_received = dbi_time();
+ $encoded_request =~ s/\015?\012$//;
+
+ my $frozen_request = pack "H*", $encoded_request;
+ my $request = $transport->thaw_request( $frozen_request );
+
+ my $response = $executor->execute_request( $request );
+
+ my $frozen_response = $transport->freeze_response($response);
+ my $encoded_response = unpack "H*", $frozen_response;
+
+ print $encoded_response, "\015\012"; # autoflushed due to $|=1
+
+ # there's no way to access the stats currently
+ # so this just serves as a basic test and illustration of update_stats()
+ $executor->update_stats($request, $response, $frozen_request, $frozen_response, $time_received, 1);
+ }
+ DBI->trace_msg("$0 ending (pid $$)\n");
+}
+
+1;
+__END__
+
+=head1 NAME
+
+DBI::Gofer::Transport::stream - DBD::Gofer server-side transport for stream
+
+=head1 SYNOPSIS
+
+See L<DBD::Gofer::Transport::stream>.
+
+=head1 AUTHOR
+
+Tim Bunce, L<http://www.tim.bunce.name>
+
+=head1 LICENCE AND COPYRIGHT
+
+Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
+
+This module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself. See L<perlartistic>.
+
+=cut
diff --git a/lib/DBI/Profile.pm b/lib/DBI/Profile.pm
new file mode 100644
index 0000000..a468c05
--- /dev/null
+++ b/lib/DBI/Profile.pm
@@ -0,0 +1,949 @@
+package DBI::Profile;
+
+=head1 NAME
+
+DBI::Profile - Performance profiling and benchmarking for the DBI
+
+=head1 SYNOPSIS
+
+The easiest way to enable DBI profiling is to set the DBI_PROFILE
+environment variable to 2 and then run your code as usual:
+
+ DBI_PROFILE=2 prog.pl
+
+This will profile your program and then output a textual summary
+grouped by query when the program exits. You can also enable profiling by
+setting the Profile attribute of any DBI handle:
+
+ $dbh->{Profile} = 2;
+
+Then the summary will be printed when the handle is destroyed.
+
+Many other values apart from 2 are possible - see L<"ENABLING A PROFILE"> below.
+
+=head1 DESCRIPTION
+
+The DBI::Profile module provides a simple interface to collect and
+report performance and benchmarking data from the DBI.
+
+For a more elaborate interface, suitable for larger programs, see
+L<DBI::ProfileDumper|DBI::ProfileDumper> and L<dbiprof|dbiprof>.
+For Apache/mod_perl applications see
+L<DBI::ProfileDumper::Apache|DBI::ProfileDumper::Apache>.
+
+=head1 OVERVIEW
+
+Performance data collection for the DBI is built around several
+concepts which are important to understand clearly.
+
+=over 4
+
+=item Method Dispatch
+
+Every method call on a DBI handle passes through a single 'dispatch'
+function which manages all the common aspects of DBI method calls,
+such as handling the RaiseError attribute.
+
+=item Data Collection
+
+If profiling is enabled for a handle then the dispatch code takes
+a high-resolution timestamp soon after it is entered. Then, after
+calling the appropriate method and just before returning, it takes
+another high-resolution timestamp and calls a function to record
+the information. That function is passed the two timestamps
+plus the DBI handle and the name of the method that was called.
+That data about a single DBI method call is called a I<profile sample>.
+
+=item Data Filtering
+
+If the method call was invoked by the DBI or by a driver then the call is
+ignored for profiling because the time spent will be accounted for by the
+original 'outermost' call for your code.
+
+For example, the calls that the selectrow_arrayref() method makes
+to prepare() and execute() etc. are not counted individually
+because the time spent in those methods is going to be allocated
+to the selectrow_arrayref() method when it returns. If this was not
+done then it would be very easy to double count time spent inside
+the DBI.
+
+=item Data Storage Tree
+
+The profile data is accumulated as 'leaves on a tree'. The 'path' through the
+branches of the tree to a particular leaf is determined dynamically for each sample.
+This is a key feature of DBI profiling.
+
+For each profiled method call the DBI walks along the Path and uses each value
+in the Path to step into and grow the Data tree.
+
+For example, if the Path is
+
+ [ 'foo', 'bar', 'baz' ]
+
+then the new profile sample data will be I<merged> into the tree at
+
+ $h->{Profile}->{Data}->{foo}->{bar}->{baz}
+
+But it's not very useful to merge all the call data into one leaf node (except
+to get an overall 'time spent inside the DBI' total). It's more common to want
+the Path to include dynamic values such as the current statement text and/or
+the name of the method called to show what the time spent inside the DBI was for.
+
+The Path can contain some 'magic cookie' values that are automatically replaced
+by corresponding dynamic values when they're used. These magic cookies always
+start with a punctuation character.
+
+For example a value of 'C<!MethodName>' in the Path causes the corresponding
+entry in the Data to be the name of the method that was called.
+For example, if the Path was:
+
+ [ 'foo', '!MethodName', 'bar' ]
+
+and the selectall_arrayref() method was called, then the profile sample data
+for that call will be merged into the tree at:
+
+ $h->{Profile}->{Data}->{foo}->{selectall_arrayref}->{bar}
+
+=item Profile Data
+
+Profile data is stored at the 'leaves' of the tree as references
+to an array of numeric values. For example:
+
+ [
+ 106, # 0: count of samples at this node
+ 0.0312958955764771, # 1: total duration
+ 0.000490069389343262, # 2: first duration
+ 0.000176072120666504, # 3: shortest duration
+ 0.00140702724456787, # 4: longest duration
+ 1023115819.83019, # 5: time of first sample
+ 1023115819.86576, # 6: time of last sample
+ ]
+
+After the first sample, later samples always update elements 0, 1, and 6, and
+may update 3 or 4 depending on the duration of the sampled call.
+
+=back
+
+=head1 ENABLING A PROFILE
+
+Profiling is enabled for a handle by assigning to the Profile
+attribute. For example:
+
+ $h->{Profile} = DBI::Profile->new();
+
+The Profile attribute holds a blessed reference to a hash object
+that contains the profile data and attributes relating to it.
+
+The class the Profile object is blessed into is expected to
+provide at least a DESTROY method which will dump the profile data
+to the DBI trace file handle (STDERR by default).
+
+All these examples have the same effect as each other:
+
+ $h->{Profile} = 0;
+ $h->{Profile} = "/DBI::Profile";
+ $h->{Profile} = DBI::Profile->new();
+ $h->{Profile} = {};
+ $h->{Profile} = { Path => [] };
+
+Similarly, these examples have the same effect as each other:
+
+ $h->{Profile} = 6;
+ $h->{Profile} = "6/DBI::Profile";
+ $h->{Profile} = "!Statement:!MethodName/DBI::Profile";
+ $h->{Profile} = { Path => [ '!Statement', '!MethodName' ] };
+
+If a non-blessed hash reference is given then the DBI::Profile
+module is automatically C<require>'d and the reference is blessed
+into that class.
+
+If a string is given then it is processed like this:
+
+ ($path, $module, $args) = split /\//, $string, 3
+
+ @path = split /:/, $path
+ @args = split /:/, $args
+
+ eval "require $module" if $module
+ $module ||= "DBI::Profile"
+
+ $module->new( Path => \@Path, @args )
+
+So the first value is used to select the Path to be used (see below).
+The second value, if present, is used as the name of a module which
+will be loaded and it's C<new> method called. If not present it
+defaults to DBI::Profile. Any other values are passed as arguments
+to the C<new> method. For example: "C<2/DBIx::OtherProfile/Foo:42>".
+
+Numbers can be used as a shorthand way to enable common Path values.
+The simplest way to explain how the values are interpreted is to show the code:
+
+ push @Path, "DBI" if $path_elem & 0x01;
+ push @Path, "!Statement" if $path_elem & 0x02;
+ push @Path, "!MethodName" if $path_elem & 0x04;
+ push @Path, "!MethodClass" if $path_elem & 0x08;
+ push @Path, "!Caller2" if $path_elem & 0x10;
+
+So "2" is the same as "!Statement" and "6" (2+4) is the same as
+"!Statement:!Method". Those are the two most commonly used values. Using a
+negative number will reverse the path. Thus "-6" will group by method name then
+statement.
+
+The splitting and parsing of string values assigned to the Profile
+attribute may seem a little odd, but there's a good reason for it.
+Remember that attributes can be embedded in the Data Source Name
+string which can be passed in to a script as a parameter. For
+example:
+
+ dbi:DriverName(Profile=>2):dbname
+ dbi:DriverName(Profile=>{Username}:!Statement/MyProfiler/Foo:42):dbname
+
+And also, if the C<DBI_PROFILE> environment variable is set then
+The DBI arranges for every driver handle to share the same profile
+object. When perl exits a single profile summary will be generated
+that reflects (as nearly as practical) the total use of the DBI by
+the application.
+
+
+=head1 THE PROFILE OBJECT
+
+The DBI core expects the Profile attribute value to be a hash
+reference and if the following values don't exist it will create
+them as needed:
+
+=head2 Data
+
+A reference to a hash containing the collected profile data.
+
+=head2 Path
+
+The Path value is a reference to an array. Each element controls the
+value to use at the corresponding level of the profile Data tree.
+
+If the value of Path is anything other than an array reference,
+it is treated as if it was:
+
+ [ '!Statement' ]
+
+The elements of Path array can be one of the following types:
+
+=head3 Special Constant
+
+B<!Statement>
+
+Use the current Statement text. Typically that's the value of the Statement
+attribute for the handle the method was called with. Some methods, like
+commit() and rollback(), are unrelated to a particular statement. For those
+methods !Statement records an empty string.
+
+For statement handles this is always simply the string that was
+given to prepare() when the handle was created. For database handles
+this is the statement that was last prepared or executed on that
+database handle. That can lead to a little 'fuzziness' because, for
+example, calls to the quote() method to build a new statement will
+typically be associated with the previous statement. In practice
+this isn't a significant issue and the dynamic Path mechanism can
+be used to setup your own rules.
+
+B<!MethodName>
+
+Use the name of the DBI method that the profile sample relates to.
+
+B<!MethodClass>
+
+Use the fully qualified name of the DBI method, including
+the package, that the profile sample relates to. This shows you
+where the method was implemented. For example:
+
+ 'DBD::_::db::selectrow_arrayref' =>
+ 0.022902s
+ 'DBD::mysql::db::selectrow_arrayref' =>
+ 2.244521s / 99 = 0.022445s avg (first 0.022813s, min 0.022051s, max 0.028932s)
+
+The "DBD::_::db::selectrow_arrayref" shows that the driver has
+inherited the selectrow_arrayref method provided by the DBI.
+
+But you'll note that there is only one call to
+DBD::_::db::selectrow_arrayref but another 99 to
+DBD::mysql::db::selectrow_arrayref. Currently the first
+call doesn't record the true location. That may change.
+
+B<!Caller>
+
+Use a string showing the filename and line number of the code calling the method.
+
+B<!Caller2>
+
+Use a string showing the filename and line number of the code calling the
+method, as for !Caller, but also include filename and line number of the code
+that called that. Calls from DBI:: and DBD:: packages are skipped.
+
+B<!File>
+
+Same as !Caller above except that only the filename is included, not the line number.
+
+B<!File2>
+
+Same as !Caller2 above except that only the filenames are included, not the line number.
+
+B<!Time>
+
+Use the current value of time(). Rarely used. See the more useful C<!Time~N> below.
+
+B<!Time~N>
+
+Where C<N> is an integer. Use the current value of time() but with reduced precision.
+The value used is determined in this way:
+
+ int( time() / N ) * N
+
+This is a useful way to segregate a profile into time slots. For example:
+
+ [ '!Time~60', '!Statement' ]
+
+=head3 Code Reference
+
+The subroutine is passed the handle it was called on and the DBI method name.
+The current Statement is in $_. The statement string should not be modified,
+so most subs start with C<local $_ = $_;>.
+
+The list of values it returns is used at that point in the Profile Path.
+
+The sub can 'veto' (reject) a profile sample by including a reference to undef
+in the returned list. That can be useful when you want to only profile
+statements that match a certain pattern, or only profile certain methods.
+
+=head3 Subroutine Specifier
+
+A Path element that begins with 'C<&>' is treated as the name of a subroutine
+in the DBI::ProfileSubs namespace and replaced with the corresponding code reference.
+
+Currently this only works when the Path is specified by the C<DBI_PROFILE>
+environment variable.
+
+Also, currently, the only subroutine in the DBI::ProfileSubs namespace is
+C<'&norm_std_n3'>. That's a very handy subroutine when profiling code that
+doesn't use placeholders. See L<DBI::ProfileSubs> for more information.
+
+=head3 Attribute Specifier
+
+A string enclosed in braces, such as 'C<{Username}>', specifies that the current
+value of the corresponding database handle attribute should be used at that
+point in the Path.
+
+=head3 Reference to a Scalar
+
+Specifies that the current value of the referenced scalar be used at that point
+in the Path. This provides an efficient way to get 'contextual' values into
+your profile.
+
+=head3 Other Values
+
+Any other values are stringified and used literally.
+
+(References, and values that begin with punctuation characters are reserved.)
+
+
+=head1 REPORTING
+
+=head2 Report Format
+
+The current accumulated profile data can be formatted and output using
+
+ print $h->{Profile}->format;
+
+To discard the profile data and start collecting fresh data
+you can do:
+
+ $h->{Profile}->{Data} = undef;
+
+
+The default results format looks like this:
+
+ DBI::Profile: 0.001015s 42.7% (5 calls) programname @ YYYY-MM-DD HH:MM:SS
+ '' =>
+ 0.000024s / 2 = 0.000012s avg (first 0.000015s, min 0.000009s, max 0.000015s)
+ 'SELECT mode,size,name FROM table' =>
+ 0.000991s / 3 = 0.000330s avg (first 0.000678s, min 0.000009s, max 0.000678s)
+
+Which shows the total time spent inside the DBI, with a count of
+the total number of method calls and the name of the script being
+run, then a formatted version of the profile data tree.
+
+If the results are being formatted when the perl process is exiting
+(which is usually the case when the DBI_PROFILE environment variable
+is used) then the percentage of time the process spent inside the
+DBI is also shown. If the process is not exiting then the percentage is
+calculated using the time between the first and last call to the DBI.
+
+In the example above the paths in the tree are only one level deep and
+use the Statement text as the value (that's the default behaviour).
+
+The merged profile data at the 'leaves' of the tree are presented
+as total time spent, count, average time spent (which is simply total
+time divided by the count), then the time spent on the first call,
+the time spent on the fastest call, and finally the time spent on
+the slowest call.
+
+The 'avg', 'first', 'min' and 'max' times are not particularly
+useful when the profile data path only contains the statement text.
+Here's an extract of a more detailed example using both statement
+text and method name in the path:
+
+ 'SELECT mode,size,name FROM table' =>
+ 'FETCH' =>
+ 0.000076s
+ 'fetchrow_hashref' =>
+ 0.036203s / 108 = 0.000335s avg (first 0.000490s, min 0.000152s, max 0.002786s)
+
+Here you can see the 'avg', 'first', 'min' and 'max' for the
+108 calls to fetchrow_hashref() become rather more interesting.
+Also the data for FETCH just shows a time value because it was only
+called once.
+
+Currently the profile data is output sorted by branch names. That
+may change in a later version so the leaf nodes are sorted by total
+time per leaf node.
+
+
+=head2 Report Destination
+
+The default method of reporting is for the DESTROY method of the
+Profile object to format the results and write them using:
+
+ DBI->trace_msg($results, 0); # see $ON_DESTROY_DUMP below
+
+to write them to the DBI trace() filehandle (which defaults to
+STDERR). To direct the DBI trace filehandle to write to a file
+without enabling tracing the trace() method can be called with a
+trace level of 0. For example:
+
+ DBI->trace(0, $filename);
+
+The same effect can be achieved without changing the code by
+setting the C<DBI_TRACE> environment variable to C<0=filename>.
+
+The $DBI::Profile::ON_DESTROY_DUMP variable holds a code ref
+that's called to perform the output of the formatted results.
+The default value is:
+
+ $ON_DESTROY_DUMP = sub { DBI->trace_msg($results, 0) };
+
+Apart from making it easy to send the dump elsewhere, it can also
+be useful as a simple way to disable dumping results.
+
+=head1 CHILD HANDLES
+
+Child handles inherit a reference to the Profile attribute value
+of their parent. So if profiling is enabled for a database handle
+then by default the statement handles created from it all contribute
+to the same merged profile data tree.
+
+
+=head1 PROFILE OBJECT METHODS
+
+=head2 format
+
+See L</REPORTING>.
+
+=head2 as_node_path_list
+
+ @ary = $dbh->{Profile}->as_node_path_list();
+ @ary = $dbh->{Profile}->as_node_path_list($node, $path);
+
+Returns the collected data ($dbh->{Profile}{Data}) restructured into a list of
+array refs, one for each leaf node in the Data tree. This 'flat' structure is
+often much simpler for applications to work with.
+
+The first element of each array ref is a reference to the leaf node.
+The remaining elements are the 'path' through the data tree to that node.
+
+For example, given a data tree like this:
+
+ {key1a}{key2a}[node1]
+ {key1a}{key2b}[node2]
+ {key1b}{key2a}{key3a}[node3]
+
+The as_node_path_list() method will return this list:
+
+ [ [node1], 'key1a', 'key2a' ]
+ [ [node2], 'key1a', 'key2b' ]
+ [ [node3], 'key1b', 'key2a', 'key3a' ]
+
+The nodes are ordered by key, depth-first.
+
+The $node argument can be used to focus on a sub-tree.
+If not specified it defaults to $dbh->{Profile}{Data}.
+
+The $path argument can be used to specify a list of path elements that will be
+added to each element of the returned list. If not specified it defaults to a
+ref to an empty array.
+
+=head2 as_text
+
+ @txt = $dbh->{Profile}->as_text();
+ $txt = $dbh->{Profile}->as_text({
+ node => undef,
+ path => [],
+ separator => " > ",
+ format => '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n";
+ sortsub => sub { ... },
+ );
+
+Returns the collected data ($dbh->{Profile}{Data}) reformatted into a list of formatted strings.
+In scalar context the list is returned as a single concatenated string.
+
+A hashref can be used to pass in arguments, the default values are shown in the example above.
+
+The C<node> and <path> arguments are passed to as_node_path_list().
+
+The C<separator> argument is used to join the elements of the path for each leaf node.
+
+The C<sortsub> argument is used to pass in a ref to a sub that will order the list.
+The subroutine will be passed a reference to the array returned by
+as_node_path_list() and should sort the contents of the array in place.
+The return value from the sub is ignored. For example, to sort the nodes by the
+second level key you could use:
+
+ sortsub => sub { my $ary=shift; @$ary = sort { $a->[2] cmp $b->[2] } @$ary }
+
+The C<format> argument is a C<sprintf> format string that specifies the format
+to use for each leaf node. It uses the explicit format parameter index
+mechanism to specify which of the arguments should appear where in the string.
+The arguments to sprintf are:
+
+ 1: path to node, joined with the separator
+ 2: average duration (total duration/count)
+ (3 thru 9 are currently unused)
+ 10: count
+ 11: total duration
+ 12: first duration
+ 13: smallest duration
+ 14: largest duration
+ 15: time of first call
+ 16: time of last call
+
+=head1 CUSTOM DATA MANIPULATION
+
+Recall that C<< $h->{Profile}->{Data} >> is a reference to the collected data.
+Either to a 'leaf' array (when the Path is empty, i.e., DBI_PROFILE env var is 1),
+or a reference to hash containing values that are either further hash
+references or leaf array references.
+
+Sometimes it's useful to be able to summarise some or all of the collected data.
+The dbi_profile_merge_nodes() function can be used to merge leaf node values.
+
+=head2 dbi_profile_merge_nodes
+
+ use DBI qw(dbi_profile_merge_nodes);
+
+ $time_in_dbi = dbi_profile_merge_nodes(my $totals=[], @$leaves);
+
+Merges profile data node. Given a reference to a destination array, and zero or
+more references to profile data, merges the profile data into the destination array.
+For example:
+
+ $time_in_dbi = dbi_profile_merge_nodes(
+ my $totals=[],
+ [ 10, 0.51, 0.11, 0.01, 0.22, 1023110000, 1023110010 ],
+ [ 15, 0.42, 0.12, 0.02, 0.23, 1023110005, 1023110009 ],
+ );
+
+$totals will then contain
+
+ [ 25, 0.93, 0.11, 0.01, 0.23, 1023110000, 1023110010 ]
+
+and $time_in_dbi will be 0.93;
+
+The second argument need not be just leaf nodes. If given a reference to a hash
+then the hash is recursively searched for leaf nodes and all those found
+are merged.
+
+For example, to get the time spent 'inside' the DBI during an http request,
+your logging code run at the end of the request (i.e. mod_perl LogHandler)
+could use:
+
+ my $time_in_dbi = 0;
+ if (my $Profile = $dbh->{Profile}) { # if DBI profiling is enabled
+ $time_in_dbi = dbi_profile_merge_nodes(my $total=[], $Profile->{Data});
+ $Profile->{Data} = {}; # reset the profile data
+ }
+
+If profiling has been enabled then $time_in_dbi will hold the time spent inside
+the DBI for that handle (and any other handles that share the same profile data)
+since the last request.
+
+Prior to DBI 1.56 the dbi_profile_merge_nodes() function was called dbi_profile_merge().
+That name still exists as an alias.
+
+=head1 CUSTOM DATA COLLECTION
+
+=head2 Using The Path Attribute
+
+ XXX example to be added later using a selectall_arrayref call
+ XXX nested inside a fetch loop where the first column of the
+ XXX outer loop is bound to the profile Path using
+ XXX bind_column(1, \${ $dbh->{Profile}->{Path}->[0] })
+ XXX so you end up with separate profiles for each loop
+ XXX (patches welcome to add this to the docs :)
+
+=head2 Adding Your Own Samples
+
+The dbi_profile() function can be used to add extra sample data
+into the profile data tree. For example:
+
+ use DBI;
+ use DBI::Profile qw(dbi_profile dbi_time);
+
+ my $t1 = dbi_time(); # floating point high-resolution time
+
+ ... execute code you want to profile here ...
+
+ my $t2 = dbi_time();
+ dbi_profile($h, $statement, $method, $t1, $t2);
+
+The $h parameter is the handle the extra profile sample should be
+associated with. The $statement parameter is the string to use where
+the Path specifies !Statement. If $statement is undef
+then $h->{Statement} will be used. Similarly $method is the string
+to use if the Path specifies !MethodName. There is no
+default value for $method.
+
+The $h->{Profile}{Path} attribute is processed by dbi_profile() in
+the usual way.
+
+The $h parameter is usually a DBI handle but it can also be a reference to a
+hash, in which case the dbi_profile() acts on each defined value in the hash.
+This is an efficient way to update multiple profiles with a single sample,
+and is used by the L<DashProfiler> module.
+
+=head1 SUBCLASSING
+
+Alternate profile modules must subclass DBI::Profile to help ensure
+they work with future versions of the DBI.
+
+
+=head1 CAVEATS
+
+Applications which generate many different statement strings
+(typically because they don't use placeholders) and profile with
+!Statement in the Path (the default) will consume memory
+in the Profile Data structure for each statement. Use a code ref
+in the Path to return an edited (simplified) form of the statement.
+
+If a method throws an exception itself (not via RaiseError) then
+it won't be counted in the profile.
+
+If a HandleError subroutine throws an exception (rather than returning
+0 and letting RaiseError do it) then the method call won't be counted
+in the profile.
+
+Time spent in DESTROY is added to the profile of the parent handle.
+
+Time spent in DBI->*() methods is not counted. The time spent in
+the driver connect method, $drh->connect(), when it's called by
+DBI->connect is counted if the DBI_PROFILE environment variable is set.
+
+Time spent fetching tied variables, $DBI::errstr, is counted.
+
+Time spent in FETCH for $h->{Profile} is not counted, so getting the profile
+data doesn't alter it.
+
+DBI::PurePerl does not support profiling (though it could in theory).
+
+For asynchronous queries, time spent while the query is running on the
+backend is not counted.
+
+A few platforms don't support the gettimeofday() high resolution
+time function used by the DBI (and available via the dbi_time() function).
+In which case you'll get integer resolution time which is mostly useless.
+
+On Windows platforms the dbi_time() function is limited to millisecond
+resolution. Which isn't sufficiently fine for our needs, but still
+much better than integer resolution. This limited resolution means
+that fast method calls will often register as taking 0 time. And
+timings in general will have much more 'jitter' depending on where
+within the 'current millisecond' the start and end timing was taken.
+
+This documentation could be more clear. Probably needs to be reordered
+to start with several examples and build from there. Trying to
+explain the concepts first seems painful and to lead to just as
+many forward references. (Patches welcome!)
+
+=cut
+
+
+use strict;
+use vars qw(@ISA @EXPORT @EXPORT_OK $VERSION);
+use Exporter ();
+use UNIVERSAL ();
+use Carp;
+
+use DBI qw(dbi_time dbi_profile dbi_profile_merge_nodes dbi_profile_merge);
+
+$VERSION = sprintf("2.%06d", q$Revision: 15064 $ =~ /(\d+)/o);
+
+
+@ISA = qw(Exporter);
+@EXPORT = qw(
+ DBIprofile_Statement
+ DBIprofile_MethodName
+ DBIprofile_MethodClass
+ dbi_profile
+ dbi_profile_merge_nodes
+ dbi_profile_merge
+ dbi_time
+);
+@EXPORT_OK = qw(
+ format_profile_thingy
+);
+
+use constant DBIprofile_Statement => '!Statement';
+use constant DBIprofile_MethodName => '!MethodName';
+use constant DBIprofile_MethodClass => '!MethodClass';
+
+our $ON_DESTROY_DUMP = sub { DBI->trace_msg(shift, 0) };
+our $ON_FLUSH_DUMP = sub { DBI->trace_msg(shift, 0) };
+
+sub new {
+ my $class = shift;
+ my $profile = { @_ };
+ return bless $profile => $class;
+}
+
+
+sub _auto_new {
+ my $class = shift;
+ my ($arg) = @_;
+
+ # This sub is called by DBI internals when a non-hash-ref is
+ # assigned to the Profile attribute. For example
+ # dbi:mysql(RaiseError=>1,Profile=>!Statement:!MethodName/DBIx::MyProfile/arg1:arg2):dbname
+ # This sub works out what to do and returns a suitable hash ref.
+
+ $arg =~ s/^DBI::/2\/DBI::/
+ and carp "Automatically changed old-style DBI::Profile specification to $arg";
+
+ # it's a path/module/k1:v1:k2:v2:... list
+ my ($path, $package, $args) = split /\//, $arg, 3;
+ my @args = (defined $args) ? split(/:/, $args, -1) : ();
+ my @Path;
+
+ for my $element (split /:/, $path) {
+ if (DBI::looks_like_number($element)) {
+ my $reverse = ($element < 0) ? ($element=-$element, 1) : 0;
+ my @p;
+ # a single "DBI" is special-cased in format()
+ push @p, "DBI" if $element & 0x01;
+ push @p, DBIprofile_Statement if $element & 0x02;
+ push @p, DBIprofile_MethodName if $element & 0x04;
+ push @p, DBIprofile_MethodClass if $element & 0x08;
+ push @p, '!Caller2' if $element & 0x10;
+ push @Path, ($reverse ? reverse @p : @p);
+ }
+ elsif ($element =~ m/^&(\w.*)/) {
+ my $name = "DBI::ProfileSubs::$1"; # capture $1 early
+ require DBI::ProfileSubs;
+ my $code = do { no strict; *{$name}{CODE} };
+ if (defined $code) {
+ push @Path, $code;
+ }
+ else {
+ warn "$name: subroutine not found\n";
+ push @Path, $element;
+ }
+ }
+ else {
+ push @Path, $element;
+ }
+ }
+
+ eval "require $package" if $package; # silently ignores errors
+ $package ||= $class;
+
+ return $package->new(Path => \@Path, @args);
+}
+
+
+sub empty { # empty out profile data
+ my $self = shift;
+ DBI->trace_msg("profile data discarded\n",0) if $self->{Trace};
+ $self->{Data} = undef;
+}
+
+sub filename { # baseclass method, see DBI::ProfileDumper
+ return undef;
+}
+
+sub flush_to_disk { # baseclass method, see DBI::ProfileDumper & DashProfiler::Core
+ my $self = shift;
+ return unless $ON_FLUSH_DUMP;
+ return unless $self->{Data};
+ my $detail = $self->format();
+ $ON_FLUSH_DUMP->($detail) if $detail;
+}
+
+
+sub as_node_path_list {
+ my ($self, $node, $path) = @_;
+ # convert the tree into an array of arrays
+ # from
+ # {key1a}{key2a}[node1]
+ # {key1a}{key2b}[node2]
+ # {key1b}{key2a}{key3a}[node3]
+ # to
+ # [ [node1], 'key1a', 'key2a' ]
+ # [ [node2], 'key1a', 'key2b' ]
+ # [ [node3], 'key1b', 'key2a', 'key3a' ]
+
+ $node ||= $self->{Data} or return;
+ $path ||= [];
+ if (ref $node eq 'HASH') { # recurse
+ $path = [ @$path, undef ];
+ return map {
+ $path->[-1] = $_;
+ ($node->{$_}) ? $self->as_node_path_list($node->{$_}, $path) : ()
+ } sort keys %$node;
+ }
+ return [ $node, @$path ];
+}
+
+
+sub as_text {
+ my ($self, $args_ref) = @_;
+ my $separator = $args_ref->{separator} || " > ";
+ my $format_path_element = $args_ref->{format_path_element}
+ || "%s"; # or e.g., " key%2$d='%s'"
+ my $format = $args_ref->{format}
+ || '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n";
+
+ my @node_path_list = $self->as_node_path_list(undef, $args_ref->{path});
+
+ $args_ref->{sortsub}->(\@node_path_list) if $args_ref->{sortsub};
+
+ my $eval = "qr/".quotemeta($separator)."/";
+ my $separator_re = eval($eval) || quotemeta($separator);
+ #warn "[$eval] = [$separator_re]";
+ my @text;
+ my @spare_slots = (undef) x 7;
+ for my $node_path (@node_path_list) {
+ my ($node, @path) = @$node_path;
+ my $idx = 0;
+ for (@path) {
+ s/[\r\n]+/ /g;
+ s/$separator_re/ /g;
+ $_ = sprintf $format_path_element, $_, ++$idx;
+ }
+ push @text, sprintf $format,
+ join($separator, @path), # 1=path
+ ($node->[0] ? $node->[1]/$node->[0] : 0), # 2=avg
+ @spare_slots,
+ @$node; # 10=count, 11=dur, 12=first_dur, 13=min, 14=max, 15=first_called, 16=last_called
+ }
+ return @text if wantarray;
+ return join "", @text;
+}
+
+
+sub format {
+ my $self = shift;
+ my $class = ref($self) || $self;
+
+ my $prologue = "$class: ";
+ my $detail = $self->format_profile_thingy(
+ $self->{Data}, 0, " ",
+ my $path = [],
+ my $leaves = [],
+ )."\n";
+
+ if (@$leaves) {
+ dbi_profile_merge_nodes(my $totals=[], @$leaves);
+ my ($count, $time_in_dbi, undef, undef, undef, $t1, $t2) = @$totals;
+ (my $progname = $0) =~ s:.*/::;
+ if ($count) {
+ $prologue .= sprintf "%fs ", $time_in_dbi;
+ my $perl_time = ($DBI::PERL_ENDING) ? time() - $^T : $t2-$t1;
+ $prologue .= sprintf "%.2f%% ", $time_in_dbi/$perl_time*100 if $perl_time;
+ my @lt = localtime(time);
+ my $ts = sprintf "%d-%02d-%02d %02d:%02d:%02d",
+ 1900+$lt[5], $lt[4]+1, @lt[3,2,1,0];
+ $prologue .= sprintf "(%d calls) $progname \@ $ts\n", $count;
+ }
+ if (@$leaves == 1 && ref($self->{Data}) eq 'HASH' && $self->{Data}->{DBI}) {
+ $detail = ""; # hide the "DBI" from DBI_PROFILE=1
+ }
+ }
+ return ($prologue, $detail) if wantarray;
+ return $prologue.$detail;
+}
+
+
+sub format_profile_leaf {
+ my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
+ croak "format_profile_leaf called on non-leaf ($thingy)"
+ unless UNIVERSAL::isa($thingy,'ARRAY');
+
+ push @$leaves, $thingy if $leaves;
+ my ($count, $total_time, $first_time, $min, $max, $first_called, $last_called) = @$thingy;
+ return sprintf "%s%fs\n", ($pad x $depth), $total_time
+ if $count <= 1;
+ return sprintf "%s%fs / %d = %fs avg (first %fs, min %fs, max %fs)\n",
+ ($pad x $depth), $total_time, $count, $count ? $total_time/$count : 0,
+ $first_time, $min, $max;
+}
+
+
+sub format_profile_branch {
+ my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
+ croak "format_profile_branch called on non-branch ($thingy)"
+ unless UNIVERSAL::isa($thingy,'HASH');
+ my @chunk;
+ my @keys = sort keys %$thingy;
+ while ( @keys ) {
+ my $k = shift @keys;
+ my $v = $thingy->{$k};
+ push @$path, $k;
+ push @chunk, sprintf "%s'%s' =>\n%s",
+ ($pad x $depth), $k,
+ $self->format_profile_thingy($v, $depth+1, $pad, $path, $leaves);
+ pop @$path;
+ }
+ return join "", @chunk;
+}
+
+
+sub format_profile_thingy {
+ my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
+ return "undef" if not defined $thingy;
+ return $self->format_profile_leaf( $thingy, $depth, $pad, $path, $leaves)
+ if UNIVERSAL::isa($thingy,'ARRAY');
+ return $self->format_profile_branch($thingy, $depth, $pad, $path, $leaves)
+ if UNIVERSAL::isa($thingy,'HASH');
+ return "$thingy\n";
+}
+
+
+sub on_destroy {
+ my $self = shift;
+ return unless $ON_DESTROY_DUMP;
+ return unless $self->{Data};
+ my $detail = $self->format();
+ $ON_DESTROY_DUMP->($detail) if $detail;
+ $self->{Data} = undef;
+}
+
+sub DESTROY {
+ my $self = shift;
+ local $@;
+ DBI->trace_msg("profile data DESTROY\n",0)
+ if (($self->{Trace}||0) >= 2);
+ eval { $self->on_destroy };
+ if ($@) {
+ chomp $@;
+ my $class = ref($self) || $self;
+ DBI->trace_msg("$class on_destroy failed: $@", 0);
+ }
+}
+
+1;
+
diff --git a/lib/DBI/ProfileData.pm b/lib/DBI/ProfileData.pm
new file mode 100644
index 0000000..b2db087
--- /dev/null
+++ b/lib/DBI/ProfileData.pm
@@ -0,0 +1,737 @@
+package DBI::ProfileData;
+use strict;
+
+=head1 NAME
+
+DBI::ProfileData - manipulate DBI::ProfileDumper data dumps
+
+=head1 SYNOPSIS
+
+The easiest way to use this module is through the dbiprof frontend
+(see L<dbiprof> for details):
+
+ dbiprof --number 15 --sort count
+
+This module can also be used to roll your own profile analysis:
+
+ # load data from dbi.prof
+ $prof = DBI::ProfileData->new(File => "dbi.prof");
+
+ # get a count of the records (unique paths) in the data set
+ $count = $prof->count();
+
+ # sort by longest overall time
+ $prof->sort(field => "longest");
+
+ # sort by longest overall time, least to greatest
+ $prof->sort(field => "longest", reverse => 1);
+
+ # exclude records with key2 eq 'disconnect'
+ $prof->exclude(key2 => 'disconnect');
+
+ # exclude records with key1 matching /^UPDATE/i
+ $prof->exclude(key1 => qr/^UPDATE/i);
+
+ # remove all records except those where key1 matches /^SELECT/i
+ $prof->match(key1 => qr/^SELECT/i);
+
+ # produce a formatted report with the given number of items
+ $report = $prof->report(number => 10);
+
+ # clone the profile data set
+ $clone = $prof->clone();
+
+ # get access to hash of header values
+ $header = $prof->header();
+
+ # get access to sorted array of nodes
+ $nodes = $prof->nodes();
+
+ # format a single node in the same style as report()
+ $text = $prof->format($nodes->[0]);
+
+ # get access to Data hash in DBI::Profile format
+ $Data = $prof->Data();
+
+=head1 DESCRIPTION
+
+This module offers the ability to read, manipulate and format
+DBI::ProfileDumper profile data.
+
+Conceptually, a profile consists of a series of records, or nodes,
+each of which has a set of statistics and a set of keys. Each record
+must have a unique set of keys, but there is no requirement that every
+record have the same number of keys.
+
+=head1 METHODS
+
+The following methods are supported by DBI::ProfileData objects.
+
+=cut
+
+
+our $VERSION = sprintf("2.%06d", q$Revision: 10007 $ =~ /(\d+)/o);
+
+use Carp qw(croak);
+use Symbol;
+use Fcntl qw(:flock);
+
+use DBI::Profile qw(dbi_profile_merge);
+
+# some constants for use with node data arrays
+sub COUNT () { 0 };
+sub TOTAL () { 1 };
+sub FIRST () { 2 };
+sub SHORTEST () { 3 };
+sub LONGEST () { 4 };
+sub FIRST_AT () { 5 };
+sub LAST_AT () { 6 };
+sub PATH () { 7 };
+
+
+my $HAS_FLOCK = (defined $ENV{DBI_PROFILE_FLOCK})
+ ? $ENV{DBI_PROFILE_FLOCK}
+ : do { local $@; eval { flock STDOUT, 0; 1 } };
+
+
+=head2 $prof = DBI::ProfileData->new(File => "dbi.prof")
+
+=head2 $prof = DBI::ProfileData->new(File => "dbi.prof", Filter => sub { ... })
+
+=head2 $prof = DBI::ProfileData->new(Files => [ "dbi.prof.1", "dbi.prof.2" ])
+
+Creates a new DBI::ProfileData object. Takes either a single file
+through the File option or a list of Files in an array ref. If
+multiple files are specified then the header data from the first file
+is used.
+
+=head3 Files
+
+Reference to an array of file names to read.
+
+=head3 File
+
+Name of file to read. Takes precedence over C<Files>.
+
+=head3 DeleteFiles
+
+If true, the files are deleted after being read.
+
+Actually the files are renamed with a C<.deleteme> suffix before being read,
+and then, after reading all the files, they're all deleted together.
+
+The files are locked while being read which, combined with the rename, makes it
+safe to 'consume' files that are still being generated by L<DBI::ProfileDumper>.
+
+=head3 Filter
+
+The C<Filter> parameter can be used to supply a code reference that can
+manipulate the profile data as it is being read. This is most useful for
+editing SQL statements so that slightly different statements in the raw data
+will be merged and aggregated in the loaded data. For example:
+
+ Filter => sub {
+ my ($path_ref, $data_ref) = @_;
+ s/foo = '.*?'/foo = '...'/ for @$path_ref;
+ }
+
+Here's an example that performs some normalization on the SQL. It converts all
+numbers to C<N> and all quoted strings to C<S>. It can also convert digits to
+N within names. Finally, it summarizes long "IN (...)" clauses.
+
+It's aggressive and simplistic, but it's often sufficient, and serves as an
+example that you can tailor to suit your own needs:
+
+ Filter => sub {
+ my ($path_ref, $data_ref) = @_;
+ local $_ = $path_ref->[0]; # whichever element contains the SQL Statement
+ s/\b\d+\b/N/g; # 42 -> N
+ s/\b0x[0-9A-Fa-f]+\b/N/g; # 0xFE -> N
+ s/'.*?'/'S'/g; # single quoted strings (doesn't handle escapes)
+ s/".*?"/"S"/g; # double quoted strings (doesn't handle escapes)
+ # convert names like log_20001231 into log_NNNNNNNN, controlled by $opt{n}
+ s/([a-z_]+)(\d{$opt{n},})/$1.('N' x length($2))/ieg if $opt{n};
+ # abbreviate massive "in (...)" statements and similar
+ s!(([NS],){100,})!sprintf("$2,{repeated %d times}",length($1)/2)!eg;
+ }
+
+It's often better to perform these kinds of normalization in the DBI while the
+data is being collected, to avoid too much memory being used by storing profile
+data for many different SQL statements. See L<DBI::Profile>.
+
+=cut
+
+sub new {
+ my $pkg = shift;
+ my $self = {
+ Files => [ "dbi.prof" ],
+ Filter => undef,
+ DeleteFiles => 0,
+ LockFile => $HAS_FLOCK,
+ _header => {},
+ _nodes => [],
+ _node_lookup => {},
+ _sort => 'none',
+ @_
+ };
+ bless $self, $pkg;
+
+ # File (singular) overrides Files (plural)
+ $self->{Files} = [ $self->{File} ] if exists $self->{File};
+
+ $self->_read_files();
+ return $self;
+}
+
+# read files into _header and _nodes
+sub _read_files {
+ my $self = shift;
+ my $files = $self->{Files};
+ my $read_header = 0;
+ my @files_to_delete;
+
+ my $fh = gensym;
+ foreach (@$files) {
+ my $filename = $_;
+
+ if ($self->{DeleteFiles}) {
+ my $newfilename = $filename . ".deleteme";
+ if ($^O eq 'VMS') {
+ # VMS default filesystem can only have one period
+ $newfilename = $filename . 'deleteme';
+ }
+ # will clobber an existing $newfilename
+ rename($filename, $newfilename)
+ or croak "Can't rename($filename, $newfilename): $!";
+ # On a versioned filesystem we want old versions to be removed
+ 1 while (unlink $filename);
+ $filename = $newfilename;
+ }
+
+ open($fh, "<", $filename)
+ or croak("Unable to read profile file '$filename': $!");
+
+ # lock the file in case it's still being written to
+        # (we'll be forced to wait till the write is complete)
+ flock($fh, LOCK_SH) if $self->{LockFile};
+
+ if (-s $fh) { # not empty
+ $self->_read_header($fh, $filename, $read_header ? 0 : 1);
+ $read_header = 1;
+ $self->_read_body($fh, $filename);
+ }
+ close($fh); # and release lock
+
+ push @files_to_delete, $filename
+ if $self->{DeleteFiles};
+ }
+ for (@files_to_delete){
+ # for versioned file systems
+ 1 while (unlink $_);
+ if(-e $_){
+ warn "Can't delete '$_': $!";
+ }
+ }
+
+ # discard node_lookup now that all files are read
+ delete $self->{_node_lookup};
+}
+
+# read the header from the given $fh named $filename. Discards the
+# data unless $keep.
+sub _read_header {
+ my ($self, $fh, $filename, $keep) = @_;
+
+ # get profiler module id
+ my $first = <$fh>;
+ chomp $first;
+ $self->{_profiler} = $first if $keep;
+
+ # collect variables from the header
+ local $_;
+ while (<$fh>) {
+ chomp;
+ last unless length $_;
+ /^(\S+)\s*=\s*(.*)/
+ or croak("Syntax error in header in $filename line $.: $_");
+ # XXX should compare new with existing (from previous file)
+        # and warn if they differ (different program or path)
+ $self->{_header}{$1} = unescape_key($2) if $keep;
+ }
+}
+
+
+sub unescape_key { # inverse of escape_key() in DBI::ProfileDumper
+ local $_ = shift;
+ s/(?<!\\)\\n/\n/g; # expand \n, unless it's a \\n
+ s/(?<!\\)\\r/\r/g; # expand \r, unless it's a \\r
+ s/\\\\/\\/g; # \\ to \
+ return $_;
+}
+
+
+# reads the body of the profile data
+sub _read_body {
+ my ($self, $fh, $filename) = @_;
+ my $nodes = $self->{_nodes};
+ my $lookup = $self->{_node_lookup};
+ my $filter = $self->{Filter};
+
+ # build up node array
+ my @path = ("");
+ my (@data, $path_key);
+ local $_;
+ while (<$fh>) {
+ chomp;
+ if (/^\+\s+(\d+)\s?(.*)/) {
+ # it's a key
+ my ($key, $index) = ($2, $1 - 1);
+
+ $#path = $index; # truncate path to new length
+ $path[$index] = unescape_key($key); # place new key at end
+
+ }
+ elsif (s/^=\s+//) {
+            # it's data - fill in the node array with the path in index 0
+            # (the optional minus is to make it more robust against systems
+            # with unstable high-res clocks - typically due to poor NTP config
+            # or kernel SMP behaviour, i.e. min time may be -0.000008)
+
+ @data = split / /, $_;
+
+ # corrupt data?
+ croak("Invalid number of fields in $filename line $.: $_")
+ unless @data == 7;
+ croak("Invalid leaf node characters $filename line $.: $_")
+ unless m/^[-+ 0-9eE\.]+$/;
+
+ # hook to enable pre-processing of the data - such as mangling SQL
+ # so that slightly different statements get treated as the same
+ # and so merged in the results
+ $filter->(\@path, \@data) if $filter;
+
+ # elements of @path can't have NULLs in them, so this
+ # forms a unique string per @path. If there's some way I
+ # can get this without arbitrarily stripping out a
+ # character I'd be happy to hear it!
+ $path_key = join("\0",@path);
+
+ # look for previous entry
+ if (exists $lookup->{$path_key}) {
+ # merge in the new data
+ dbi_profile_merge($nodes->[$lookup->{$path_key}], \@data);
+ } else {
+ # insert a new node - nodes are arrays with data in 0-6
+ # and path data after that
+ push(@$nodes, [ @data, @path ]);
+
+ # record node in %seen
+ $lookup->{$path_key} = $#$nodes;
+ }
+ }
+ else {
+ croak("Invalid line type syntax error in $filename line $.: $_");
+ }
+ }
+}
+
+
+
+=head2 $copy = $prof->clone();
+
+Clone a profile data set creating a new object.
+
+=cut
+
+sub clone {
+ my $self = shift;
+
+ # start with a simple copy
+ my $clone = bless { %$self }, ref($self);
+
+ # deep copy nodes
+ $clone->{_nodes} = [ map { [ @$_ ] } @{$self->{_nodes}} ];
+
+ # deep copy header
+ $clone->{_header} = { %{$self->{_header}} };
+
+ return $clone;
+}
+
+=head2 $header = $prof->header();
+
+Returns a reference to a hash of header values. These are the key
+value pairs included in the header section of the DBI::ProfileDumper
+data format. For example:
+
+ $header = {
+ Path => [ '!Statement', '!MethodName' ],
+ Program => 't/42profile_data.t',
+ };
+
+Note that modifying this hash will modify the header data stored
+inside the profile object.
+
+=cut
+
+sub header { shift->{_header} }
+
+
+=head2 $nodes = $prof->nodes()
+
+Returns a reference to the sorted nodes array. Each element in the array
+is a single record in the data set. The first seven elements are the
+same as the elements provided by DBI::Profile. After that each key is
+in a separate element. For example:
+
+ $nodes = [
+ [
+ 2, # 0, count
+ 0.0312958955764771, # 1, total duration
+ 0.000490069389343262, # 2, first duration
+ 0.000176072120666504, # 3, shortest duration
+ 0.00140702724456787, # 4, longest duration
+ 1023115819.83019, # 5, time of first event
+ 1023115819.86576, # 6, time of last event
+ 'SELECT foo FROM bar' # 7, key1
+ 'execute' # 8, key2
+ # 6+N, keyN
+ ],
+ # ...
+ ];
+
+Note that modifying this array will modify the node data stored inside
+the profile object.
+
+=cut
+
+sub nodes { shift->{_nodes} }
+
+
+=head2 $count = $prof->count()
+
+Returns the number of items in the profile data set.
+
+=cut
+
+sub count { scalar @{shift->{_nodes}} }
+
+
+=head2 $prof->sort(field => "field")
+
+=head2 $prof->sort(field => "field", reverse => 1)
+
+Sorts data by the given field. Available fields are:
+
+ longest
+ total
+ count
+ shortest
+
+The default sort is greatest to smallest, which is the opposite of the
+normal Perl meaning. This, however, matches the expected behavior of
+the dbiprof frontend.
+
+=cut
+
+
+# sorts data by one of the available fields
+{
+ my %FIELDS = (
+ longest => LONGEST,
+ total => TOTAL,
+ count => COUNT,
+ shortest => SHORTEST,
+ key1 => PATH+0,
+ key2 => PATH+1,
+ key3 => PATH+2,
+ );
+ sub sort {
+ my $self = shift;
+ my $nodes = $self->{_nodes};
+ my %opt = @_;
+
+ croak("Missing required field option.") unless $opt{field};
+
+ my $index = $FIELDS{$opt{field}};
+
+ croak("Unrecognized sort field '$opt{field}'.")
+ unless defined $index;
+
+ # sort over index
+ if ($opt{reverse}) {
+ @$nodes = sort {
+ $a->[$index] <=> $b->[$index]
+ } @$nodes;
+ } else {
+ @$nodes = sort {
+ $b->[$index] <=> $a->[$index]
+ } @$nodes;
+ }
+
+ # remember how we're sorted
+ $self->{_sort} = $opt{field};
+
+ return $self;
+ }
+}
+
+
+=head2 $count = $prof->exclude(key2 => "disconnect")
+
+=head2 $count = $prof->exclude(key2 => "disconnect", case_sensitive => 1)
+
+=head2 $count = $prof->exclude(key1 => qr/^SELECT/i)
+
+Removes records from the data set that match the given string or
+regular expression. This method modifies the data in a permanent
+fashion - use clone() first to maintain the original data after
+exclude(). Returns the number of nodes left in the profile data set.
+
+=cut
+
+sub exclude {
+ my $self = shift;
+ my $nodes = $self->{_nodes};
+ my %opt = @_;
+
+ # find key index number
+ my ($index, $val);
+ foreach (keys %opt) {
+ if (/^key(\d+)$/) {
+ $index = PATH + $1 - 1;
+ $val = $opt{$_};
+ last;
+ }
+ }
+ croak("Missing required keyN option.") unless $index;
+
+ if (UNIVERSAL::isa($val,"Regexp")) {
+ # regex match
+ @$nodes = grep {
+ $#$_ < $index or $_->[$index] !~ /$val/
+ } @$nodes;
+ } else {
+ if ($opt{case_sensitive}) {
+ @$nodes = grep {
+ $#$_ < $index or $_->[$index] ne $val;
+ } @$nodes;
+ } else {
+ $val = lc $val;
+ @$nodes = grep {
+ $#$_ < $index or lc($_->[$index]) ne $val;
+ } @$nodes;
+ }
+ }
+
+ return scalar @$nodes;
+}
+
+
+=head2 $count = $prof->match(key2 => "disconnect")
+
+=head2 $count = $prof->match(key2 => "disconnect", case_sensitive => 1)
+
+=head2 $count = $prof->match(key1 => qr/^SELECT/i)
+
+Removes records from the data set that do not match the given string
+or regular expression. This method modifies the data in a permanent
+fashion - use clone() first to maintain the original data after
+match(). Returns the number of nodes left in the profile data set.
+
+=cut
+
+sub match {
+ my $self = shift;
+ my $nodes = $self->{_nodes};
+ my %opt = @_;
+
+ # find key index number
+ my ($index, $val);
+ foreach (keys %opt) {
+ if (/^key(\d+)$/) {
+ $index = PATH + $1 - 1;
+ $val = $opt{$_};
+ last;
+ }
+ }
+ croak("Missing required keyN option.") unless $index;
+
+ if (UNIVERSAL::isa($val,"Regexp")) {
+ # regex match
+ @$nodes = grep {
+ $#$_ >= $index and $_->[$index] =~ /$val/
+ } @$nodes;
+ } else {
+ if ($opt{case_sensitive}) {
+ @$nodes = grep {
+ $#$_ >= $index and $_->[$index] eq $val;
+ } @$nodes;
+ } else {
+ $val = lc $val;
+ @$nodes = grep {
+ $#$_ >= $index and lc($_->[$index]) eq $val;
+ } @$nodes;
+ }
+ }
+
+ return scalar @$nodes;
+}
+
+
+=head2 $Data = $prof->Data()
+
+Returns the same Data hash structure as seen in DBI::Profile. This
+structure is not sorted. The nodes() structure probably makes more
+sense for most analysis.
+
+=cut
+
+sub Data {
+ my $self = shift;
+ my (%Data, @data, $ptr);
+
+ foreach my $node (@{$self->{_nodes}}) {
+ # traverse to key location
+ $ptr = \%Data;
+ foreach my $key (@{$node}[PATH .. $#$node - 1]) {
+ $ptr->{$key} = {} unless exists $ptr->{$key};
+ $ptr = $ptr->{$key};
+ }
+
+ # slice out node data
+ $ptr->{$node->[-1]} = [ @{$node}[0 .. 6] ];
+ }
+
+ return \%Data;
+}
+
+
+=head2 $text = $prof->format($nodes->[0])
+
+Formats a single node into a human-readable block of text.
+
+=cut
+
+sub format {
+ my ($self, $node) = @_;
+ my $format;
+
+ # setup keys
+ my $keys = "";
+ for (my $i = PATH; $i <= $#$node; $i++) {
+ my $key = $node->[$i];
+
+ # remove leading and trailing space
+ $key =~ s/^\s+//;
+ $key =~ s/\s+$//;
+
+ # if key has newlines or is long take special precautions
+ if (length($key) > 72 or $key =~ /\n/) {
+ $keys .= " Key " . ($i - PATH + 1) . " :\n\n$key\n\n";
+ } else {
+ $keys .= " Key " . ($i - PATH + 1) . " : $key\n";
+ }
+ }
+
+ # nodes with multiple runs get the long entry format, nodes with
+ # just one run get a single count.
+ if ($node->[COUNT] > 1) {
+ $format = <<END;
+ Count : %d
+ Total Time : %3.6f seconds
+ Longest Time : %3.6f seconds
+ Shortest Time : %3.6f seconds
+ Average Time : %3.6f seconds
+END
+ return sprintf($format, @{$node}[COUNT,TOTAL,LONGEST,SHORTEST],
+ $node->[TOTAL] / $node->[COUNT]) . $keys;
+ } else {
+ $format = <<END;
+ Count : %d
+ Time : %3.6f seconds
+END
+
+ return sprintf($format, @{$node}[COUNT,TOTAL]) . $keys;
+
+ }
+}
+
+
+=head2 $text = $prof->report(number => 10)
+
+Produces a report with the given number of items.
+
+=cut
+
+sub report {
+ my $self = shift;
+ my $nodes = $self->{_nodes};
+ my %opt = @_;
+
+ croak("Missing required number option") unless exists $opt{number};
+
+ $opt{number} = @$nodes if @$nodes < $opt{number};
+
+ my $report = $self->_report_header($opt{number});
+ for (0 .. $opt{number} - 1) {
+ $report .= sprintf("#" x 5 . "[ %d ]". "#" x 59 . "\n",
+ $_ + 1);
+ $report .= $self->format($nodes->[$_]);
+ $report .= "\n";
+ }
+ return $report;
+}
+
+# format the header for report()
+sub _report_header {
+ my ($self, $number) = @_;
+ my $nodes = $self->{_nodes};
+ my $node_count = @$nodes;
+
+ # find total runtime and method count
+ my ($time, $count) = (0,0);
+ foreach my $node (@$nodes) {
+ $time += $node->[TOTAL];
+ $count += $node->[COUNT];
+ }
+
+ my $header = <<END;
+
+DBI Profile Data ($self->{_profiler})
+
+END
+
+ # output header fields
+ while (my ($key, $value) = each %{$self->{_header}}) {
+ $header .= sprintf(" %-13s : %s\n", $key, $value);
+ }
+
+ # output summary data fields
+ $header .= sprintf(<<END, $node_count, $number, $self->{_sort}, $count, $time);
+ Total Records : %d (showing %d, sorted by %s)
+ Total Count : %d
+ Total Runtime : %3.6f seconds
+
+END
+
+ return $header;
+}
+
+
+1;
+
+__END__
+
+=head1 AUTHOR
+
+Sam Tregar <sam@tregar.com>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2002 Sam Tregar
+
+This program is free software; you can redistribute it and/or modify
+it under the same terms as Perl 5 itself.
+
+=cut
diff --git a/lib/DBI/ProfileDumper.pm b/lib/DBI/ProfileDumper.pm
new file mode 100644
index 0000000..89bb884
--- /dev/null
+++ b/lib/DBI/ProfileDumper.pm
@@ -0,0 +1,351 @@
+package DBI::ProfileDumper;
+use strict;
+
+=head1 NAME
+
+DBI::ProfileDumper - profile DBI usage and output data to a file
+
+=head1 SYNOPSIS
+
+To profile an existing program using DBI::ProfileDumper, set the
+DBI_PROFILE environment variable and run your program as usual. For
+example, using bash:
+
+ DBI_PROFILE=2/DBI::ProfileDumper program.pl
+
+Then analyze the generated file (F<dbi.prof>) with L<dbiprof|dbiprof>:
+
+ dbiprof
+
+You can also activate DBI::ProfileDumper from within your code:
+
+ use DBI;
+
+ # profile with default path (2) and output file (dbi.prof)
+ $dbh->{Profile} = "!Statement/DBI::ProfileDumper";
+
+ # same thing, spelled out
+ $dbh->{Profile} = "!Statement/DBI::ProfileDumper/File:dbi.prof";
+
+ # another way to say it
+ use DBI::ProfileDumper;
+ $dbh->{Profile} = DBI::ProfileDumper->new(
+ Path => [ '!Statement' ],
+ File => 'dbi.prof' );
+
+ # using a custom path
+ $dbh->{Profile} = DBI::ProfileDumper->new(
+ Path => [ "foo", "bar" ],
+ File => 'dbi.prof',
+ );
+
+
+=head1 DESCRIPTION
+
+DBI::ProfileDumper is a subclass of L<DBI::Profile|DBI::Profile> which
+dumps profile data to disk instead of printing a summary to your
+screen. You can then use L<dbiprof|dbiprof> to analyze the data in
+a number of interesting ways, or you can roll your own analysis using
+L<DBI::ProfileData|DBI::ProfileData>.
+
+B<NOTE:> For Apache/mod_perl applications, use
+L<DBI::ProfileDumper::Apache|DBI::ProfileDumper::Apache>.
+
+=head1 USAGE
+
+One way to use this module is just to enable it in your C<$dbh>:
+
+ $dbh->{Profile} = "1/DBI::ProfileDumper";
+
+This will write out profile data by statement into a file called
+F<dbi.prof>. If you want to modify either of these properties, you
+can construct the DBI::ProfileDumper object yourself:
+
+ use DBI::ProfileDumper;
+ $dbh->{Profile} = DBI::ProfileDumper->new(
+ Path => [ '!Statement' ],
+ File => 'dbi.prof'
+ );
+
+The C<Path> option takes the same values as in
+L<DBI::Profile>. The C<File> option gives the name of the
+file where results will be collected. If it already exists it will be
+overwritten.
+
+You can also activate this module by setting the DBI_PROFILE
+environment variable:
+
+ $ENV{DBI_PROFILE} = "!Statement/DBI::ProfileDumper";
+
+This will cause all DBI handles to share the same profiling object.
+
+=head1 METHODS
+
+The following methods are available to be called using the profile
+object. You can get access to the profile object from the Profile key
+in any DBI handle:
+
+ my $profile = $dbh->{Profile};
+
+=head2 flush_to_disk
+
+ $profile->flush_to_disk()
+
+Flushes all collected profile data to disk and empties the Data hash. Returns
+the filename written to. If no profile data has been collected then the file is
+not written and flush_to_disk() returns undef.
+
+The file is locked while it's being written. A process 'consuming' the files
+while they're being written to, should rename the file first, then lock it,
+then read it, then close and delete it. The C<DeleteFiles> option to
+L<DBI::ProfileData> does the right thing.
+
+This method may be called multiple times during a program run.
+
+=head2 empty
+
+ $profile->empty()
+
+Clears the Data hash without writing to disk.
+
+=head2 filename
+
+ $filename = $profile->filename();
+
+Get or set the filename.
+
+The filename can be specified as a CODE reference, in which case the referenced
+code should return the filename to be used. The code will be called with the
+profile object as its first argument.
+
+=head1 DATA FORMAT
+
+The data format written by DBI::ProfileDumper starts with a header
+containing the version number of the module used to generate it. Then
+a block of variable declarations describes the profile. After two
+newlines, the profile data forms the body of the file. For example:
+
+ DBI::ProfileDumper 2.003762
+ Path = [ '!Statement', '!MethodName' ]
+ Program = t/42profile_data.t
+
+ + 1 SELECT name FROM users WHERE id = ?
+ + 2 prepare
+ = 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ + 2 execute
+  = 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ + 2 fetchrow_hashref
+ = 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ + 1 UPDATE users SET name = ? WHERE id = ?
+ + 2 prepare
+ = 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ + 2 execute
+ = 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+
+The lines beginning with C<+> signs signify keys. The number after
+the C<+> sign shows the nesting level of the key. Lines beginning
+with C<=> are the actual profile data, in the same order as
+in DBI::Profile.
+
+Note that the same path may be present multiple times in the data file
+since C<format()> may be called more than once. When read by
+DBI::ProfileData the data points will be merged to produce a single
+data set for each distinct path.
+
+The key strings are transformed in three ways. First, all backslashes
+are doubled. Then all newlines and carriage-returns are transformed
+into C<\n> and C<\r> respectively. Finally, any NULL bytes (C<\0>)
+are entirely removed. When DBI::ProfileData reads the file the first
+two transformations will be reversed, but NULL bytes will not be
+restored.
+
+=head1 AUTHOR
+
+Sam Tregar <sam@tregar.com>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2002 Sam Tregar
+
+This program is free software; you can redistribute it and/or modify
+it under the same terms as Perl 5 itself.
+
+=cut
+
+# inherit from DBI::Profile
+use DBI::Profile;
+
+our @ISA = ("DBI::Profile");
+
+our $VERSION = sprintf("2.%06d", q$Revision: 15324 $ =~ /(\d+)/o);
+
+use Carp qw(croak);
+use Fcntl qw(:flock);
+use Symbol;
+
+my $HAS_FLOCK = (defined $ENV{DBI_PROFILE_FLOCK})
+ ? $ENV{DBI_PROFILE_FLOCK}
+ : do { local $@; eval { flock STDOUT, 0; 1 } };
+
+my $program_header;
+
+
+# validate params and setup default
+sub new {
+ my $pkg = shift;
+ my $self = $pkg->SUPER::new(
+ LockFile => $HAS_FLOCK,
+ @_,
+ );
+
+ # provide a default filename
+ $self->filename("dbi.prof") unless $self->filename;
+
+ DBI->trace_msg("$self: @{[ %$self ]}\n",0)
+ if $self->{Trace} && $self->{Trace} >= 2;
+
+ return $self;
+}
+
+
+# get/set filename to use
+sub filename {
+ my $self = shift;
+ $self->{File} = shift if @_;
+ my $filename = $self->{File};
+ $filename = $filename->($self) if ref($filename) eq 'CODE';
+ return $filename;
+}
+
+
+# flush available data to disk
+sub flush_to_disk {
+ my $self = shift;
+ my $class = ref $self;
+ my $filename = $self->filename;
+ my $data = $self->{Data};
+
+ if (1) { # make an option
+ if (not $data or ref $data eq 'HASH' && !%$data) {
+ DBI->trace_msg("flush_to_disk skipped for empty profile\n",0) if $self->{Trace};
+ return undef;
+ }
+ }
+
+ my $fh = gensym;
+ if (($self->{_wrote_header}||'') eq $filename) {
+ # append more data to the file
+ # XXX assumes that Path hasn't changed
+ open($fh, ">>", $filename)
+ or croak("Unable to open '$filename' for $class output: $!");
+ } else {
+ # create new file (or overwrite existing)
+ if (-f $filename) {
+ my $bak = $filename.'.prev';
+ unlink($bak);
+ rename($filename, $bak)
+ or warn "Error renaming $filename to $bak: $!\n";
+ }
+ open($fh, ">", $filename)
+ or croak("Unable to open '$filename' for $class output: $!");
+ }
+ # lock the file (before checking size and writing the header)
+ flock($fh, LOCK_EX) if $self->{LockFile};
+ # write header if file is empty - typically because we just opened it
+ # in '>' mode, or perhaps we used '>>' but the file had been truncated externally.
+ if (-s $fh == 0) {
+ DBI->trace_msg("flush_to_disk wrote header to $filename\n",0) if $self->{Trace};
+ $self->write_header($fh);
+ $self->{_wrote_header} = $filename;
+ }
+
+ my $lines = $self->write_data($fh, $self->{Data}, 1);
+ DBI->trace_msg("flush_to_disk wrote $lines lines to $filename\n",0) if $self->{Trace};
+
+ close($fh) # unlocks the file
+ or croak("Error closing '$filename': $!");
+
+ $self->empty();
+
+
+ return $filename;
+}
+
+
+# write header to a filehandle
+sub write_header {
+ my ($self, $fh) = @_;
+
+    # isolate us against globals which affect print
+ local($\, $,);
+
+ # $self->VERSION can return undef during global destruction
+ my $version = $self->VERSION || $VERSION;
+
+ # module name and version number
+ print $fh ref($self)." $version\n";
+
+ # print out Path (may contain CODE refs etc)
+ my @path_words = map { escape_key($_) } @{ $self->{Path} || [] };
+ print $fh "Path = [ ", join(', ', @path_words), " ]\n";
+
+ # print out $0 and @ARGV
+ if (!$program_header) {
+ # XXX should really quote as well as escape
+ $program_header = "Program = "
+ . join(" ", map { escape_key($_) } $0, @ARGV)
+ . "\n";
+ }
+ print $fh $program_header;
+
+ # all done
+ print $fh "\n";
+}
+
+
+# write data in the prescribed format
+sub write_data {
+ my ($self, $fh, $data, $level) = @_;
+
+ # XXX it's valid for $data to be an ARRAY ref, i.e., Path is empty.
+ # produce an empty profile for invalid $data
+ return 0 unless $data and UNIVERSAL::isa($data,'HASH');
+
+ # isolate us against globals which affect print
+ local ($\, $,);
+
+ my $lines = 0;
+ while (my ($key, $value) = each(%$data)) {
+ # output a key
+ print $fh "+ $level ". escape_key($key). "\n";
+ if (UNIVERSAL::isa($value,'ARRAY')) {
+ # output a data set for a leaf node
+ print $fh "= ".join(' ', @$value)."\n";
+ $lines += 1;
+ } else {
+ # recurse through keys - this could be rewritten to use a
+ # stack for some small performance gain
+ $lines += $self->write_data($fh, $value, $level + 1);
+ }
+ }
+ return $lines;
+}
+
+
+# escape a key for output
+sub escape_key {
+ my $key = shift;
+ $key =~ s!\\!\\\\!g;
+ $key =~ s!\n!\\n!g;
+ $key =~ s!\r!\\r!g;
+ $key =~ s!\0!!g;
+ return $key;
+}
+
+
+# flush data to disk when profile object goes out of scope
+sub on_destroy {
+ shift->flush_to_disk();
+}
+
+1;
diff --git a/lib/DBI/ProfileDumper/Apache.pm b/lib/DBI/ProfileDumper/Apache.pm
new file mode 100644
index 0000000..1f58926
--- /dev/null
+++ b/lib/DBI/ProfileDumper/Apache.pm
@@ -0,0 +1,219 @@
+package DBI::ProfileDumper::Apache;
+
+use strict;
+
+=head1 NAME
+
+DBI::ProfileDumper::Apache - capture DBI profiling data from Apache/mod_perl
+
+=head1 SYNOPSIS
+
+Add this line to your F<httpd.conf>:
+
+ PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
+
+(If you're using mod_perl2, see L</When using mod_perl2> for some additional notes.)
+
+Then restart your server. Access the code you wish to test using a
+web browser, then shutdown your server. This will create a set of
+F<dbi.prof.*> files in your Apache log directory.
+
+Get a profiling report with L<dbiprof|dbiprof>:
+
+ dbiprof /path/to/your/apache/logs/dbi.prof.*
+
+When you're ready to perform another profiling run, delete the old files and start again.
+
+=head1 DESCRIPTION
+
+This module interfaces DBI::ProfileDumper to Apache/mod_perl. Using
+this module you can collect profiling data from mod_perl applications.
+It works by creating a DBI::ProfileDumper data file for each Apache
+process. These files are created in your Apache log directory. You
+can then use the dbiprof utility to analyze the profile files.
+
+=head1 USAGE
+
+=head2 LOADING THE MODULE
+
+The easiest way to use this module is just to set the DBI_PROFILE
+environment variable in your F<httpd.conf>:
+
+ PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
+
+The DBI will look after loading and using the module when the first DBI handle
+is created.
+
+It's also possible to use this module by setting the Profile attribute
+of any DBI handle:
+
+ $dbh->{Profile} = "2/DBI::ProfileDumper::Apache";
+
+See L<DBI::ProfileDumper> for more possibilities, and L<DBI::Profile> for full
+details of the DBI's profiling mechanism.
+
+=head2 WRITING PROFILE DATA
+
+The profile data files will be written to your Apache log directory by default.
+
+The user that the httpd processes run as will need write access to the
+directory. So, for example, if you're running the child httpds as user 'nobody'
+and using chronolog to write to the logs directory, then you'll need to change
+the default.
+
+You can change the destination directory either by specifying a C<Dir> value
+when creating the profile (like C<File> in the L<DBI::ProfileDumper> docs),
+or you can use the C<DBI_PROFILE_APACHE_LOG_DIR> env var to change that. For example:
+
+ PerlSetEnv DBI_PROFILE_APACHE_LOG_DIR /server_root/logs
+
+=head3 When using mod_perl2
+
+Under mod_perl2 you'll need to either set the C<DBI_PROFILE_APACHE_LOG_DIR> env var,
+or enable the mod_perl2 C<GlobalRequest> option, like this:
+
+ PerlOptions +GlobalRequest
+
+to the global config section you're about to test with DBI::ProfileDumper::Apache.
+If you don't do one of those then you'll see messages in your error_log similar to:
+
+ DBI::ProfileDumper::Apache on_destroy failed: Global $r object is not available. Set:
+ PerlOptions +GlobalRequest in httpd.conf at ..../DBI/ProfileDumper/Apache.pm line 144
+
+=head3 Naming the files
+
+The default file name is inherited from L<DBI::ProfileDumper> via the
+filename() method, but DBI::ProfileDumper::Apache appends the parent pid and
+the current pid, separated by dots, to that name.
+
+=head3 Silencing the log
+
+By default a message is written to STDERR (i.e., the apache error_log file)
+when flush_to_disk() is called (either explicitly, or implicitly via DESTROY).
+
+That's usually very useful. If you don't want the log message you can silence
+it by setting the C<Quiet> attribute true.
+
+ PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache/Quiet:1
+
+ $dbh->{Profile} = "!Statement/DBI::ProfileDumper/Quiet:1";
+
+ $dbh->{Profile} = DBI::ProfileDumper->new(
+ Path => [ '!Statement' ]
+ Quiet => 1
+ );
+
+
+=head2 GATHERING PROFILE DATA
+
+Once you have the module loaded, use your application as you normally
+would. Stop the webserver when your tests are complete. Profile data
+files will be produced when Apache exits and you'll see something like
+this in your error_log:
+
+ DBI::ProfileDumper::Apache writing to /usr/local/apache/logs/dbi.prof.2604.2619
+
+Now you can use dbiprof to examine the data:
+
+ dbiprof /usr/local/apache/logs/dbi.prof.2604.*
+
+By passing dbiprof a list of all generated files, dbiprof will
+automatically merge them into one result set. You can also pass
+dbiprof sorting and querying options, see L<dbiprof> for details.
+
+=head2 CLEANING UP
+
+Once you've made some code changes, you're ready to start again.
+First, delete the old profile data files:
+
+ rm /usr/local/apache/logs/dbi.prof.*
+
+Then restart your server and get back to work.
+
+=head1 OTHER ISSUES
+
+=head2 Memory usage
+
+DBI::Profile can use a lot of memory for very active applications because it
+collects profiling data in memory for each distinct query run.
+Calling C<flush_to_disk()> will write the current data to disk and free the
+memory it's using. For example:
+
+ $dbh->{Profile}->flush_to_disk() if $dbh->{Profile};
+
+or, rather than flush every time, you could flush less often:
+
+ $dbh->{Profile}->flush_to_disk()
+ if $dbh->{Profile} and ++$i % 100;
+
+=head1 AUTHOR
+
+Sam Tregar <sam@tregar.com>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2002 Sam Tregar
+
+This program is free software; you can redistribute it and/or modify
+it under the same terms as Perl 5 itself.
+
+=cut
+
+our $VERSION = sprintf("2.%06d", q$Revision: 14120 $ =~ /(\d+)/o);
+
+our @ISA = qw(DBI::ProfileDumper);
+
+use DBI::ProfileDumper;
+use File::Spec;
+
+# pid of the process that loaded this module, so filename() can tell
+# the original (parent) process from forked children
+my $initial_pid = $$;
+
+# true when running under mod_perl2 (Apache2::* API), false otherwise
+use constant MP2 => ($ENV{MOD_PERL_API_VERSION} and $ENV{MOD_PERL_API_VERSION} == 2) ? 1 : 0;
+
+my $server_root_dir;
+
+if (MP2) {
+    require Apache2::ServerUtil;
+    $server_root_dir = Apache2::ServerUtil::server_root();
+}
+else {
+    require Apache;
+    # fall back to /tmp if the server root can't be determined
+    $server_root_dir = eval { Apache->server_root_relative('') } || "/tmp";
+}
+
+
+# Return (and cache in $self->{Dir}) the directory profile files are
+# written to: the Dir attribute if set, else $ENV{DBI_PROFILE_APACHE_LOG_DIR},
+# else the "logs" directory under the Apache server root.
+sub _dirname {
+    my $self = shift;
+    return $self->{Dir} ||= $ENV{DBI_PROFILE_APACHE_LOG_DIR}
+                        || File::Spec->catdir($server_root_dir, "logs");
+}
+
+
+# Build the profile file name: the name from DBI::ProfileDumper plus
+# ".<group_pid>.<pid>" so files from the same set of Apache processes
+# can be grouped; resolved relative to _dirname() unless already absolute.
+sub filename {
+    my $self = shift;
+    my $filename = $self->SUPER::filename(@_);
+    return $filename if not $filename; # not set yet
+
+    # to be able to identify groups of profile files from the same set of
+    # apache processes, we include the parent pid in the file name
+    # as well as the pid.
+    # pids are numbers, so compare numerically (was a string 'eq')
+    my $group_pid = ($$ == $initial_pid) ? $$ : getppid();
+    $filename .= ".$group_pid.$$";
+
+    return $filename if File::Spec->file_name_is_absolute($filename);
+    return File::Spec->catfile($self->_dirname, $filename);
+}
+
+
+# Flush profile data via the parent class, then log the written file
+# name to STDERR (i.e. the Apache error_log) unless the Quiet attribute
+# is true.  Returns the filename, as the parent class does.
+sub flush_to_disk {
+    my $self = shift;
+
+    my $filename = $self->SUPER::flush_to_disk(@_);
+
+    # $filename may be false (presumably nothing to flush - see
+    # DBI::ProfileDumper), in which case nothing is logged
+    print STDERR ref($self)." pid$$ written to $filename\n"
+        if $filename && not $self->{Quiet};
+
+    return $filename;
+}
+
+1;
diff --git a/lib/DBI/ProfileSubs.pm b/lib/DBI/ProfileSubs.pm
new file mode 100644
index 0000000..02ca64d
--- /dev/null
+++ b/lib/DBI/ProfileSubs.pm
@@ -0,0 +1,50 @@
+package DBI::ProfileSubs;
+
+our $VERSION = sprintf("0.%06d", q$Revision: 9395 $ =~ /(\d+)/o);
+
+=head1 NAME
+
+DBI::ProfileSubs - Subroutines for dynamic profile Path
+
+=head1 SYNOPSIS
+
+ DBI_PROFILE='&norm_std_n3' prog.pl
+
+This is new and still experimental.
+
+=head1 TO DO
+
+Define some kind of naming convention for the subs.
+
+=cut
+
+use strict;
+use warnings;
+
+
+# would be good to refactor these regex into separate subs and find some
+# way to compose them in various combinations into multiple subs.
+# Perhaps via AUTOLOAD where \&auto_X_Y_Z creates a sub that does X, Y, and Z.
+# The final subs always need to be very fast.
+#
+
+# Normalize the statement text in $_ for use in a profile Path: replace
+# literal numbers and quoted strings with <N>/<S> placeholders so similar
+# statements collapse into one profile node.  Returns the normalized copy.
+sub norm_std_n3 {
+    # my ($h, $method_name) = @_;
+    local $_ = $_;  # work on a copy; don't clobber the caller's $_
+
+    s/\b\d+\b/<N>/g;            # 42 -> <N>
+    s/\b0x[0-9A-Fa-f]+\b/<N>/g; # 0xFE -> <N>
+
+    s/'.*?'/'<S>'/g;            # single quoted strings (doesn't handle escapes)
+    s/".*?"/"<S>"/g;            # double quoted strings (doesn't handle escapes)
+
+    # convert names like log20001231 into log<N>
+    s/([a-z_]+)(\d{3,})\b/${1}<N>/ig;
+
+    # abbreviate massive "in (...)" statements and similar
+    # ($1 is the whole repeated run, $2 the last repeated element)
+    s!((\s*<[NS]>\s*,\s*){100,})!sprintf("$2,<repeated %d times>",length($1)/2)!eg;
+
+    return $_;
+}
+
+1;
diff --git a/lib/DBI/ProxyServer.pm b/lib/DBI/ProxyServer.pm
new file mode 100644
index 0000000..89e2de6
--- /dev/null
+++ b/lib/DBI/ProxyServer.pm
@@ -0,0 +1,890 @@
+# $Header: /home/timbo/dbi/lib/DBI/RCS/ProxyServer.pm,v 11.9 2003/05/14 11:08:17 timbo Exp $
+# -*- perl -*-
+#
+# DBI::ProxyServer - a proxy server for DBI drivers
+#
+# Copyright (c) 1997 Jochen Wiedmann
+#
+# The DBD::Proxy module is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself. In particular permission
+# is granted to Tim Bunce for distributing this as a part of the DBI.
+#
+#
+# Author: Jochen Wiedmann
+# Am Eisteich 9
+# 72555 Metzingen
+# Germany
+#
+# Email: joe@ispsoft.de
+# Phone: +49 7123 14881
+#
+#
+##############################################################################
+
+
+require 5.004;
+use strict;
+
+use RPC::PlServer 0.2001;
+require DBI;
+require Config;
+
+
+package DBI::ProxyServer;
+
+
+
+############################################################################
+#
+# Constants
+#
+############################################################################
+
+# 'use vars' (rather than 'our') is kept deliberately: the file only
+# requires perl 5.004, which predates 'our'.
+use vars qw($VERSION @ISA);
+
+$VERSION = "0.3005";
+@ISA = qw(RPC::PlServer DBI);
+
+
+# Most of the options below are set to default values, we note them here
+# just for the sake of documentation.
+my %DEFAULT_SERVER_OPTIONS;
+{
+    my $o = \%DEFAULT_SERVER_OPTIONS;
+    # (was terminated with a comma, chaining into the next statement via
+    # the comma operator; semicolon is what was meant)
+    $o->{'chroot'} = undef;     # To be used in the initfile,
+                                # after loading the required
+                                # DBI drivers.
+    $o->{'clients'} =
+        [ { 'mask' => '.*',
+            'accept' => 1,
+            'cipher' => undef
+            }
+        ];
+    $o->{'configfile'} = '/etc/dbiproxy.conf' if -f '/etc/dbiproxy.conf';
+    $o->{'debug'} = 0;
+    $o->{'facility'} = 'daemon';
+    $o->{'group'} = undef;
+    $o->{'localaddr'} = undef;  # Bind to any local IP number
+    $o->{'localport'} = undef;  # Must set port number on the
+                                # command line.
+    $o->{'logfile'} = undef;    # Use syslog or EventLog.
+
+    # XXX don't restrict methods that can be called (trust users once connected)
+    $o->{'XXX_methods'} = {
+        'DBI::ProxyServer' => {
+            'Version' => 1,
+            'NewHandle' => 1,
+            'CallMethod' => 1,
+            'DestroyHandle' => 1
+            },
+        'DBI::ProxyServer::db' => {
+            'prepare' => 1,
+            'commit' => 1,
+            'rollback' => 1,
+            'STORE' => 1,
+            'FETCH' => 1,
+            'func' => 1,
+            'quote' => 1,
+            'type_info_all' => 1,
+            'table_info' => 1,
+            'disconnect' => 1,
+            },
+        'DBI::ProxyServer::st' => {
+            'execute' => 1,
+            'STORE' => 1,
+            'FETCH' => 1,
+            'func' => 1,
+            'fetch' => 1,
+            'finish' => 1
+            }
+    };
+    # pick the best available concurrency mode for this perl build
+    if ($Config::Config{'usethreads'} eq 'define') {
+        $o->{'mode'} = 'threads';
+    } elsif ($Config::Config{'d_fork'} eq 'define') {
+        $o->{'mode'} = 'fork';
+    } else {
+        $o->{'mode'} = 'single';
+    }
+    # No pidfile by default, configuration must provide one if needed
+    $o->{'pidfile'} = 'none';
+    $o->{'user'} = undef;
+};
+
+
+############################################################################
+#
+# Name: Version
+#
+# Purpose: Return version string
+#
+# Inputs: $class - This class
+#
+# Result: Version string; suitable for printing by "--version"
+#
+############################################################################
+
+sub Version {
+    my $version = $DBI::ProxyServer::VERSION;
+    # implicit return value: the version/copyright banner string
+    "DBI::ProxyServer $version, Copyright (C) 1998, Jochen Wiedmann";
+}
+
+
+############################################################################
+#
+# Name: AcceptApplication
+#
+# Purpose: Verify DBI DSN
+#
+# Inputs: $self - This instance
+# $dsn - DBI dsn
+#
+# Returns: TRUE for a valid DSN, FALSE otherwise
+#
+############################################################################
+
+sub AcceptApplication {
+    my $self = shift; my $dsn = shift;
+    # implicit return: true iff the DSN looks like "dbi:<driver>:..."
+    $dsn =~ /^dbi:\w+:/i;
+}
+
+
+############################################################################
+#
+# Name: AcceptVersion
+#
+# Purpose: Verify requested DBI version
+#
+# Inputs: $self - Instance
+# $version - DBI version being requested
+#
+# Returns: TRUE for ok, FALSE otherwise
+#
+############################################################################
+
+sub AcceptVersion {
+    my $self = shift; my $version = shift;
+    require DBI;
+    # side effect: register our subclass packages with the DBI
+    DBI::ProxyServer->init_rootclass();
+    # implicit return: true iff the installed DBI is new enough
+    $DBI::VERSION >= $version;
+}
+
+
+############################################################################
+#
+# Name: AcceptUser
+#
+# Purpose: Verify user and password by connecting to the client and
+# creating a database connection
+#
+# Inputs: $self - Instance
+# $user - User name
+# $password - Password
+#
+############################################################################
+
+# Authenticate the client via the parent class, then verify the
+# credentials against the real database by connecting to the DSN the
+# client asked for.  Returns 0 on failure; on success returns an array
+# ref of [1, <stored handle id>] for the new connection.
+sub AcceptUser {
+    my $self = shift; my $user = shift; my $password = shift;
+    return 0 if (!$self->SUPER::AcceptUser($user, $password));
+    my $dsn = $self->{'application'};
+    $self->Debug("Connecting to $dsn as $user");
+    # make sure the server's own connect isn't proxied in turn
+    local $ENV{DBI_AUTOPROXY} = ''; # :-)
+    $self->{'dbh'} = eval {
+        DBI::ProxyServer->connect($dsn, $user, $password,
+            { 'PrintError' => 0,
+              'Warn' => 0,
+              'RaiseError' => 1,
+              # append err/state to the message, then let RaiseError die
+              'HandleError' => sub {
+                  my $err = $_[1]->err;
+                  my $state = $_[1]->state || '';
+                  $_[0] .= " [err=$err,state=$state]";
+                  return 0;
+              } })
+    };
+    if ($@) {
+        $self->Error("Error while connecting to $dsn as $user: $@");
+        return 0;
+    }
+    [1, $self->StoreHandle($self->{'dbh'}) ];
+}
+
+
+# Dispatch a client method call.  Temporarily attaches this server
+# instance to the database handle (as 'private_server') so the DBI
+# subclass methods below can reach per-connection state, delegates to
+# RPC::PlServer::CallMethod, then detaches and re-throws any error.
+sub CallMethod {
+    my $server = shift;
+    my $dbh = $server->{'dbh'};
+    # We could store the private_server attribute permanently in
+    # $dbh. However, we'd have a reference loop in that case and
+    # I would be concerned about garbage collection. :-(
+    $dbh->{'private_server'} = $server;
+    $server->Debug("CallMethod: => " . do { local $^W; join(",", @_)});
+    my @result = eval { $server->SUPER::CallMethod(@_) };
+    my $msg = $@;
+    undef $dbh->{'private_server'};
+    if ($msg) {
+        # log the saved copy: $@ is a global and may have been clobbered
+        # by any eval run since the failing call
+        $server->Debug("CallMethod died with: $msg");
+        die $msg;
+    } else {
+        $server->Debug("CallMethod: <= " . do { local $^W; join(",", @result) });
+    }
+    @result;
+}
+
+
+# Entry point used by the dbiproxy script: build a server from the
+# defaults merged with the supplied command-line options, then bind
+# and serve (Bind() normally does not return).
+sub main {
+    my $server = DBI::ProxyServer->new(\%DEFAULT_SERVER_OPTIONS, \@_);
+    $server->Bind();
+}
+
+
+############################################################################
+#
+# The DBI part of the proxyserver is implemented as a DBI subclass.
+# Thus we can reuse some of the DBI methods and overwrite only
+# those that need additional handling.
+#
+############################################################################
+
+package DBI::ProxyServer::dr;
+
+# driver handle subclass: no behaviour added beyond DBI::dr
+@DBI::ProxyServer::dr::ISA = qw(DBI::dr);
+
+
+package DBI::ProxyServer::db;
+
+# database handle subclass: overrides prepare and table_info below
+@DBI::ProxyServer::db::ISA = qw(DBI::db);
+
+# Prepare a statement on the server side.  If the client's config has an
+# 'sql' map, the incoming "statement" is treated as a query *name* whose
+# first word is looked up in that map (query restriction mode).
+# Protocol version > 1: return just the statement handle id.
+# Protocol version 1: combined prepare/execute - executes immediately and
+# returns (handle, NUM_OF_FIELDS, NUM_OF_PARAMS, NAME, TYPE, @result).
+sub prepare {
+    my($dbh, $statement, $attr, $params, $proto_ver) = @_;
+    my $server = $dbh->{'private_server'};
+    if (my $client = $server->{'client'}) {
+        if ($client->{'sql'}) {
+            # restricted mode: map the first word to the real SQL
+            if ($statement =~ /^\s*(\S+)/) {
+                my $st = $1;
+                if (!($statement = $client->{'sql'}->{$st})) {
+                    die "Unknown SQL query: $st";
+                }
+            } else {
+                die "Cannot parse restricted SQL statement: $statement";
+            }
+        }
+    }
+    my $sth = $dbh->SUPER::prepare($statement, $attr);
+    my $handle = $server->StoreHandle($sth);
+
+    if ( $proto_ver and $proto_ver > 1 ) {
+        # description (NAME/TYPE) will be shipped on first execute
+        $sth->{private_proxyserver_described} = 0;
+        return $handle;
+
+    } else {
+        # The difference between the usual prepare and ours is that we implement
+        # a combined prepare/execute. The DBD::Proxy driver doesn't call us for
+        # prepare. Only if an execute happens, then we are called with method
+        # "prepare". Further execute's are called as "execute".
+        my @result = $sth->execute($params);
+        my ($NAME, $TYPE);
+        my $NUM_OF_FIELDS = $sth->{NUM_OF_FIELDS};
+        if ($NUM_OF_FIELDS) {	# is a SELECT
+            $NAME = $sth->{NAME};
+            $TYPE = $sth->{TYPE};
+        }
+        ($handle, $NUM_OF_FIELDS, $sth->{'NUM_OF_PARAMS'},
+         $NAME, $TYPE, @result);
+    }
+}
+
+# Run table_info on the real handle and ship the complete result set
+# back in one go: (NUM_OF_FIELDS, NAME, TYPE, @rows) with each row
+# copied into its own array ref.
+sub table_info {
+    my $dbh = shift;
+    my $sth = $dbh->SUPER::table_info();
+    my $numFields = $sth->{'NUM_OF_FIELDS'};
+    my $names = $sth->{'NAME'};
+    my $types = $sth->{'TYPE'};
+
+    # We wouldn't need to send all the rows at this point, instead we could
+    # make use of $rsth->fetch() on the client as usual.
+    # The problem is that some drivers (namely DBD::ExampleP, DBD::mysql and
+    # DBD::mSQL) are returning foreign sth's here, thus an instance of
+    # DBI::st and not DBI::ProxyServer::st. We could fix this by permitting
+    # the client to execute method DBI::st, but I don't like this.
+    my @rows;
+    while (my ($row) = $sth->fetch()) {
+        last unless defined $row;
+        push(@rows, [@$row]);	# copy - fetch() reuses its row buffer
+    }
+    ($numFields, $names, $types, @rows);
+}
+
+
+package DBI::ProxyServer::st;
+
+# statement handle subclass: overrides execute and fetch below
+@DBI::ProxyServer::st::ISA = qw(DBI::st);
+
+# Execute a prepared statement.  $params is an array ref: plain scalars
+# are bound with bind_param; array-ref entries are either extra
+# bind_param arguments or (when their first element is itself a ref)
+# bind_param_inout specs whose refs are collected for return.
+# Protocol > 1, first execution: also ship the result description
+# (NUM_OF_FIELDS, NUM_OF_PARAMS, NAME, TYPE) back to the client.
+# Otherwise returns ($rows, @outParams).
+sub execute {
+    my $sth = shift; my $params = shift; my $proto_ver = shift;
+    my @outParams;
+    if ($params) {
+        for (my $i = 0;  $i < @$params;) {
+            my $param = $params->[$i++];
+            if (!ref($param)) {
+                $sth->bind_param($i, $param);
+            }
+            else {
+                if (!ref(@$param[0])) {#It's not a reference
+                    $sth->bind_param($i, @$param);
+                }
+                else {
+                    $sth->bind_param_inout($i, @$param);
+                    # keep the inout ref so its final value can be returned
+                    my $ref = shift @$param;
+                    push(@outParams, $ref);
+                }
+            }
+        }
+    }
+    my $rows = $sth->SUPER::execute();
+    if ( $proto_ver and $proto_ver > 1 and not $sth->{private_proxyserver_described} ) {
+        my ($NAME, $TYPE);
+        my $NUM_OF_FIELDS = $sth->{NUM_OF_FIELDS};
+        if ($NUM_OF_FIELDS) {	# is a SELECT
+            $NAME = $sth->{NAME};
+            $TYPE = $sth->{TYPE};
+        }
+        $sth->{private_proxyserver_described} = 1;
+        # First execution, we ship back description.
+        return ($rows, $NUM_OF_FIELDS, $sth->{'NUM_OF_PARAMS'}, $NAME, $TYPE, @outParams);
+    }
+    ($rows, @outParams);
+}
+
+# Fetch up to $numRows rows (default 1) and return them as a list of
+# array refs - copies, since DBI's fetch() reuses its row buffer.
+sub fetch {
+    my $sth = shift; my $numRows = shift || 1;
+    my($ref, @rows);
+    while ($numRows-- && ($ref = $sth->SUPER::fetch())) {
+        push(@rows, [@$ref]);
+    }
+    @rows;
+}
+
+
+1;
+
+
+__END__
+
+=head1 NAME
+
+DBI::ProxyServer - a server for the DBD::Proxy driver
+
+=head1 SYNOPSIS
+
+ use DBI::ProxyServer;
+ DBI::ProxyServer::main(@ARGV);
+
+=head1 DESCRIPTION
+
+DBI::Proxy Server is a module for implementing a proxy for the DBI proxy
+driver, DBD::Proxy. It allows access to databases over the network if the
+DBMS does not offer networked operations. But the proxy server might be
+useful for you, even if you have a DBMS with integrated network
+functionality: It can be used as a DBI proxy in a firewalled environment.
+
+DBI::ProxyServer runs as a daemon on the machine with the DBMS or on the
+firewall. The client connects to the agent using the DBI driver DBD::Proxy,
+thus in exactly the same way as using DBD::mysql, DBD::mSQL or any other
+DBI driver.
+
+The agent is implemented as a RPC::PlServer application. Thus you have
+access to all the possibilities of this module, in particular encryption
+and a similar configuration file. DBI::ProxyServer adds the possibility of
+query restrictions: You can define a set of queries that a client may
+execute and restrict access to those. (Requires a DBI driver that supports
+parameter binding.) See L</CONFIGURATION FILE>.
+
+The provided driver script, L<dbiproxy>, may either be used as it is or
+used as the basis for a local version modified to meet your needs.
+
+=head1 OPTIONS
+
+When calling the DBI::ProxyServer::main() function, you supply an
+array of options. These options are parsed by the Getopt::Long module.
+The ProxyServer inherits all of RPC::PlServer's and hence Net::Daemon's
+options and option handling, in particular the ability to read
+options from either the command line or a config file. See
+L<RPC::PlServer>. See L<Net::Daemon>. Available options include
+
+=over 4
+
+=item I<chroot> (B<--chroot=dir>)
+
+(UNIX only) After doing a bind(), change root directory to the given
+directory by doing a chroot(). This is useful for security, but it
+restricts the environment a lot. For example, you need to load DBI
+drivers in the config file or you have to create hard links to Unix
+sockets, if your drivers are using them. For example, with MySQL, a
+config file might contain the following lines:
+
+ my $rootdir = '/var/dbiproxy';
+ my $unixsockdir = '/tmp';
+ my $unixsockfile = 'mysql.sock';
+ foreach $dir ($rootdir, "$rootdir$unixsockdir") {
+ mkdir 0755, $dir;
+ }
+ link("$unixsockdir/$unixsockfile",
+ "$rootdir$unixsockdir/$unixsockfile");
+ require DBD::mysql;
+
+ {
+ 'chroot' => $rootdir,
+ ...
+ }
+
+If you don't know chroot(), think of an FTP server where you can see a
+certain directory tree only after logging in. See also the --group and
+--user options.
+
+=item I<clients>
+
+An array ref with a list of clients. Clients are hash refs, the attributes
+I<accept> (0 for denying access and 1 for permitting) and I<mask>, a Perl
+regular expression for the clients IP number or its host name.
+
+=item I<configfile> (B<--configfile=file>)
+
+Config files are assumed to return a single hash ref that overrides the
+arguments of the new method. However, command line arguments in turn take
+precedence over the config file. See the L<"CONFIGURATION FILE"> section
+below for details on the config file.
+
+=item I<debug> (B<--debug>)
+
+Turn debugging mode on. Mainly this asserts that logging messages of
+level "debug" are created.
+
+=item I<facility> (B<--facility=mode>)
+
+(UNIX only) Facility to use for L<Sys::Syslog>. The default is
+B<daemon>.
+
+=item I<group> (B<--group=gid>)
+
+After doing a bind(), change the real and effective GID to the given.
+This is useful, if you want your server to bind to a privileged port
+(<1024), but don't want the server to execute as root. See also
+the --user option.
+
+GID's can be passed as group names or numeric values.
+
+=item I<localaddr> (B<--localaddr=ip>)
+
+By default a daemon is listening to any IP number that a machine
+has. This attribute allows to restrict the server to the given
+IP number.
+
+=item I<localport> (B<--localport=port>)
+
+This attribute sets the port on which the daemon is listening. It
+must be given somehow, as there's no default.
+
+=item I<logfile> (B<--logfile=file>)
+
+By default logging messages will be written to the syslog (Unix) or
+to the event log (Windows NT). On other operating systems you need to
+specify a log file. The special value "STDERR" forces logging to
+stderr. See L<Net::Daemon::Log> for details.
+
+=item I<mode> (B<--mode=modename>)
+
+The server can run in three different modes, depending on the environment.
+
+If you are running Perl 5.005 and did compile it for threads, then the
+server will create a new thread for each connection. The thread will
+execute the server's Run() method and then terminate. This mode is the
+default, you can force it with "--mode=threads".
+
+If threads are not available, but you have a working fork(), then the
+server will behave similar by creating a new process for each connection.
+This mode will be used automatically in the absence of threads or if
+you use the "--mode=fork" option.
+
+Finally there's a single-connection mode: If the server has accepted a
+connection, he will enter the Run() method. No other connections are
+accepted until the Run() method returns (if the client disconnects).
+This operation mode is useful if you have neither threads nor fork(),
+for example on the Macintosh. For debugging purposes you can force this
+mode with "--mode=single".
+
+=item I<pidfile> (B<--pidfile=file>)
+
+(UNIX only) If this option is present, a PID file will be created at the
+given location. Default is to not create a pidfile.
+
+=item I<user> (B<--user=uid>)
+
+After doing a bind(), change the real and effective UID to the given.
+This is useful, if you want your server to bind to a privileged port
+(<1024), but don't want the server to execute as root. See also
+the --group and the --chroot options.
+
+UID's can be passed as group names or numeric values.
+
+=item I<version> (B<--version>)
+
+Suppresses startup of the server; instead the version string will
+be printed and the program exits immediately.
+
+=back
+
+=head1 SHUTDOWN
+
+DBI::ProxyServer is built on L<RPC::PlServer> which is, in turn, built on L<Net::Daemon>.
+
+You should refer to L<Net::Daemon> for how to shutdown the server, except that
+you can't because it's not currently documented there (as of v0.43).
+The bottom-line is that it seems that there's no support for graceful shutdown.
+
+=head1 CONFIGURATION FILE
+
+The configuration file is just that of I<RPC::PlServer> or I<Net::Daemon>
+with some additional attributes in the client list.
+
+The config file is a Perl script. At the top of the file you may include
+arbitrary Perl source, for example load drivers at the start (useful
+to enhance performance), prepare a chroot environment and so on.
+
+The important thing is that you finally return a hash ref of option
+name/value pairs. The possible options are listed above.
+
+All possibilities of Net::Daemon and RPC::PlServer apply, in particular
+
+=over 4
+
+=item Host and/or User dependent access control
+
+=item Host and/or User dependent encryption
+
+=item Changing UID and/or GID after binding to the port
+
+=item Running in a chroot() environment
+
+=back
+
+Additionally the server offers you query restrictions. Suggest the
+following client list:
+
+ 'clients' => [
+ { 'mask' => '^admin\.company\.com$',
+ 'accept' => 1,
+ 'users' => [ 'root', 'wwwrun' ],
+ },
+ {
+ 'mask' => '^admin\.company\.com$',
+ 'accept' => 1,
+ 'users' => [ 'root', 'wwwrun' ],
+ 'sql' => {
+ 'select' => 'SELECT * FROM foo',
+ 'insert' => 'INSERT INTO foo VALUES (?, ?, ?)'
+ }
+ }
+
+then only the users root and wwwrun may connect from admin.company.com,
+executing arbitrary queries, but only wwwrun may connect from other
+hosts and is restricted to
+
+ $sth->prepare("select");
+
+or
+
+ $sth->prepare("insert");
+
+which in fact are "SELECT * FROM foo" or "INSERT INTO foo VALUES (?, ?, ?)".
+
+
+=head1 Proxyserver Configuration file (bigger example)
+
+This section tells you how to restrict a DBI-Proxy: Not every user from
+every workstation shall be able to execute every query.
+
+There is a perl program "dbiproxy" which runs on a machine which is able
+to connect to all the databases we wish to reach. All Perl-DBD-drivers must
+be installed on this machine. You can also reach databases for which drivers
+are not available on the machine where you run the program querying the
+database, e.g. ask MS-Access-database from Linux.
+
+Create a configuration file "proxy_oracle.cfg" at the dbproxy-server:
+
+ {
+ # This shall run in a shell or a DOS-window
+ # facility => 'daemon',
+ pidfile => 'your_dbiproxy.pid',
+ logfile => 1,
+ debug => 0,
+ mode => 'single',
+ localport => '12400',
+
+ # Access control, the first match in this list wins!
+ # So the order is important
+ clients => [
+ # hint to organize:
+ # the most specialized rules for single machines/users are 1st
+ # then the denying rules
+       # then the rules about whole networks
+
+ # rule: internal_webserver
+ # desc: to get statistical information
+ {
+ # this IP-address only is meant
+ mask => '^10\.95\.81\.243$',
+ # accept (not defer) connections like this
+ accept => 1,
+ # only users from this list
+ # are allowed to log on
+ users => [ 'informationdesk' ],
+ # only this statistical query is allowed
+ # to get results for a web-query
+ sql => {
+ alive => 'select count(*) from dual',
+ statistic_area => 'select count(*) from e01admin.e01e203 where geb_bezei like ?',
+ }
+ },
+
+ # rule: internal_bad_guy_1
+ {
+ mask => '^10\.95\.81\.1$',
+ accept => 0,
+ },
+
+ # rule: employee_workplace
+       # desc: get detailed information
+ {
+ # any IP-address is meant here
+ mask => '^10\.95\.81\.(\d+)$',
+ # accept (not defer) connections like this
+ accept => 1,
+ # only users from this list
+ # are allowed to log on
+ users => [ 'informationdesk', 'lippmann' ],
+ # all these queries are allowed:
+ sql => {
+ search_city => 'select ort_nr, plz, ort from e01admin.e01e200 where plz like ?',
+ search_area => 'select gebiettyp, geb_bezei from e01admin.e01e203 where geb_bezei like ? or geb_bezei like ?',
+ }
+ },
+
+ # rule: internal_bad_guy_2
+ # This does NOT work, because rule "employee_workplace" hits
+ # with its ip-address-mask of the whole network
+ {
+ # don't accept connection from this ip-address
+ mask => '^10\.95\.81\.5$',
+ accept => 0,
+ }
+ ]
+ }
+
+Start the proxyserver like this:
+
+ rem well-set Oracle_home needed for Oracle
+ set ORACLE_HOME=d:\oracle\ora81
+ dbiproxy --configfile proxy_oracle.cfg
+
+
+=head2 Testing the connection from a remote machine
+
+Call a program "dbish" from your commandline. I take the machine from rule "internal_webserver"
+
+ dbish "dbi:Proxy:hostname=oracle.zdf;port=12400;dsn=dbi:Oracle:e01" informationdesk xxx
+
+There will be a shell-prompt:
+
+ informationdesk@dbi...> alive
+
+ Current statement buffer (enter '/'...):
+ alive
+
+ informationdesk@dbi...> /
+ COUNT(*)
+ '1'
+ [1 rows of 1 fields returned]
+
+
+=head2 Testing the connection with a perl-script
+
+Create a perl-script like this:
+
+ # file: oratest.pl
+ # call me like this: perl oratest.pl user password
+
+ use strict;
+ use DBI;
+
+ my $user = shift || die "Usage: $0 user password";
+ my $pass = shift || die "Usage: $0 user password";
+ my $config = {
+ dsn_at_proxy => "dbi:Oracle:e01",
+ proxy => "hostname=oechsle.zdf;port=12400",
+ };
+ my $dsn = sprintf "dbi:Proxy:%s;dsn=%s",
+ $config->{proxy},
+ $config->{dsn_at_proxy};
+
+ my $dbh = DBI->connect( $dsn, $user, $pass )
+ || die "connect did not work: $DBI::errstr";
+
+ my $sql = "search_city";
+ printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
+ my $cur = $dbh->prepare($sql);
+ $cur->bind_param(1,'905%');
+ &show_result ($cur);
+
+ my $sql = "search_area";
+ printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
+ my $cur = $dbh->prepare($sql);
+ $cur->bind_param(1,'Pfarr%');
+ $cur->bind_param(2,'Bronnamberg%');
+ &show_result ($cur);
+
+ my $sql = "statistic_area";
+ printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
+ my $cur = $dbh->prepare($sql);
+ $cur->bind_param(1,'Pfarr%');
+ &show_result ($cur);
+
+ $dbh->disconnect;
+ exit;
+
+
+ sub show_result {
+ my $cur = shift;
+ unless ($cur->execute()) {
+ print "Could not execute\n";
+ return;
+ }
+
+ my $rownum = 0;
+ while (my @row = $cur->fetchrow_array()) {
+ printf "Row is: %s\n", join(", ",@row);
+ if ($rownum++ > 5) {
+ print "... and so on\n";
+ last;
+ }
+ }
+ $cur->finish;
+ }
+
+The result
+
+ C:\>perl oratest.pl informationdesk xxx
+ ========================================
+ search_city
+ ========================================
+ Row is: 3322, 9050, Chemnitz
+ Row is: 3678, 9051, Chemnitz
+ Row is: 10447, 9051, Chemnitz
+ Row is: 12128, 9051, Chemnitz
+ Row is: 10954, 90513, Zirndorf
+ Row is: 5808, 90513, Zirndorf
+ Row is: 5715, 90513, Zirndorf
+ ... and so on
+ ========================================
+ search_area
+ ========================================
+ Row is: 101, Bronnamberg
+ Row is: 400, Pfarramt Zirndorf
+ Row is: 400, Pfarramt Rosstal
+ Row is: 400, Pfarramt Oberasbach
+ Row is: 401, Pfarramt Zirndorf
+ Row is: 401, Pfarramt Rosstal
+ ========================================
+ statistic_area
+ ========================================
+ DBD::Proxy::st execute failed: Server returned error: Failed to execute method CallMethod: Unknown SQL query: statistic_area at E:/Perl/site/lib/DBI/ProxyServer.pm line 258.
+ Could not execute
+
+
+=head2 How the configuration works
+
+The most important section to control access to your dbi-proxy is "client=>"
+in the file "proxy_oracle.cfg":
+
+Controlling which person at which machine is allowed to access
+
+=over 4
+
+=item * "mask" is a perl regular expression against the plain ip-address of the machine which wishes to connect _or_ the reverse-lookup from a nameserver.
+
+=item * "accept" tells the dbiproxy-server whether IP addresses matching "mask" are allowed to connect or not (0/1)
+
+=item * "users" is a reference to a list of usernames which must be matched, this is NOT a regular expression.
+
+=back
+
+Controlling which SQL-statements are allowed
+
+You can allow every SQL statement you like by simply omitting "sql => ...", but the more important thing is to restrict the connection so that only allowed queries are possible.
+
+If you include an sql-section in your config-file like this:
+
+ sql => {
+ alive => 'select count(*) from dual',
+ statistic_area => 'select count(*) from e01admin.e01e203 where geb_bezei like ?',
+ }
+
+The user is allowed to put two queries against the dbi-proxy. The queries are _not_ "select count(*)...", the queries are "alive" and "statistic_area"! These keywords are replaced by the real query. So you can run a query for "alive":
+
+ my $sql = "alive";
+ my $cur = $dbh->prepare($sql);
+ ...
+
+The flexibility is that you can put parameters in the where-part of the query so the query are not static. Simply replace a value in the where-part of the query through a question mark and bind it as a parameter to the query.
+
+ my $sql = "statistic_area";
+ my $cur = $dbh->prepare($sql);
+ $cur->bind_param(1,'905%');
+ # A second parameter would be called like this:
+ # $cur->bind_param(2,'98%');
+
+The result is this query:
+
+ select count(*) from e01admin.e01e203
+ where geb_bezei like '905%'
+
+Don't try to put parameters into the sql-query like this:
+
+ # Does not work like you think.
+ # Only the first word of the query is parsed,
+ # so it's changed to "statistic_area", the rest is omitted.
+ # You _have_ to work with $cur->bind_param.
+ my $sql = "statistic_area 905%";
+ my $cur = $dbh->prepare($sql);
+ ...
+
+
+=head2 Problems
+
+=over 4
+
+=item * I don't know how to restrict users to special databases.
+
+=item * I don't know how to pass query-parameters via dbish
+
+=back
+
+
+=head1 AUTHOR
+
+ Copyright (c) 1997 Jochen Wiedmann
+ Am Eisteich 9
+ 72555 Metzingen
+ Germany
+
+ Email: joe@ispsoft.de
+ Phone: +49 7123 14881
+
+The DBI::ProxyServer module is free software; you can redistribute it
+and/or modify it under the same terms as Perl itself. In particular
+permission is granted to Tim Bunce for distributing this as a part of
+the DBI.
+
+
+=head1 SEE ALSO
+
+L<dbiproxy>, L<DBD::Proxy>, L<DBI>, L<RPC::PlServer>,
+L<RPC::PlClient>, L<Net::Daemon>, L<Net::Daemon::Log>,
+L<Sys::Syslog>, L<Win32::EventLog>, L<syslog>
diff --git a/lib/DBI/PurePerl.pm b/lib/DBI/PurePerl.pm
new file mode 100644
index 0000000..593379d
--- /dev/null
+++ b/lib/DBI/PurePerl.pm
@@ -0,0 +1,1259 @@
+########################################################################
+package # hide from PAUSE
+ DBI;
+# vim: ts=8:sw=4
+########################################################################
+#
+# Copyright (c) 2002,2003 Tim Bunce Ireland.
+#
+# See COPYRIGHT section in DBI.pm for usage and distribution rights.
+#
+########################################################################
+#
+# Please send patches and bug reports to
+#
+# Jeff Zucker <jeff@vpservices.com> with cc to <dbi-dev@perl.org>
+#
+########################################################################
+
+use strict;
+use Carp;
+require Symbol;
+
+require utf8;
+*utf8::is_utf8 = sub { # hack for perl 5.6
+ require bytes;
+ return unless defined $_[0];
+ return !(length($_[0]) == bytes::length($_[0]))
+} unless defined &utf8::is_utf8;
+
+$DBI::PurePerl = $ENV{DBI_PUREPERL} || 1;
+$DBI::PurePerl::VERSION = sprintf("2.%06d", q$Revision: 14285 $ =~ /(\d+)/o);
+
+$DBI::neat_maxlen ||= 400;
+
+$DBI::tfh = Symbol::gensym();
+open $DBI::tfh, ">&STDERR" or warn "Can't dup STDERR: $!";
+select( (select($DBI::tfh), $| = 1)[0] ); # autoflush
+
+# check for weaken support, used by ChildHandles
+my $HAS_WEAKEN = eval {
+ require Scalar::Util;
+ # this will croak() if this Scalar::Util doesn't have a working weaken().
+ Scalar::Util::weaken( my $test = [] );
+ 1;
+};
+
+%DBI::last_method_except = map { $_=>1 } qw(DESTROY _set_fbav set_err);
+
+use constant SQL_ALL_TYPES => 0;
+use constant SQL_ARRAY => 50;
+use constant SQL_ARRAY_LOCATOR => 51;
+use constant SQL_BIGINT => (-5);
+use constant SQL_BINARY => (-2);
+use constant SQL_BIT => (-7);
+use constant SQL_BLOB => 30;
+use constant SQL_BLOB_LOCATOR => 31;
+use constant SQL_BOOLEAN => 16;
+use constant SQL_CHAR => 1;
+use constant SQL_CLOB => 40;
+use constant SQL_CLOB_LOCATOR => 41;
+use constant SQL_DATE => 9;
+use constant SQL_DATETIME => 9;
+use constant SQL_DECIMAL => 3;
+use constant SQL_DOUBLE => 8;
+use constant SQL_FLOAT => 6;
+use constant SQL_GUID => (-11);
+use constant SQL_INTEGER => 4;
+use constant SQL_INTERVAL => 10;
+use constant SQL_INTERVAL_DAY => 103;
+use constant SQL_INTERVAL_DAY_TO_HOUR => 108;
+use constant SQL_INTERVAL_DAY_TO_MINUTE => 109;
+use constant SQL_INTERVAL_DAY_TO_SECOND => 110;
+use constant SQL_INTERVAL_HOUR => 104;
+use constant SQL_INTERVAL_HOUR_TO_MINUTE => 111;
+use constant SQL_INTERVAL_HOUR_TO_SECOND => 112;
+use constant SQL_INTERVAL_MINUTE => 105;
+use constant SQL_INTERVAL_MINUTE_TO_SECOND => 113;
+use constant SQL_INTERVAL_MONTH => 102;
+use constant SQL_INTERVAL_SECOND => 106;
+use constant SQL_INTERVAL_YEAR => 101;
+use constant SQL_INTERVAL_YEAR_TO_MONTH => 107;
+use constant SQL_LONGVARBINARY => (-4);
+use constant SQL_LONGVARCHAR => (-1);
+use constant SQL_MULTISET => 55;
+use constant SQL_MULTISET_LOCATOR => 56;
+use constant SQL_NUMERIC => 2;
+use constant SQL_REAL => 7;
+use constant SQL_REF => 20;
+use constant SQL_ROW => 19;
+use constant SQL_SMALLINT => 5;
+use constant SQL_TIME => 10;
+use constant SQL_TIMESTAMP => 11;
+use constant SQL_TINYINT => (-6);
+use constant SQL_TYPE_DATE => 91;
+use constant SQL_TYPE_TIME => 92;
+use constant SQL_TYPE_TIMESTAMP => 93;
+use constant SQL_TYPE_TIMESTAMP_WITH_TIMEZONE => 95;
+use constant SQL_TYPE_TIME_WITH_TIMEZONE => 94;
+use constant SQL_UDT => 17;
+use constant SQL_UDT_LOCATOR => 18;
+use constant SQL_UNKNOWN_TYPE => 0;
+use constant SQL_VARBINARY => (-3);
+use constant SQL_VARCHAR => 12;
+use constant SQL_WCHAR => (-8);
+use constant SQL_WLONGVARCHAR => (-10);
+use constant SQL_WVARCHAR => (-9);
+
+# for Cursor types
+use constant SQL_CURSOR_FORWARD_ONLY => 0;
+use constant SQL_CURSOR_KEYSET_DRIVEN => 1;
+use constant SQL_CURSOR_DYNAMIC => 2;
+use constant SQL_CURSOR_STATIC => 3;
+use constant SQL_CURSOR_TYPE_DEFAULT => SQL_CURSOR_FORWARD_ONLY;
+
+use constant IMA_HAS_USAGE => 0x0001; #/* check parameter usage */
+use constant IMA_FUNC_REDIRECT => 0x0002; #/* is $h->func(..., "method")*/
+use constant IMA_KEEP_ERR => 0x0004; #/* don't reset err & errstr */
+use constant IMA_KEEP_ERR_SUB => 0x0008; #/* '' if in nested call */
+use constant IMA_NO_TAINT_IN => 0x0010; #/* don't check for tainted args*/
+use constant IMA_NO_TAINT_OUT => 0x0020; #/* don't taint results */
+use constant IMA_COPY_UP_STMT => 0x0040; #/* copy sth Statement to dbh */
+use constant IMA_END_WORK => 0x0080; #/* set on commit & rollback */
+use constant IMA_STUB => 0x0100; #/* donothing eg $dbh->connected */
+use constant IMA_CLEAR_STMT => 0x0200; #/* clear Statement before call */
+use constant IMA_UNRELATED_TO_STMT=> 0x0400; #/* profile as empty Statement */
+use constant IMA_NOT_FOUND_OKAY => 0x0800; #/* not error if not found */
+use constant IMA_EXECUTE => 0x1000; #/* do/execute: DBIcf_Executed */
+use constant IMA_SHOW_ERR_STMT => 0x2000; #/* dbh meth relates to Statement*/
+use constant IMA_HIDE_ERR_PARAMVALUES => 0x4000; #/* ParamValues are not relevant */
+use constant IMA_IS_FACTORY => 0x8000; #/* new h ie connect & prepare */
+use constant IMA_CLEAR_CACHED_KIDS => 0x10000; #/* clear CachedKids before call */
+
+use constant DBIstcf_STRICT => 0x0001;
+use constant DBIstcf_DISCARD_STRING => 0x0002;
+
+my %is_flag_attribute = map {$_ =>1 } qw(
+ Active
+ AutoCommit
+ ChopBlanks
+ CompatMode
+ Executed
+ Taint
+ TaintIn
+ TaintOut
+ InactiveDestroy
+ AutoInactiveDestroy
+ LongTruncOk
+ MultiThread
+ PrintError
+ PrintWarn
+ RaiseError
+ ShowErrorStatement
+ Warn
+);
+my %is_valid_attribute = map {$_ =>1 } (keys %is_flag_attribute, qw(
+ ActiveKids
+ Attribution
+ BegunWork
+ CachedKids
+ Callbacks
+ ChildHandles
+ CursorName
+ Database
+ DebugDispatch
+ Driver
+ Err
+ Errstr
+ ErrCount
+ FetchHashKeyName
+ HandleError
+ HandleSetErr
+ ImplementorClass
+ Kids
+ LongReadLen
+ NAME NAME_uc NAME_lc NAME_uc_hash NAME_lc_hash
+ NULLABLE
+ NUM_OF_FIELDS
+ NUM_OF_PARAMS
+ Name
+ PRECISION
+ ParamValues
+ Profile
+ Provider
+ ReadOnly
+ RootClass
+ RowCacheSize
+ RowsInCache
+ SCALE
+ State
+ Statement
+ TYPE
+ Type
+ TraceLevel
+ Username
+ Version
+));
+
+# Return true if $attr is acceptable as a handle attribute name:
+# either a known standard DBI attribute (%is_valid_attribute) or a
+# driver-private attribute (by DBI convention these start lowercase).
+sub valid_attribute {
+    my $attr = shift;
+    return 1 if $is_valid_attribute{$attr};
+    return 1 if $attr =~ m/^[a-z]/; # starts with lowercase letter
+    return 0
+}
+
+my $initial_setup;
+# One-time setup run on the first _install_method() call: announce the
+# PurePerl version when tracing is on, and untie the $DBI::err/errstr/
+# state/rows package variables (PurePerl assigns them directly instead
+# of going through the tied DBI::var mechanism used by the XS build).
+sub initial_setup {
+    $initial_setup = 1;
+    print $DBI::tfh __FILE__ . " version " . $DBI::PurePerl::VERSION . "\n"
+        if $DBI::dbi_debug & 0xF;
+    untie $DBI::err;
+    untie $DBI::errstr;
+    untie $DBI::state;
+    untie $DBI::rows;
+    #tie $DBI::lasth, 'DBI::var', '!lasth'; # special case: return boolean
+}
+
+# Build and install (via string eval) the dispatch wrapper sub for one
+# DBI method.  This is the PurePerl equivalent of the XS dispatcher:
+# the generated wrapper resolves inner/outer tied handles, clears or
+# keeps err/errstr/state, tracks call depth, emits trace output, and
+# applies PrintError/PrintWarn/RaiseError/HandleError policy after the
+# call.  Which fragments are generated is controlled by the IMA_* bits
+# in $param_hash->{O}; fragments accumulate in @pre_call_frag and
+# @post_call_frag and are spliced into the sub's source text below.
+# Note the trace-related fragments are only compiled in at all when
+# DBI_TRACE exists in the environment (hence 'exists' tests below).
+sub _install_method {
+    my ( $caller, $method, $from, $param_hash ) = @_;
+    initial_setup() unless $initial_setup;
+
+    my ($class, $method_name) = $method =~ /^[^:]+::(.+)::(.+)$/;
+    my $bitmask = $param_hash->{'O'} || 0;
+    my @pre_call_frag;
+
+    return if $method_name eq 'can';
+
+    push @pre_call_frag, q{
+        # ignore DESTROY for outer handle (DESTROY for inner likely to follow soon)
+        return if $h_inner;
+        # handle AutoInactiveDestroy and InactiveDestroy
+        $h->{InactiveDestroy} = 1
+            if $h->{AutoInactiveDestroy} and $$ != $h->{dbi_pp_pid};
+        $h->{Active} = 0
+            if $h->{InactiveDestroy};
+        # copy err/errstr/state up to driver so $DBI::err etc still work
+        if ($h->{err} and my $drh = $h->{Driver}) {
+            $drh->{$_} = $h->{$_} for ('err','errstr','state');
+        }
+    } if $method_name eq 'DESTROY';
+
+    push @pre_call_frag, q{
+        return $h->{$_[0]} if exists $h->{$_[0]};
+    } if $method_name eq 'FETCH' && !exists $ENV{DBI_TRACE}; # XXX ?
+
+    push @pre_call_frag, "return;"
+        if IMA_STUB & $bitmask;
+
+    push @pre_call_frag, q{
+        $method_name = pop @_;
+    } if IMA_FUNC_REDIRECT & $bitmask;
+
+    push @pre_call_frag, q{
+        my $parent_dbh = $h->{Database};
+    } if (IMA_COPY_UP_STMT|IMA_EXECUTE) & $bitmask;
+
+    push @pre_call_frag, q{
+        warn "No Database set for $h on $method_name!" unless $parent_dbh; # eg proxy problems
+        $parent_dbh->{Statement} = $h->{Statement} if $parent_dbh;
+    } if IMA_COPY_UP_STMT & $bitmask;
+
+    push @pre_call_frag, q{
+        $h->{Executed} = 1;
+        $parent_dbh->{Executed} = 1 if $parent_dbh;
+    } if IMA_EXECUTE & $bitmask;
+
+    push @pre_call_frag, q{
+        %{ $h->{CachedKids} } = () if $h->{CachedKids};
+    } if IMA_CLEAR_CACHED_KIDS & $bitmask;
+
+    # Error handling setup: either keep the existing err/errstr/state
+    # (IMA_KEEP_ERR, or IMA_KEEP_ERR_SUB when inside a nested call) or
+    # generate code that clears them before the method runs.
+    if (IMA_KEEP_ERR & $bitmask) {
+        push @pre_call_frag, q{
+            my $keep_error = 1;
+        };
+    }
+    else {
+        my $ke_init = (IMA_KEEP_ERR_SUB & $bitmask)
+                ? q{= $h->{dbi_pp_parent}->{dbi_pp_call_depth} }
+                : "";
+        push @pre_call_frag, qq{
+            my \$keep_error $ke_init;
+        };
+        my $keep_error_code = q{
+            #warn "$method_name cleared err";
+            $h->{err} = $DBI::err = undef;
+            $h->{errstr} = $DBI::errstr = undef;
+            $h->{state} = $DBI::state = '';
+        };
+        $keep_error_code = q{
+            printf $DBI::tfh " !! %s: %s CLEARED by call to }.$method_name.q{ method\n".
+                    $h->{err}, $h->{err}
+                if defined $h->{err} && $DBI::dbi_debug & 0xF;
+        }. $keep_error_code
+            if exists $ENV{DBI_TRACE};
+        push @pre_call_frag, ($ke_init)
+                ? qq{ unless (\$keep_error) { $keep_error_code }}
+                : $keep_error_code
+            unless $method_name eq 'set_err';
+    }
+
+    push @pre_call_frag, q{
+        my $ErrCount = $h->{ErrCount};
+    };
+
+    push @pre_call_frag, q{
+        if (($DBI::dbi_debug & 0xF) >= 2) {
+            local $^W;
+            my $args = join " ", map { DBI::neat($_) } ($h, @_);
+            printf $DBI::tfh " > $method_name in $imp ($args) [$@]\n";
+        }
+    } if exists $ENV{DBI_TRACE}; # note use of 'exists'
+
+    push @pre_call_frag, q{
+        $h->{'dbi_pp_last_method'} = $method_name;
+    } unless exists $DBI::last_method_except{$method_name};
+
+    # --- post method call code fragments ---
+    my @post_call_frag;
+
+    push @post_call_frag, q{
+        if (my $trace_level = ($DBI::dbi_debug & 0xF)) {
+            if ($h->{err}) {
+                printf $DBI::tfh " !! ERROR: %s %s\n", $h->{err}, $h->{errstr};
+            }
+            my $ret = join " ", map { DBI::neat($_) } @ret;
+            my $msg = " < $method_name= $ret";
+            $msg = ($trace_level >= 2) ? Carp::shortmess($msg) : "$msg\n";
+            print $DBI::tfh $msg;
+        }
+    } if exists $ENV{DBI_TRACE}; # note use of exists
+
+    push @post_call_frag, q{
+        $h->{Executed} = 0;
+        if ($h->{BegunWork}) {
+            $h->{BegunWork} = 0;
+            $h->{AutoCommit} = 1;
+        }
+    } if IMA_END_WORK & $bitmask;
+
+    push @post_call_frag, q{
+        if ( ref $ret[0] and
+            UNIVERSAL::isa($ret[0], 'DBI::_::common') and
+            defined( (my $h_new = tied(%{$ret[0]})||$ret[0])->{err} )
+        ) {
+            # copy up info/warn to drh so PrintWarn on connect is triggered
+            $h->set_err($h_new->{err}, $h_new->{errstr}, $h_new->{state})
+        }
+    } if IMA_IS_FACTORY & $bitmask;
+
+    # Final fragment: propagate err/errstr/state to $DBI::* and apply
+    # the PrintError/PrintWarn/RaiseError/HandleError policy, but only
+    # at the outermost call depth and when the error wasn't kept.
+    push @post_call_frag, q{
+        $keep_error = 0 if $keep_error && $h->{ErrCount} > $ErrCount;
+
+        $DBI::err = $h->{err};
+        $DBI::errstr = $h->{errstr};
+        $DBI::state = $h->{state};
+
+        if ( !$keep_error
+        && defined(my $err = $h->{err})
+        && ($call_depth <= 1 && !$h->{dbi_pp_parent}{dbi_pp_call_depth})
+        ) {
+
+            my($pe,$pw,$re,$he) = @{$h}{qw(PrintError PrintWarn RaiseError HandleError)};
+            my $msg;
+
+            if ($err && ($pe || $re || $he) # error
+            or (!$err && length($err) && $pw) # warning
+            ) {
+                my $last = ($DBI::last_method_except{$method_name})
+                    ? ($h->{'dbi_pp_last_method'}||$method_name) : $method_name;
+                my $errstr = $h->{errstr} || $DBI::errstr || $err || '';
+                my $msg = sprintf "%s %s %s: %s", $imp, $last,
+                        ($err eq "0") ? "warning" : "failed", $errstr;
+
+                if ($h->{'ShowErrorStatement'} and my $Statement = $h->{Statement}) {
+                    $msg .= ' [for Statement "' . $Statement;
+                    if (my $ParamValues = $h->FETCH('ParamValues')) {
+                        $msg .= '" with ParamValues: ';
+                        $msg .= DBI::_concat_hash_sorted($ParamValues, "=", ", ", 1, undef);
+                        $msg .= "]";
+                    }
+                    else {
+                        $msg .= '"]';
+                    }
+                }
+                if ($err eq "0") { # is 'warning' (not info)
+                    carp $msg if $pw;
+                }
+                else {
+                    my $do_croak = 1;
+                    if (my $subsub = $h->{'HandleError'}) {
+                        $do_croak = 0 if &$subsub($msg,$h,$ret[0]);
+                    }
+                    if ($do_croak) {
+                        printf $DBI::tfh " $method_name has failed ($h->{PrintError},$h->{RaiseError})\n"
+                                if ($DBI::dbi_debug & 0xF) >= 4;
+                        carp $msg if $pe;
+                        die $msg if $h->{RaiseError};
+                    }
+                }
+            }
+        }
+    };
+
+
+    # Assemble the wrapper source: prologue (inner/outer handle and
+    # ImplementorClass resolution), pre-call fragments, the actual
+    # method call (with func() redirection fallback), post-call
+    # fragments, and the return.
+    my $method_code = q[
+        sub {
+            my $h = shift;
+            my $h_inner = tied(%$h);
+            $h = $h_inner if $h_inner;
+
+            my $imp;
+            if ($method_name eq 'DESTROY') {
+                # during global destruction, $h->{...} can trigger "Can't call FETCH on an undef value"
+                # implying that tied() above lied to us, so we need to use eval
+                local $@; # protect $@
+                $imp = eval { $h->{"ImplementorClass"} } or return; # probably global destruction
+            }
+            else {
+                $imp = $h->{"ImplementorClass"} or do {
+                    warn "Can't call $method_name method on handle $h after take_imp_data()\n"
+                        if not exists $h->{Active};
+                    return; # or, more likely, global destruction
+                };
+            }
+
+        ] . join("\n", '', @pre_call_frag, '') . q[
+
+            my $call_depth = $h->{'dbi_pp_call_depth'} + 1;
+            local ($h->{'dbi_pp_call_depth'}) = $call_depth;
+
+            my @ret;
+            my $sub = $imp->can($method_name);
+            if (!$sub and IMA_FUNC_REDIRECT & $bitmask and $sub = $imp->can('func')) {
+                push @_, $method_name;
+            }
+            if ($sub) {
+                (wantarray) ? (@ret = &$sub($h,@_)) : (@ret = scalar &$sub($h,@_));
+            }
+            else {
+                # XXX could try explicit fallback to $imp->can('AUTOLOAD') etc
+                # which would then let Multiplex pass PurePerl tests, but some
+                # hook into install_method may be better.
+                croak "Can't locate DBI object method \"$method_name\" via package \"$imp\""
+                    if ] . ((IMA_NOT_FOUND_OKAY & $bitmask) ? 0 : 1) . q[;
+            }
+
+        ] . join("\n", '', @post_call_frag, '') . q[
+
+            return (wantarray) ? @ret : $ret[0];
+        }
+    ];
+    no strict qw(refs);
+    my $code_ref = eval qq{#line 1 "DBI::PurePerl $method"\n$method_code};
+    warn "$@\n$method_code\n" if $@;
+    die "$@\n$method_code\n" if $@;
+    *$method = $code_ref;
+    if (0 && $method =~ /\b(connect|FETCH)\b/) { # debugging tool
+        my $l=0; # show line-numbered code for method
+        warn "*$method code:\n".join("\n", map { ++$l.": $_" } split/\n/,$method_code);
+    }
+}
+
+
+# Create a DBI-style dual handle: the inner hash %$attr is tied to an
+# outer hash, the outer is blessed into $class and returned to the
+# application.  Returns the outer handle in scalar context, or the
+# (outer, inner) pair in list context.
+sub _new_handle {
+    my ($class, $parent, $attr, $imp_data, $imp_class) = @_;
+
+    DBI->trace_msg(" New $class (for $imp_class, parent=$parent, id=".($imp_data||'').")\n")
+        if $DBI::dbi_debug >= 3;
+
+    $attr->{ImplementorClass} = $imp_class
+        or Carp::croak("_new_handle($class): 'ImplementorClass' attribute not given");
+
+    # This is how we create a DBI style Object:
+    # %outer gets tied to %$attr (which becomes the 'inner' handle)
+    my (%outer, $i, $h);
+    $i = tie %outer, $class, $attr; # ref to inner hash (for driver)
+    $h = bless \%outer, $class; # ref to outer hash (for application)
+    # The above tie and bless may migrate down into _setup_handle()...
+    # Now add magic so DBI method dispatch works
+    DBI::_setup_handle($h, $imp_class, $parent, $imp_data);
+    return $h unless wantarray;
+    return ($h, $i);
+}
+
+# Initialise a freshly created handle: inherit the standard attributes
+# from $parent (for dbh/sth), or apply driver-handle defaults when
+# there is no parent.  Also registers the new handle in the parent's
+# ChildHandles list (as a weak ref, when Scalar::Util::weaken works).
+sub _setup_handle {
+    my($h, $imp_class, $parent, $imp_data) = @_;
+    my $h_inner = tied(%$h) || $h;
+    if (($DBI::dbi_debug & 0xF) >= 4) {
+        local $^W;
+        print $DBI::tfh " _setup_handle(@_)\n";
+    }
+    $h_inner->{"imp_data"} = $imp_data;
+    $h_inner->{"ImplementorClass"} = $imp_class;
+    $h_inner->{"Kids"} = $h_inner->{"ActiveKids"} = 0; # XXX not maintained
+    if ($parent) {
+        # inherited attributes are copied down only if the child has
+        # not already been given an explicit value
+        foreach (qw(
+            RaiseError PrintError PrintWarn HandleError HandleSetErr
+            Warn LongTruncOk ChopBlanks AutoCommit ReadOnly
+            ShowErrorStatement FetchHashKeyName LongReadLen CompatMode
+        )) {
+            $h_inner->{$_} = $parent->{$_}
+                if exists $parent->{$_} && !exists $h_inner->{$_};
+        }
+        # parent class is encoded in its package name suffix (::db/::dr)
+        if (ref($parent) =~ /::db$/) {
+            $h_inner->{Database} = $parent;
+            $parent->{Statement} = $h_inner->{Statement};
+            $h_inner->{NUM_OF_PARAMS} = 0;
+        }
+        elsif (ref($parent) =~ /::dr$/){
+            $h_inner->{Driver} = $parent;
+        }
+        $h_inner->{dbi_pp_parent} = $parent;
+
+        # add to the parent's ChildHandles
+        if ($HAS_WEAKEN) {
+            my $handles = $parent->{ChildHandles} ||= [];
+            push @$handles, $h;
+            Scalar::Util::weaken($handles->[-1]);
+            # purge destroyed handles occasionally
+            if (@$handles % 120 == 0) {
+                @$handles = grep { defined } @$handles;
+                Scalar::Util::weaken($_) for @$handles; # re-weaken after grep
+            }
+        }
+    }
+    else { # setting up a driver handle
+        $h_inner->{Warn} = 1;
+        $h_inner->{PrintWarn} = $^W;
+        $h_inner->{AutoCommit} = 1;
+        $h_inner->{TraceLevel} = 0;
+        $h_inner->{CompatMode} = (1==0);
+        $h_inner->{FetchHashKeyName} ||= 'NAME';
+        $h_inner->{LongReadLen} ||= 80;
+        $h_inner->{ChildHandles} ||= [] if $HAS_WEAKEN;
+        $h_inner->{Type} ||= 'dr';
+    }
+    $h_inner->{"dbi_pp_call_depth"} = 0;
+    $h_inner->{"dbi_pp_pid"} = $$;
+    $h_inner->{ErrCount} = 0;
+    $h_inner->{Active} = 1;
+}
+
+# Stub: the XS DBI exports constants via constant(); PurePerl defines
+# them with 'use constant' above, so reaching this indicates a bug.
+sub constant {
+    warn "constant(@_) called unexpectedly"; return undef;
+}
+
+# DBI->trace($level [, $file]): set the global trace level (PurePerl
+# has no per-handle levels) and optionally redirect trace output.
+# Returns the previous level.  Note the trace file is (re)set whether
+# or not $level is true - just on different sides of the level change.
+sub trace {
+    my ($h, $level, $file) = @_;
+    $level = $h->parse_trace_flags($level)
+        if defined $level and !DBI::looks_like_number($level);
+    my $old_level = $DBI::dbi_debug;
+    _set_trace_file($file) if $level;
+    if (defined $level) {
+        $DBI::dbi_debug = $level;
+        print $DBI::tfh " DBI $DBI::VERSION (PurePerl) "
+                . "dispatch trace level set to $DBI::dbi_debug\n"
+            if $DBI::dbi_debug & 0xF;
+    }
+    _set_trace_file($file) if !$level;
+    return $old_level;
+}
+
+# Point $DBI::tfh at the requested trace destination.  Accepts a glob
+# ref, a glob, the strings 'STDERR'/'STDOUT', a filename (opened for
+# append), or a false value (defaults to STDERR).  $DBI::tfh_needs_close
+# records whether we own the handle (filehandles passed in by the
+# caller must not be closed by us).  Output is always set autoflush.
+sub _set_trace_file {
+    my ($file) = @_;
+    #
+    # DAA add support for filehandle inputs
+    #
+    # DAA required to avoid closing a prior fh trace()
+    $DBI::tfh = undef unless $DBI::tfh_needs_close;
+
+    if (ref $file eq 'GLOB') {
+        $DBI::tfh = $file;
+        select((select($DBI::tfh), $| = 1)[0]);
+        $DBI::tfh_needs_close = 0;
+        return 1;
+    }
+    if ($file && ref \$file eq 'GLOB') {
+        $DBI::tfh = *{$file}{IO};
+        select((select($DBI::tfh), $| = 1)[0]);
+        $DBI::tfh_needs_close = 0;
+        return 1;
+    }
+    $DBI::tfh_needs_close = 1;
+    if (!$file || $file eq 'STDERR') {
+        open $DBI::tfh, ">&STDERR" or carp "Can't dup STDERR: $!";
+    }
+    elsif ($file eq 'STDOUT') {
+        open $DBI::tfh, ">&STDOUT" or carp "Can't dup STDOUT: $!";
+    }
+    else {
+        open $DBI::tfh, ">>$file" or carp "Can't open $file: $!";
+    }
+    select((select($DBI::tfh), $| = 1)[0]);
+    return 1;
+}
+# Return the driver's private imp_data stored by _setup_handle().
+sub _get_imp_data { shift->{"imp_data"}; }
+# No-op: the XS sv_dump debugging aid has no PurePerl equivalent.
+sub _svdump { }
+# Print all attributes of handle $h to the trace filehandle, one
+# "key => neat(value)" line each.  $level is accepted but unused here.
+sub dump_handle {
+    my ($h,$msg,$level) = @_;
+    $msg||="dump_handle $h";
+    print $DBI::tfh "$msg:\n";
+    for my $attrib (sort keys %$h) {
+        print $DBI::tfh "\t$attrib => ".DBI::neat($h->{$attrib})."\n";
+    }
+}
+
+# Return the (outer, inner) handle pair for $h.  If given an inner
+# handle, PurePerl cannot recover the outer one, so it warns and
+# returns the inner handle in both positions.
+sub _handles {
+    my $h = shift;
+    my $h_inner = tied %$h;
+    if ($h_inner) { # this is okay
+        return $h unless wantarray;
+        return ($h, $h_inner);
+    }
+    # XXX this isn't okay... we have an inner handle but
+    # currently have no way to get at its outer handle,
+    # so we just warn and return the inner one for both...
+    Carp::carp("Can't return outer handle from inner handle using DBI::PurePerl");
+    return $h unless wantarray;
+    return ($h,$h);
+}
+
+# DBI::hash($string [, $type]) - compute a 32-bit hash of $string.
+# Type 0 is the Perl 5.1 multiply-by-33 hash (returned negated, with
+# bit 31 set, to match the XS implementation); type 1 is FNV-1, which
+# needs Math::BigInt >= 1.56 for the 64-bit intermediate product.
+sub hash {
+    my ($key, $type) = @_;
+    my ($hash);
+    if (!$type) {
+        $hash = 0;
+        # XXX The C version uses the "char" type, which could be either
+        # signed or unsigned. I use signed because so do the two
+        # compilers on my system.
+        for my $char (unpack ("c*", $key)) {
+            $hash = $hash * 33 + $char;
+        }
+        $hash &= 0x7FFFFFFF; # limit to 31 bits
+        $hash |= 0x40000000; # set bit 31
+        return -$hash; # return negative int
+    }
+    elsif ($type == 1) { # Fowler/Noll/Vo hash
+        # see http://www.isthe.com/chongo/tech/comp/fnv/
+        require Math::BigInt; # feel free to reimplement w/o BigInt!
+        (my $version = $Math::BigInt::VERSION || 0) =~ s/_.*//; # eg "1.70_01"
+        if ($version >= 1.56) {
+            $hash = Math::BigInt->new(0x811c9dc5);
+            for my $uchar (unpack ("C*", $key)) {
+                # multiply by the 32 bit FNV magic prime mod 2^64
+                $hash = ($hash * 0x01000193) & 0xffffffff;
+                # xor the bottom with the current octet
+                $hash ^= $uchar;
+            }
+            # cast to int
+            return unpack "i", pack "i", $hash;
+        }
+        croak("DBI::PurePerl doesn't support hash type 1 without Math::BigInt >= 1.56 (available on CPAN)");
+    }
+    else {
+        croak("bad hash type $type");
+    }
+}
+
+# For each argument return 1 if it looks like a decimal number, 0 if
+# not, and undef if undefined or empty.  Returns a list for multiple
+# arguments, a single value for one.
+sub looks_like_number {
+    my @new = ();
+    for my $thing(@_) {
+        if (!defined $thing or $thing eq '') {
+            push @new, undef;
+        }
+        else {
+            push @new, ($thing =~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$/) ? 1 : 0;
+        }
+    }
+    return (@_ >1) ? @new : $new[0];
+}
+
+# Format $v for trace/diagnostic output: "undef" for undef, bare for
+# numeric-looking values, otherwise quoted, truncated to $maxlen
+# (default $DBI::neat_maxlen) with "..." and non-printables replaced
+# by dots.  Double quotes mark strings Perl flags as UTF-8.
+sub neat {
+    my $v = shift;
+    return "undef" unless defined $v;
+    my $quote = q{"};
+    if (not utf8::is_utf8($v)) {
+        return $v if (($v & ~ $v) eq "0"); # is SvNIOK
+        $quote = q{'};
+    }
+    my $maxlen = shift || $DBI::neat_maxlen;
+    if ($maxlen && $maxlen < length($v) + 2) {
+        $v = substr($v,0,$maxlen-5);
+        $v .= '...';
+    }
+    $v =~ s/[^[:print:]]/./g;
+    return "$quote$v$quote";
+}
+
+# Try to cast $_[0] to the given SQL type.  Returns:
+#   -2 type not handled, -1 value undef, 0 cast failed and
+#   DBIstcf_STRICT set, 1 cast failed (non-strict), 2 cast succeeded.
+# The cast is attempted under FATAL numeric warnings so a non-numeric
+# string raises an exception that eval turns into a failed cast.
+sub sql_type_cast {
+    my (undef, $sql_type, $flags) = @_;
+
+    return -1 unless defined $_[0];
+
+    my $cast_ok = 1;
+
+    my $evalret = eval {
+        use warnings FATAL => qw(numeric);
+        if ($sql_type == SQL_INTEGER) {
+            my $dummy = $_[0] + 0;
+            return 1;
+        }
+        elsif ($sql_type == SQL_DOUBLE) {
+            my $dummy = $_[0] + 0.0;
+            return 1;
+        }
+        elsif ($sql_type == SQL_NUMERIC) {
+            my $dummy = $_[0] + 0.0;
+            return 1;
+        }
+        else {
+            return -2;
+        }
+    } or $^W && warn $@; # XXX warnings::warnif("numeric", $@) ?
+
+    return $evalret if defined($evalret) && ($evalret == -2);
+    $cast_ok = 0 unless $evalret;
+
+    # DBIstcf_DISCARD_STRING not supported for PurePerl currently
+
+    return 2 if $cast_ok;
+    return 0 if $flags & DBIstcf_STRICT;
+    return 1;
+}
+
+# Whole-second wallclock time; the XS version returns sub-second
+# resolution, PurePerl only has time().
+sub dbi_time {
+    return time();
+}
+
+# tie() constructor for statement handles: bless the inner hash ref.
+sub DBI::st::TIEHASH { bless $_[1] => $_[0] };
+
+# Join a hash into a single "key<kv>value<pair>..." string with keys
+# in sorted order (see _get_sorted_hash_keys).  Values are rendered
+# with DBI::neat() when $use_neat is true, else naively quoted.
+# Returns undef for an undef hash ref; dies if not a hash ref.
+sub _concat_hash_sorted {
+    my ( $hash_ref, $kv_separator, $pair_separator, $use_neat, $num_sort ) = @_;
+    # $num_sort: 0=lexical, 1=numeric, undef=try to guess
+
+    return undef unless defined $hash_ref;
+    die "hash is not a hash reference" unless ref $hash_ref eq 'HASH';
+    my $keys = _get_sorted_hash_keys($hash_ref, $num_sort);
+    my $string = '';
+    for my $key (@$keys) {
+        $string .= $pair_separator if length $string > 0;
+        my $value = $hash_ref->{$key};
+        if ($use_neat) {
+            $value = DBI::neat($value, 0);
+        }
+        else {
+            $value = (defined $value) ? "'$value'" : 'undef';
+        }
+        $string .= $key . $kv_separator . $value;
+    }
+    return $string;
+}
+
+# Return a ref to the hash's keys, sorted numerically when $num_sort
+# is true, lexically when false.  When $num_sort is undef, sort
+# numerically only if every key looks like a number.
+sub _get_sorted_hash_keys {
+    my ($hash_ref, $num_sort) = @_;
+    if (not defined $num_sort) {
+        my $sort_guess = 1;
+        $sort_guess = (not looks_like_number($_)) ? 0 : $sort_guess
+            for keys %$hash_ref;
+        $num_sort = $sort_guess;
+    }
+
+    my @keys = keys %$hash_ref;
+    no warnings 'numeric';
+    my @sorted = ($num_sort)
+        ? sort { $a <=> $b or $a cmp $b } @keys
+        : sort @keys;
+    return \@sorted;
+}
+
+
+
+# Minimal stand-in for the XS DBI::var tied-variable class: only the
+# $DBI::err and $DBI::errstr lookups are supported under PurePerl.
+package
+        DBI::var;
+
+sub FETCH {
+    my($key)=shift;
+    return $DBI::err if $$key eq '*err';
+    return $DBI::errstr if $$key eq '&errstr';
+    Carp::confess("FETCH $key not supported when using DBI::PurePerl");
+}
+
+package
+ DBD::_::common;
+
+# Unsupported under PurePerl (needs outer-from-inner lookup); reports
+# the limitation through the normal set_err error channel.
+sub swap_inner_handle {
+    my ($h1, $h2) = @_;
+    # can't make this work till we can get the outer handle from the inner one
+    # probably via a WeakRef
+    return $h1->set_err($DBI::stderr, "swap_inner_handle not currently supported by DBI::PurePerl");
+}
+
+# $h->trace($level [, $file]) for any handle.  PurePerl has no
+# per-handle trace levels, so this sets the single global level and
+# returns the old one.  Full trace output additionally requires
+# DBI_TRACE to have been in the environment at load time, because the
+# trace fragments are only compiled into the dispatchers then.
+sub trace { # XXX should set per-handle level, not global
+    my ($h, $level, $file) = @_;
+    $level = $h->parse_trace_flags($level)
+        if defined $level and !DBI::looks_like_number($level);
+    my $old_level = $DBI::dbi_debug;
+    DBI::_set_trace_file($file) if defined $file;
+    if (defined $level) {
+        $DBI::dbi_debug = $level;
+        if ($DBI::dbi_debug) {
+            printf $DBI::tfh
+                " %s trace level set to %d in DBI $DBI::VERSION (PurePerl)\n",
+                $h, $DBI::dbi_debug;
+            print $DBI::tfh " Full trace not available because DBI_TRACE is not in environment\n"
+                unless exists $ENV{DBI_TRACE};
+        }
+    }
+    return $old_level;
+}
+*debug = \&trace; *debug = \&trace; # twice to avoid typo warning
+
+# Attribute read for all handle classes.  Stored values win; the
+# NAME_lc/NAME_uc and *_hash variants are computed lazily from NAME
+# and cached; a handful of attributes have synthesized defaults; and
+# unknown capitalized attributes draw a warning (lowercase names are
+# driver-private by convention and pass through silently).
+sub FETCH {
+    my($h,$key)= @_;
+    my $v = $h->{$key};
+    #warn ((exists $h->{$key}) ? "$key=$v\n" : "$key NONEXISTENT\n");
+    return $v if defined $v;
+    if ($key =~ /^NAME_.c$/) {
+        my $cols = $h->FETCH('NAME');
+        return undef unless $cols;
+        my @lcols = map { lc $_ } @$cols;
+        $h->{NAME_lc} = \@lcols;
+        my @ucols = map { uc $_ } @$cols;
+        $h->{NAME_uc} = \@ucols;
+        return $h->FETCH($key);
+    }
+    if ($key =~ /^NAME.*_hash$/) {
+        my $i=0;
+        for my $c(@{$h->FETCH('NAME')||[]}) {
+            $h->{'NAME_hash'}->{$c} = $i;
+            $h->{'NAME_lc_hash'}->{"\L$c"} = $i;
+            $h->{'NAME_uc_hash'}->{"\U$c"} = $i;
+            $i++;
+        }
+        return $h->{$key};
+    }
+    if (!defined $v && !exists $h->{$key}) {
+        return ($h->FETCH('TaintIn') && $h->FETCH('TaintOut')) if $key eq'Taint';
+        return (1==0) if $is_flag_attribute{$key}; # return perl-style sv_no, not undef
+        return $DBI::dbi_debug if $key eq 'TraceLevel';
+        return [] if $key eq 'ChildHandles' && $HAS_WEAKEN;
+        if ($key eq 'Type') {
+            return "dr" if $h->isa('DBI::dr');
+            return "db" if $h->isa('DBI::db');
+            return "st" if $h->isa('DBI::st');
+            Carp::carp( sprintf "Can't determine Type for %s",$h );
+        }
+        if (!$is_valid_attribute{$key} and $key =~ m/^[A-Z]/) {
+            local $^W; # hide undef warnings
+            Carp::carp( sprintf "Can't get %s->{%s}: unrecognised attribute (@{[ %$h ]})",$h,$key )
+        }
+    }
+    return $v;
+}
+# Attribute write for all handle classes.  Special cases: AutoCommit
+# accepts only the internal sentinels -900/-901 (drivers that don't
+# implement AutoCommit pass the user value straight through, which
+# then croaks); Taint* can't be enabled under PurePerl; TraceLevel is
+# routed to trace(); NUM_OF_FIELDS (re)sizes the fetch buffer; and
+# unknown capitalized attributes draw a warning.  Flag attributes are
+# normalized to booleans with !!.
+sub STORE {
+    my ($h,$key,$value) = @_;
+    if ($key eq 'AutoCommit') {
+        Carp::croak("DBD driver has not implemented the AutoCommit attribute")
+            unless $value == -900 || $value == -901;
+        $value = ($value == -901);
+    }
+    elsif ($key =~ /^Taint/ ) {
+        Carp::croak(sprintf "Can't set %s->{%s}: Taint mode not supported by DBI::PurePerl",$h,$key)
+            if $value;
+    }
+    elsif ($key eq 'TraceLevel') {
+        $h->trace($value);
+        return 1;
+    }
+    elsif ($key eq 'NUM_OF_FIELDS') {
+        $h->{$key} = $value;
+        if ($value) {
+            my $fbav = DBD::_::st::dbih_setup_fbav($h);
+            @$fbav = (undef) x $value if @$fbav != $value;
+        }
+        return 1;
+    }
+    elsif (!$is_valid_attribute{$key} && $key =~ /^[A-Z]/ && !exists $h->{$key}) {
+        Carp::carp(sprintf "Can't set %s->{%s}: unrecognised attribute or invalid value %s",
+            $h,$key,$value);
+    }
+    $h->{$key} = $is_flag_attribute{$key} ? !!$value : $value;
+    return 1;
+}
+# Per-handle error accessors; the dispatcher keeps these in sync with
+# the $DBI::err/errstr/state globals.
+sub err { return shift->{err} }
+sub errstr { return shift->{errstr} }
+sub state { return shift->{state} }
+# Record an error/warning/info on handle $h (and in the $DBI::*
+# globals).  Honors the HandleSetErr callback (which may veto or edit
+# the values), clears everything when $errnum is undef, appends to an
+# existing errstr, and only overwrites err when the new value has
+# higher priority (err > "0" warning > "" info > undef).  Errors also
+# propagate from an sth up to its dbh.  Returns $rv (usually undef).
+sub set_err {
+    my ($h, $errnum,$msg,$state, $method, $rv) = @_;
+    $h = tied(%$h) || $h;
+
+    if (my $hss = $h->{HandleSetErr}) {
+        return if $hss->($h, $errnum, $msg, $state, $method);
+    }
+
+    if (!defined $errnum) {
+        $h->{err} = $DBI::err = undef;
+        $h->{errstr} = $DBI::errstr = undef;
+        $h->{state} = $DBI::state = '';
+        return;
+    }
+
+    if ($h->{errstr}) {
+        $h->{errstr} .= sprintf " [err was %s now %s]", $h->{err}, $errnum
+            if $h->{err} && $errnum && $h->{err} ne $errnum;
+        $h->{errstr} .= sprintf " [state was %s now %s]", $h->{state}, $state
+            if $h->{state} and $h->{state} ne "S1000" && $state && $h->{state} ne $state;
+        $h->{errstr} .= "\n$msg" if $h->{errstr} ne $msg;
+        $DBI::errstr = $h->{errstr};
+    }
+    else {
+        $h->{errstr} = $DBI::errstr = $msg;
+    }
+
+    # assign if higher priority: err > "0" > "" > undef
+    my $err_changed;
+    if ($errnum # new error: so assign
+        or !defined $h->{err} # no existing warn/info: so assign
+        # new warn ("0" len 1) > info ("" len 0): so assign
+        or defined $errnum && length($errnum) > length($h->{err})
+    ) {
+        $h->{err} = $DBI::err = $errnum;
+        ++$h->{ErrCount} if $errnum;
+        ++$err_changed;
+    }
+
+    if ($err_changed) {
+        $state ||= "S1000" if $DBI::err;
+        $h->{state} = $DBI::state = ($state eq "00000") ? "" : $state
+            if $state;
+    }
+
+    if (my $p = $h->{Database}) { # just sth->dbh, not dbh->drh (see ::db::DESTROY)
+        $p->{err} = $DBI::err;
+        $p->{errstr} = $DBI::errstr;
+        $p->{state} = $DBI::state;
+    }
+
+    $h->{'dbi_pp_last_method'} = $method;
+    return $rv; # usually undef
+}
+# Print $msg to the trace filehandle if the current trace level is at
+# least $minlevel (default 1).
+sub trace_msg {
+    my ($h, $msg, $minlevel)=@_;
+    $minlevel = 1 unless defined $minlevel;
+    return unless $minlevel <= ($DBI::dbi_debug & 0xF);
+    print $DBI::tfh $msg;
+    return 1;
+}
+# Deprecated XS-only concept; just warn if anything still calls it.
+sub private_data {
+    warn "private_data @_";
+}
+# Detach the driver-side state from an Active dbh and return it as a
+# Storable-frozen blob, rendering the handle and its children unusable
+# (children are finished and reblessed into DBI::zombie).  Only useful
+# if the driver also honors the dbi_imp_data attribute on connect().
+sub take_imp_data {
+    my $dbh = shift;
+    # A reasonable default implementation based on the one in DBI.xs.
+    # Typically a pure-perl driver would have their own take_imp_data method
+    # that would delete all but the essential items in the hash before ending with:
+    # return $dbh->SUPER::take_imp_data();
+    # Of course it's useless if the driver doesn't also implement support for
+    # the dbi_imp_data attribute to the connect() method.
+    require Storable;
+    croak("Can't take_imp_data from handle that's not Active")
+        unless $dbh->{Active};
+    for my $sth (@{ $dbh->{ChildHandles} || [] }) {
+        next unless $sth;
+        $sth->finish if $sth->{Active};
+        bless $sth, 'DBI::zombie';
+    }
+    delete $dbh->{$_} for (keys %is_valid_attribute);
+    delete $dbh->{$_} for grep { m/^dbi_/ } keys %$dbh;
+    # warn "@{[ %$dbh ]}";
+    local $Storable::forgive_me = 1; # in case there are some CODE refs
+    my $imp_data = Storable::freeze($dbh);
+    # XXX um, should probably untie here - need to check dispatch behaviour
+    return $imp_data;
+}
+# Default rows() for non-statement handles: always "unknown" (-1);
+# statement handles override this in DBD::_::st below.
+sub rows {
+    return -1; # always returns -1 here, see DBD::_::st::rows below
+}
+# Intentionally empty: real DESTROY handling lives in the generated
+# dispatch wrapper installed by _install_method().
+sub DESTROY {
+}
+
+package
+        DBD::_::dr;
+
+# PurePerl has no XS layer, so the DBI XS revision is reported as 0.
+sub dbixs_revision {
+    return 0;
+}
+
+package
+        DBD::_::db;
+
+# Default no-op hook invoked by the dispatcher after connect.
+sub connected {
+}
+
+
+package
+ DBD::_::st;
+
+# Fallback fetchrow_arrayref for drivers that only implement
+# fetchrow_array: fetch a row as a list and stash it into the shared
+# fetch buffer via _set_fbav.  Returns false at end of data.
+sub fetchrow_arrayref {
+    my $h = shift;
+    # if we're here then driver hasn't implemented fetch/fetchrow_arrayref
+    # so we assume they've implemented fetchrow_array and call that instead
+    my @row = $h->fetchrow_array or return;
+    return $h->_set_fbav(\@row);
+}
+# twice to avoid typo warning
+*fetch = \&fetchrow_arrayref; *fetch = \&fetchrow_arrayref;
+
+# Fallback fetchrow_array for drivers that only implement fetch():
+# dereference the arrayref returned by fetch into a list.
+sub fetchrow_array {
+    my $h = shift;
+    # if we're here then driver hasn't implemented fetchrow_array
+    # so we assume they've implemented fetch/fetchrow_arrayref
+    my $row = $h->fetch or return;
+    return @$row;
+}
+*fetchrow = \&fetchrow_array; *fetchrow = \&fetchrow_array;
+
+# Fetch the next row as a hashref keyed by the columns named in the
+# attribute given as optional argument (default: the handle's
+# FetchHashKeyName attribute, usually 'NAME').
+sub fetchrow_hashref {
+    my $h = shift;
+    my $row = $h->fetch or return;
+    my $FetchCase = shift;
+    my $FetchHashKeyName = $FetchCase || $h->{'FetchHashKeyName'} || 'NAME';
+    my $FetchHashKeys = $h->FETCH($FetchHashKeyName);
+    my %rowhash;
+    @rowhash{ @$FetchHashKeys } = @$row;
+    return \%rowhash;
+}
+# Return the fetch-buffer array (one slot per field).  If none exists
+# yet, reset the row count and build a NUM_OF_FIELDS-sized array of
+# undefs; croaks if NUM_OF_FIELDS has not been set.  Note: the new
+# array is returned but not stored in {_fbav} here (callers that need
+# it cached assign it, e.g. _get_fbav and bind_col).
+sub dbih_setup_fbav {
+    my $h = shift;
+    return $h->{'_fbav'} || do {
+        $DBI::rows = $h->{'_rows'} = 0;
+        my $fields = $h->{'NUM_OF_FIELDS'}
+            or DBI::croak("NUM_OF_FIELDS not set");
+        my @row = (undef) x $fields;
+        \@row;
+    };
+}
+# Return (creating and caching if needed) the fetch-buffer array and
+# bump the row count - called once per fetched row.
+sub _get_fbav {
+    my $h = shift;
+    my $av = $h->{'_fbav'} ||= dbih_setup_fbav($h);
+    $DBI::rows = ++$h->{'_rows'};
+    return $av;
+}
+# Copy a fetched row into the fetch buffer, incrementing the row
+# count, and also store each value through any scalar refs registered
+# with bind_col so bound variables are updated.  Returns the buffer.
+sub _set_fbav {
+    my $h = shift;
+    my $fbav = $h->{'_fbav'};
+    if ($fbav) {
+        $DBI::rows = ++$h->{'_rows'};
+    }
+    else {
+        $fbav = $h->_get_fbav;
+    }
+    my $row = shift;
+    if (my $bc = $h->{'_bound_cols'}) {
+        for my $i (0..@$row-1) {
+            my $bound = $bc->[$i];
+            $fbav->[$i] = ($bound) ? ($$bound = $row->[$i]) : $row->[$i];
+        }
+    }
+    else {
+        @$fbav = @$row;
+    }
+    return $fbav;
+}
+# Bind output column $col (1-based) to the scalar referenced by
+# $value_ref; subsequent fetches assign the column value through it
+# (see _set_fbav).  Croaks on an out-of-range column or a non-SCALAR
+# ref; an undef $value_ref is accepted as a no-op (TYPE-only call).
+sub bind_col {
+    my ($h, $col, $value_ref,$from_bind_columns) = @_;
+    my $fbav = $h->{'_fbav'} ||= dbih_setup_fbav($h); # from _get_fbav()
+    my $num_of_fields = @$fbav;
+    DBI::croak("bind_col: column $col is not a valid column (1..$num_of_fields)")
+        if $col < 1 or $col > $num_of_fields;
+    return 1 if not defined $value_ref; # ie caller is just trying to set TYPE
+    DBI::croak("bind_col($col,$value_ref) needs a reference to a scalar")
+        unless ref $value_ref eq 'SCALAR';
+    $h->{'_bound_cols'}->[$col-1] = $value_ref;
+    return 1;
+}
+# Release the fetch buffer and mark the statement handle inactive.
+sub finish {
+    my $h = shift;
+    $h->{'_fbav'} = undef;
+    $h->{'Active'} = 0;
+    return 1;
+}
+# Rows fetched so far for this statement, or -1 if no fetch has
+# initialized the counter yet.
+sub rows {
+    my $h = shift;
+    my $rows = $h->{'_rows'};
+    return -1 unless defined $rows;
+    return $rows;
+}
+
+1;
+__END__
+
+=pod
+
+=head1 NAME
+
+DBI::PurePerl -- a DBI emulation using pure perl (no C/XS compilation required)
+
+=head1 SYNOPSIS
+
+ BEGIN { $ENV{DBI_PUREPERL} = 2 }
+ use DBI;
+
+=head1 DESCRIPTION
+
+This is a pure perl emulation of the DBI internals. In almost all
+cases you will be better off using standard DBI since the portions
+of the standard version written in C make it *much* faster.
+
+However, if you are in a situation where it isn't possible to install
+a compiled version of standard DBI, and you're using pure-perl DBD
+drivers, then this module allows you to use most common features
+of DBI without needing any changes in your scripts.
+
+=head1 EXPERIMENTAL STATUS
+
+DBI::PurePerl is new so please treat it as experimental pending
+more extensive testing. So far it has passed all tests with DBD::CSV,
+DBD::AnyData, DBD::XBase, DBD::Sprite, DBD::mysqlPP. Please send
+bug reports to Jeff Zucker at <jeff@vpservices.com> with a cc to
+<dbi-dev@perl.org>.
+
+=head1 USAGE
+
+The usage is the same as for standard DBI with the exception
+that you need to set the environment variable DBI_PUREPERL if
+you want to use the PurePerl version.
+
+ DBI_PUREPERL == 0 (the default) Always use compiled DBI, die
+ if it isn't properly compiled & installed
+
+ DBI_PUREPERL == 1 Use compiled DBI if it is properly compiled
+ & installed, otherwise use PurePerl
+
+ DBI_PUREPERL == 2 Always use PurePerl
+
+You may set the environment variable in your shell (e.g. with
+set or setenv or export, etc) or else set it in your script like
+this:
+
+ BEGIN { $ENV{DBI_PUREPERL}=2 }
+
+before you C<use DBI;>.
+
+=head1 INSTALLATION
+
+In most situations simply install DBI (see the DBI pod for details).
+
+In the situation in which you can not install DBI itself, you
+may manually copy DBI.pm and PurePerl.pm into the appropriate
+directories.
+
+For example:
+
+ cp DBI.pm /usr/jdoe/mylibs/.
+ cp PurePerl.pm /usr/jdoe/mylibs/DBI/.
+
+Then add this to the top of scripts:
+
+ BEGIN {
+ $ENV{DBI_PUREPERL} = 1; # or =2
+ unshift @INC, '/usr/jdoe/mylibs';
+ }
+
+(Or should we perhaps patch Makefile.PL so that if DBI_PUREPERL
+is set to 2 prior to make, the normal compile process is skipped
+and the files are installed automatically?)
+
+=head1 DIFFERENCES BETWEEN DBI AND DBI::PurePerl
+
+=head2 Attributes
+
+Boolean attributes still return boolean values but the actual values
+used may be different, i.e., 0 or undef instead of an empty string.
+
+Some handle attributes are either not supported or have very limited
+functionality:
+
+ ActiveKids
+ InactiveDestroy
+ AutoInactiveDestroy
+ Kids
+ Taint
+ TaintIn
+ TaintOut
+
+(and probably others)
+
+=head2 Tracing
+
+Trace functionality is more limited and the code to handle tracing is
+only embedded into DBI::PurePerl if the DBI_TRACE environment variable
+is defined. To enable total tracing you can set the DBI_TRACE
+environment variable as usual. But to enable individual handle
+tracing using the trace() method you also need to set the DBI_TRACE
+environment variable, but set it to 0.
+
+=head2 Parameter Usage Checking
+
+The DBI does some basic parameter count checking on method calls.
+DBI::PurePerl doesn't.
+
+=head2 Speed
+
+DBI::PurePerl is slower. Although, with some drivers in some
+contexts this may not be very significant for you.
+
+By way of example... the test.pl script in the DBI source
+distribution has a simple benchmark that just does:
+
+ my $null_dbh = DBI->connect('dbi:NullP:','','');
+ my $i = 10_000;
+ $null_dbh->prepare('') while $i--;
+
+In other words just prepares a statement, creating and destroying
+a statement handle, over and over again. Using the real DBI this
+runs at ~4550 handles per second whereas DBI::PurePerl manages
+~2800 per second on the same machine (not too bad really).
+
+=head2 May not fully support hash()
+
+If you want to use type 1 hash, i.e., C<hash($string,1)> with
+DBI::PurePerl, you'll need version 1.56 or higher of Math::BigInt
+(available on CPAN).
+
+=head2 Doesn't support preparse()
+
+The DBI->preparse() method isn't supported in DBI::PurePerl.
+
+=head2 Doesn't support DBD::Proxy
+
+There's a subtle problem somewhere I've not been able to identify.
+DBI::ProxyServer seems to work fine with DBI::PurePerl but DBD::Proxy
+does not work 100% (which is sad because that would be far more useful :)
+Try re-enabling t/80proxy.t for DBI::PurePerl to see if the problem
+that remains will affect your usage.
+
+=head2 Others
+
+ can() - doesn't have any special behaviour
+
+Please let us know if you find any other differences between DBI
+and DBI::PurePerl.
+
+=head1 AUTHORS
+
+Tim Bunce and Jeff Zucker.
+
+Tim provided the direction and basis for the code. The original
+idea for the module and most of the brute force porting from C to
+Perl was by Jeff. Tim then reworked some core parts to boost the
+performance and accuracy of the emulation. Thanks also to Randal
+Schwartz and John Tobey for patches.
+
+=head1 COPYRIGHT
+
+Copyright (c) 2002 Tim Bunce Ireland.
+
+See COPYRIGHT section in DBI.pm for usage and distribution rights.
+
+=cut
diff --git a/lib/DBI/SQL/Nano.pm b/lib/DBI/SQL/Nano.pm
new file mode 100644
index 0000000..dc0711f
--- /dev/null
+++ b/lib/DBI/SQL/Nano.pm
@@ -0,0 +1,1010 @@
+#######################################################################
+#
+# DBI::SQL::Nano - a very tiny SQL engine
+#
+# Copyright (c) 2010 by Jens Rehsack < rehsack AT cpan.org >
+# Copyright (c) 2004 by Jeff Zucker < jzucker AT cpan.org >
+#
+# All rights reserved.
+#
+# You may freely distribute and/or modify this module under the terms
+# of either the GNU General Public License (GPL) or the Artistic License,
+# as specified in the Perl README file.
+#
+# See the pod at the bottom of this file for help information
+#
+#######################################################################
+
+#######################
+package DBI::SQL::Nano;
+#######################
+use strict;
+use warnings;
+use vars qw( $VERSION $versions );
+
+use Carp qw(croak);
+
+require DBI; # for looks_like_number()
+
+BEGIN
+{
+ $VERSION = sprintf( "1.%06d", q$Revision: 14600 $ =~ /(\d+)/o );
+
+ $versions->{nano_version} = $VERSION;
+ # Choose the engine backing the public DBI::SQL::Nano::Statement/Table
+ # classes: the built-in implementation (trailing-underscore packages)
+ # is used when DBI_SQL_NANO is set in the environment, or when a
+ # sufficiently new SQL::Statement (>= 1.28) cannot be loaded.
+ if ( $ENV{DBI_SQL_NANO} || !eval { require SQL::Statement; $SQL::Statement::VERSION ge '1.28' } )
+ {
+ @DBI::SQL::Nano::Statement::ISA = qw(DBI::SQL::Nano::Statement_);
+ @DBI::SQL::Nano::Table::ISA = qw(DBI::SQL::Nano::Table_);
+ }
+ else
+ {
+ # SQL::Statement is available and recent enough: delegate to it.
+ @DBI::SQL::Nano::Statement::ISA = qw( SQL::Statement );
+ @DBI::SQL::Nano::Table::ISA = qw( SQL::Eval::Table);
+ $versions->{statement_version} = $SQL::Statement::VERSION;
+ }
+}
+
+###################################
+package DBI::SQL::Nano::Statement_;
+###################################
+
+use Carp qw(croak);
+use Errno;
+
+# Deep-copy helper for row arrays: prefer Clone::clone when installed,
+# otherwise fall back to Storable::dclone (core since Perl 5.7.3).
+if ( eval { require Clone; } )
+{
+ Clone->import("clone");
+}
+else
+{
+ require Storable; # in CORE since 5.7.3
+ *clone = \&Storable::dclone;
+}
+
+sub new
+{
+    # Construct a statement object and immediately parse the SQL string.
+    # Croaks (via prepare) if the SQL is unparsable.
+    my ( $class, $sql ) = @_;
+    my $self = bless {}, $class;
+    return $self->prepare($sql);
+}
+
+#####################################################################
+# PREPARE
+#####################################################################
+sub prepare
+{
+ # Parse one SQL statement into $self.  Always sets {command} and
+ # {table_name}; depending on the statement also {column_names},
+ # {values}, {where_clause} and {order_clause}.  Croaks if the string
+ # matches no supported statement form.  Every pattern below is
+ # anchored on its leading keyword, so at most one branch can fire.
+ my ( $self, $sql ) = @_;
+ $sql =~ s/\s+$//;
+ for ($sql)
+ {
+ # CREATE TABLE <name> ( <column definitions> )
+ /^\s*CREATE\s+TABLE\s+(.*?)\s*\((.+)\)\s*$/is
+ && do
+ {
+ $self->{command} = 'CREATE';
+ $self->{table_name} = $1;
+ $self->{column_names} = parse_coldef_list($2) if $2;
+ $self->{column_names} or croak "Can't find columns";
+ };
+ # DROP TABLE [IF EXISTS] <name>
+ /^\s*DROP\s+TABLE\s+(IF\s+EXISTS\s+)?(.*?)\s*$/is
+ && do
+ {
+ $self->{command} = 'DROP';
+ $self->{table_name} = $2;
+ # Remember IF EXISTS so DROP() can ignore a missing file.
+ $self->{ignore_missing_table} = 1 if $1;
+ };
+ # SELECT <cols> FROM <table> [WHERE ...] [ORDER BY ...]
+ /^\s*SELECT\s+(.*?)\s+FROM\s+(\S+)((.*))?/is
+ && do
+ {
+ $self->{command} = 'SELECT';
+ $self->{column_names} = parse_comma_list($1) if $1;
+ $self->{column_names} or croak "Can't find columns";
+ $self->{table_name} = $2;
+ if ( my $clauses = $4 )
+ {
+ # Split a trailing ORDER BY off the WHERE clause first.
+ if ( $clauses =~ /^(.*)\s+ORDER\s+BY\s+(.*)$/is )
+ {
+ $clauses = $1;
+ $self->{order_clause} = $self->parse_order_clause($2);
+ }
+ $self->{where_clause} = $self->parse_where_clause($clauses) if ($clauses);
+ }
+ };
+ # INSERT [INTO] <table> [(<cols>)] VALUES (<values>)
+ /^\s*INSERT\s+(?:INTO\s+)?(\S+)\s*(\((.*?)\))?\s*VALUES\s*\((.+)\)/is
+ && do
+ {
+ $self->{command} = 'INSERT';
+ $self->{table_name} = $1;
+ $self->{column_names} = parse_comma_list($2) if $2;
+ $self->{values} = $self->parse_values_list($4) if $4;
+ $self->{values} or croak "Can't parse values";
+ };
+ # DELETE FROM <table> [WHERE ...]
+ /^\s*DELETE\s+FROM\s+(\S+)((.*))?/is
+ && do
+ {
+ $self->{command} = 'DELETE';
+ $self->{table_name} = $1;
+ $self->{where_clause} = $self->parse_where_clause($3) if $3;
+ };
+ # UPDATE <table> SET <assignments> WHERE ...  (WHERE is mandatory)
+ /^\s*UPDATE\s+(\S+)\s+SET\s+(.+)(\s+WHERE\s+.+)/is
+ && do
+ {
+ $self->{command} = 'UPDATE';
+ $self->{table_name} = $1;
+ $self->parse_set_clause($2) if $2;
+ $self->{where_clause} = $self->parse_where_clause($3) if $3;
+ };
+ }
+ croak "Couldn't parse" unless ( $self->{command} and $self->{table_name} );
+ return $self;
+}
+
+sub parse_order_clause
+{
+    # Parse "col [ASC|DESC]" into { col => 'ASC'|'DESC' }.
+    # A bare column name defaults to ASC; anything else croaks.
+    my ( $self, $str ) = @_;
+    my @parts = split /\s+/, $str;
+    return { $parts[0] => 'ASC' } if ( @parts == 1 );
+    croak "Bad ORDER BY clause '$str'" if ( @parts > 2 );
+    my $dir = $parts[1] || '';
+    croak "Bad ORDER BY clause '$dir'" unless $dir =~ /^(?:ASC|DESC)$/i;
+    return { $parts[0] => uc $dir };
+}
+
+sub parse_coldef_list
+{
+    # Check column definitions and return an arrayref of just the names.
+    # Each comma-separated entry must be "name <anything>"; the type and
+    # constraint text after the name is accepted but ignored.
+    my @names;
+    for my $def ( split ',', shift )
+    {
+        my $cleaned = clean_parse_str($def);
+        croak "No column definition for '$def'"
+          unless $cleaned =~ /^(\S+?)\s+.+/;
+        push @names, $1;
+    }
+    return \@names;
+}
+
+sub parse_comma_list
+{
+    # Split on commas and trim each item; returns an arrayref.
+    my @items = split( ',', shift );
+    return [ map { clean_parse_str($_) } @items ];
+}
+sub clean_parse_str { local $_ = shift; s/\(//; s/\)//; s/^\s+//; s/\s+$//; $_; }
+
+sub parse_values_list
+{
+    # Parse a VALUES(...) interior into an arrayref of parsed values.
+    # NOTE: the split is naive - embedded commas inside quoted strings
+    # are not supported (documented limitation; use placeholders).
+    my ( $self, $str ) = @_;
+    my @parsed;
+    for my $raw ( split( ',', $str ) )
+    {
+        push @parsed, $self->parse_value( clean_parse_str($raw) );
+    }
+    return \@parsed;
+}
+
+sub parse_set_clause
+{
+ # Parse "col1 = val1, col2 = val2" from an UPDATE, appending to the
+ # parallel {column_names} and {values} lists.  Croaks if nothing
+ # could be parsed.  The comma split is naive: embedded commas inside
+ # quoted values are not supported (use placeholders instead).
+ my $self = shift;
+ my @cols = split /,/, shift;
+ my $set_clause; # XXX unused - kept as-is
+ for my $col (@cols)
+ {
+ my ( $col_name, $value ) = $col =~ /^\s*(.+?)\s*=\s*(.+?)\s*$/s;
+ push @{ $self->{column_names} }, $col_name;
+ push @{ $self->{values} }, $self->parse_value($value);
+ }
+ croak "Can't parse set clause" unless ( $self->{column_names} and $self->{values} );
+}
+
+sub parse_value
+{
+    # Classify a single token into { value => ..., type => ... } where
+    # type is one of: placeholder, NULL, string, number, column.
+    # Placeholders are also recorded in {params} for later binding.
+    # Returns nothing for an undefined input.
+    my ( $self, $str ) = @_;
+    return unless ( defined $str );
+    $str =~ s/^\s+//;
+    $str =~ s/\s+$//;
+    if ( $str =~ /^\?$/ )
+    {
+        push @{ $self->{params} }, '?';
+        return { value => '?', type => 'placeholder' };
+    }
+    return { value => undef, type => 'NULL' }   if ( $str =~ /^NULL$/i );
+    return { value => $1,    type => 'string' } if ( $str =~ /^'(.+)'$/s );
+    return { value => $str,  type => 'number' } if ( DBI::looks_like_number($str) );
+    return { value => $str,  type => 'column' };
+}
+
+sub parse_where_clause
+{
+ # Parse a single-predicate WHERE clause into a hashref with keys
+ # arg1/arg2 (parsed values), op (comparison operator) and neg (true
+ # when prefixed with NOT).  Multiple predicates joined with AND/OR
+ # are not supported (documented limitation).
+ my ( $self, $str ) = @_;
+ $str =~ s/\s+$//;
+ if ( $str =~ /^\s*WHERE\s+(.*)/i )
+ {
+ $str = $1;
+ }
+ else
+ {
+ croak "Couldn't find WHERE clause in '$str'";
+ }
+ # Strip an optional leading NOT, remembering it for later negation.
+ my ($neg) = $str =~ s/^\s*(NOT)\s+//is;
+ my $opexp = '=|<>|<=|>=|<|>|LIKE|CLIKE|IS';
+ # Non-greedy left side: the first operator occurrence splits the string.
+ my ( $val1, $op, $val2 ) = $str =~ /^(.+?)\s*($opexp)\s*(.+)\s*$/iso;
+ croak "Couldn't parse WHERE expression '$str'" unless ( defined $val1 and defined $op and defined $val2 );
+ return {
+ arg1 => $self->parse_value($val1),
+ arg2 => $self->parse_value($val2),
+ op => $op,
+ neg => $neg,
+ };
+}
+
+#####################################################################
+# EXECUTE
+#####################################################################
+sub execute
+{
+ # Execute the prepared statement: substitute bind values for the '?'
+ # placeholders, dispatch to the command method (SELECT/INSERT/UPDATE/
+ # DELETE/CREATE/DROP) and record NUM_OF_ROWS, NUM_OF_FIELDS and data
+ # on $self.  Returns the row count, or '0E0' (true but zero).
+ my ( $self, $data, $params ) = @_;
+ my $num_placeholders = $self->params;
+ # NOTE(review): assumes $params is always an arrayref - an undef
+ # would die on the dereference below; verify against callers.
+ my $num_params = scalar @$params || 0;
+ croak "Number of params '$num_params' does not match number of placeholders '$num_placeholders'"
+ unless ( $num_placeholders == $num_params );
+ if ( scalar @$params )
+ {
+ # Fill VALUES placeholders in order, consuming from @$params...
+ for my $i ( 0 .. $#{ $self->{values} } )
+ {
+ if ( $self->{values}->[$i]->{type} eq 'placeholder' )
+ {
+ $self->{values}->[$i]->{value} = shift @$params;
+ }
+ }
+ # ...then either side of the WHERE predicate, if present.
+ if ( $self->{where_clause} )
+ {
+ if ( $self->{where_clause}->{arg1}->{type} eq 'placeholder' )
+ {
+ $self->{where_clause}->{arg1}->{value} = shift @$params;
+ }
+ if ( $self->{where_clause}->{arg2}->{type} eq 'placeholder' )
+ {
+ $self->{where_clause}->{arg2}->{value} = shift @$params;
+ }
+ }
+ }
+ my $command = $self->{command};
+ ( $self->{'NUM_OF_ROWS'}, $self->{'NUM_OF_FIELDS'}, $self->{'data'}, ) = $self->$command( $data, $params );
+ $self->{NAME} ||= $self->{column_names};
+ return $self->{'NUM_OF_ROWS'} || '0E0';
+}
+
+# Pattern matching the OS "file not found" (ENOENT) open error; used by
+# DROP to honour "DROP TABLE IF EXISTS" when the backing file is absent.
+my $enoentstr = "Cannot open .*\(" . Errno::ENOENT . "\)";
+my $enoentrx = qr/$enoentstr/;
+
+sub DROP ($$$)
+{
+ # Execute DROP TABLE.  With "IF EXISTS", a missing backing file
+ # (ENOENT, whether died or warned) is silently ignored; any other
+ # failure is re-thrown.  Returns (-1, 0): row count not applicable.
+ my ( $self, $data, $params ) = @_;
+
+ my $table;
+ my @err;
+ eval {
+ # Capture warnings too: some drivers warn rather than die on a
+ # failed open.
+ local $SIG{__WARN__} = sub { push @err, @_ };
+ ($table) = $self->open_tables( $data, 0, 1 );
+ };
+ if ( $self->{ignore_missing_table} and ( $@ or @err ) and grep { $_ =~ $enoentrx } ( @err, $@ ) )
+ {
+ $@ = '';
+ return ( -1, 0 );
+ }
+
+ croak( $@ || $err[0] ) if ( $@ || @err );
+ return ( -1, 0 ) unless $table;
+
+ $table->drop($data);
+ ( -1, 0 );
+}
+
+sub CREATE ($$$)
+{
+    # Execute CREATE TABLE: open the table in create mode and register
+    # its column names.  Returns (0, 0).
+    my ( $self, $data, $params ) = @_;
+    my $table = $self->open_tables( $data, 1, 1 );
+    $table->push_names( $data, $self->{column_names} );
+    return ( 0, 0 );
+}
+
+sub INSERT ($$$)
+{
+    # Execute INSERT: map each statement column to its table column
+    # index and write one new row.  Returns (1, 0).
+    my ( $self, $data, $params ) = @_;
+    my $table = $self->open_tables( $data, 0, 1 );
+    $self->verify_columns($table);
+    # Drivers without insert_one_row get the row appended at EOF.
+    $table->seek( $data, 0, 2 ) unless ( $table->can('insert_one_row') );
+    my $array = [];
+    $self->{column_names} = $table->col_names() unless ( $self->{column_names} );
+    # BUG FIX: was "my $cNum = scalar(...) if (...)" - the
+    # "my EXPR if COND" form has undefined behaviour in Perl.  Compute
+    # the count with an explicit conditional instead.
+    my $cNum = $self->{column_names} ? scalar( @{ $self->{column_names} } ) : 0;
+    croak "Bad col names in INSERT" unless ($cNum);
+
+    for my $i ( 0 .. $cNum - 1 )
+    {
+        my $col = $self->{column_names}->[$i];
+        $array->[ $self->column_nums( $table, $col ) ] = $self->row_values($i);
+    }
+
+    $table->can('insert_new_row')
+      ? $table->insert_new_row( $data, $array )
+      : $table->push_row( $data, $array );
+
+    return ( 1, 0 );
+}
+
+sub DELETE ($$$)
+{
+ # Execute DELETE: remove all rows matching the WHERE clause.
+ # Two strategies depending on driver capability:
+ #   - delete_one_row supported: collect matching rows, delete them;
+ #   - otherwise: collect NON-matching rows and rewrite the table.
+ # Returns (rows_deleted, 0).
+ my ( $self, $data, $params ) = @_;
+ my $table = $self->open_tables( $data, 0, 1 );
+ $self->verify_columns($table);
+ my ($affected) = 0;
+ my ( @rows, $array );
+ my $can_dor = $table->can('delete_one_row');
+ while ( $array = $table->fetch_row($data) )
+ {
+ if ( $self->eval_where( $table, $array ) )
+ {
+ ++$affected;
+ if ( $self->{fetched_from_key} )
+ {
+ # Row located via a unique-key fetch: at most one match,
+ # so delete it and stop scanning.
+ $array = $self->{fetched_value};
+ $table->delete_one_row( $data, $array );
+ return ( $affected, 0 );
+ }
+ push( @rows, $array ) if ($can_dor);
+ }
+ else
+ {
+ push( @rows, $array ) unless ($can_dor);
+ }
+ }
+ if ($can_dor)
+ {
+ foreach $array (@rows)
+ {
+ $table->delete_one_row( $data, $array );
+ }
+ }
+ else
+ {
+ # Rewrite path: seek to start, push survivors, truncate the rest.
+ $table->seek( $data, 0, 0 );
+ foreach $array (@rows)
+ {
+ $table->push_row( $data, $array );
+ }
+ $table->truncate($data);
+ }
+ return ( $affected, 0 );
+}
+
+sub _anycmp($$;$)
+{
+    # Three-way compare used by ORDER BY: numeric when both values look
+    # like numbers, string otherwise; undef sorts before defined values.
+    my ( $x, $y, $case_fold ) = @_;
+    return defined($x) - defined($y)
+      unless ( defined($x) && defined($y) );
+    return $x <=> $y
+      if ( DBI::looks_like_number($x) && DBI::looks_like_number($y) );
+    return lc($x) cmp lc($y) || $x cmp $y if ($case_fold);
+    return $x cmp $y;
+}
+
+sub SELECT ($$$)
+{
+ # Execute SELECT: scan the table, collect rows matching the WHERE
+ # clause, project the requested columns and apply an optional
+ # single-column ORDER BY.  Returns (row_count, field_count, \@rows).
+ my ( $self, $data, $params ) = @_;
+ my $table = $self->open_tables( $data, 0, 0 );
+ $self->verify_columns($table);
+ my $tname = $self->{table_name};
+ my ($affected) = 0;
+ my ( @rows, %cols, $array, $val, $col, $i );
+ while ( $array = $table->fetch_row($data) )
+ {
+ if ( $self->eval_where( $table, $array ) )
+ {
+ # eval_where may have resolved the row via a unique-key fetch.
+ $array = $self->{fetched_value} if ( $self->{fetched_from_key} );
+ unless ( keys %cols )
+ {
+ # Build the index => column-name map lazily, once.
+ my $col_nums = $self->column_nums($table);
+ %cols = reverse %{$col_nums};
+ }
+
+ # Re-order the physical row into the requested column order.
+ my $rowhash;
+ for ( sort keys %cols )
+ {
+ $rowhash->{ $cols{$_} } = $array->[$_];
+ }
+ my @newarray;
+ for ( $i = 0; $i < @{ $self->{column_names} }; $i++ )
+ {
+ $col = $self->{column_names}->[$i];
+ push @newarray, $rowhash->{$col};
+ }
+ push( @rows, \@newarray );
+ # A key-based fetch matches at most one row: stop early.
+ return ( scalar(@rows), scalar @{ $self->{column_names} }, \@rows )
+ if ( $self->{fetched_from_key} );
+ }
+ }
+ if ( $self->{order_clause} )
+ {
+ # {order_clause} holds exactly one col => ASC|DESC pair.
+ my ( $sort_col, $desc ) = each %{ $self->{order_clause} };
+ my @sortCols = ( $self->column_nums( $table, $sort_col, 1 ) );
+ $sortCols[1] = uc $desc eq 'DESC' ? 1 : 0;
+
+ # @sortCols is (index, desc_flag) pairs; walk them until a pair
+ # produces a non-zero comparison.
+ @rows = sort {
+ my ( $result, $colNum, $desc );
+ my $i = 0;
+ do
+ {
+ $colNum = $sortCols[ $i++ ];
+ $desc = $sortCols[ $i++ ];
+ $result = _anycmp( $a->[$colNum], $b->[$colNum] );
+ $result = -$result if ($desc);
+ } while ( !$result && $i < @sortCols );
+ $result;
+ } @rows;
+ }
+ ( scalar(@rows), scalar @{ $self->{column_names} }, \@rows );
+}
+
+sub UPDATE ($$$)
+{
+    # Execute UPDATE: apply the SET values to every row matching the
+    # WHERE clause.  Returns (rows_affected, 0).
+    my ( $self, $data, $params ) = @_;
+    my $table = $self->open_tables( $data, 0, 1 );
+    $self->verify_columns($table);
+    return undef unless $table;
+    my $affected = 0;
+    # Capability probing: some drivers can update rows in place.
+    my $can_usr = $table->can('update_specific_row');
+    my $can_uor = $table->can('update_one_row');
+    my $can_rwu = $can_usr || $can_uor;
+    my ( @rows, $array );
+
+    while ( $array = $table->fetch_row($data) )
+    {
+        if ( $self->eval_where( $table, $array ) )
+        {
+            $array = $self->{fetched_value} if ( $self->{fetched_from_key} and $can_rwu );
+            # BUG FIX: was "my $orig_ary = clone($array) if ($can_usr);"
+            # - "my EXPR if COND" has undefined behaviour in Perl; use
+            # an explicit conditional expression instead.
+            my $orig_ary = $can_usr ? clone($array) : undef;
+            for my $i ( 0 .. $#{ $self->{column_names} } )
+            {
+                my $col = $self->{column_names}->[$i];
+                $array->[ $self->column_nums( $table, $col ) ] = $self->row_values($i);
+            }
+            $affected++;
+            if ( $self->{fetched_value} )
+            {
+                # Row located via a unique-key fetch: update it directly
+                # and stop scanning.
+                if ($can_usr)
+                {
+                    $table->update_specific_row( $data, $array, $orig_ary );
+                }
+                elsif ($can_uor)
+                {
+                    $table->update_one_row( $data, $array );
+                }
+                return ( $affected, 0 );
+            }
+            push( @rows, $can_usr ? [ $array, $orig_ary ] : $array );
+        }
+        else
+        {
+            # Without in-place update support we must keep unmatched
+            # rows too, so the table can be rewritten below.
+            push( @rows, $array ) unless ($can_rwu);
+        }
+    }
+    if ($can_rwu)
+    {
+        foreach my $array (@rows)
+        {
+            if ($can_usr)
+            {
+                $table->update_specific_row( $data, @$array );
+            }
+            elsif ($can_uor)
+            {
+                $table->update_one_row( $data, $array );
+            }
+        }
+    }
+    else
+    {
+        # Rewrite path: seek to start, push every row, truncate.
+        $table->seek( $data, 0, 0 );
+        foreach my $array (@rows)
+        {
+            $table->push_row( $data, $array );
+        }
+        $table->truncate($data);
+    }
+
+    return ( $affected, 0 );
+}
+
+sub verify_columns
+{
+    # Croak (via column_nums) if any statement column - including
+    # column references in the WHERE clause - is unknown to the table.
+    my ( $self, $table ) = @_;
+    my @to_check = @{ $self->{column_names} };
+    if ( my $where = $self->{where_clause} )
+    {
+        for my $arg ( @{$where}{qw(arg1 arg2)} )
+        {
+            push @to_check, $arg->{value}
+              if $arg and $arg->{type} eq 'column';
+        }
+    }
+    $self->column_nums( $table, $_ ) for (@to_check);
+}
+
+sub column_nums
+{
+    # Three modes (column-name matching is case-insensitive throughout):
+    #   ($table, $name)        -> the table (DBD) index of $name, or croak
+    #   ($table, $name, 1)     -> the index of $name within the statement's
+    #                             own column list, or croak
+    #   ($table)               -> hashref mapping statement column name =>
+    #                             table index
+    my ( $self, $table, $stmt_col_name, $find_in_stmt ) = @_;
+    my %dbd_nums = %{ $table->col_nums() };
+    my @dbd_cols = @{ $table->col_names() };
+    my %stmt_nums;
+    if ( $stmt_col_name and !$find_in_stmt )
+    {
+        while ( my ( $k, $v ) = each %dbd_nums )
+        {
+            return $v if uc $k eq uc $stmt_col_name;
+        }
+        croak "No such column '$stmt_col_name'";
+    }
+    if ( $stmt_col_name and $find_in_stmt )
+    {
+        # BUG FIX: was "0 .. @{...}", which iterated one element past the
+        # end of the list and compared against undef; use the last index.
+        for my $i ( 0 .. $#{ $self->{column_names} } )
+        {
+            return $i if uc $stmt_col_name eq uc $self->{column_names}->[$i];
+        }
+        croak "No such column '$stmt_col_name'";
+    }
+    for my $i ( 0 .. $#dbd_cols )
+    {
+        for my $stmt_col ( @{ $self->{column_names} } )
+        {
+            $stmt_nums{$stmt_col} = $i if uc $dbd_cols[$i] eq uc $stmt_col;
+        }
+    }
+    return \%stmt_nums;
+}
+
+sub eval_where
+{
+    # Evaluate the WHERE clause against one row.  Rows are exposed to
+    # process_predicate() as UPPER-CASED column name => value.
+    # Trivially true when the statement has no WHERE clause.
+    my ( $self, $table, $rowary ) = @_;
+    my $where = $self->{"where_clause"} || return 1;
+    my %name_by_num = reverse %{ $table->col_nums() };
+    my %rowhash;
+    for my $num ( sort keys %name_by_num )
+    {
+        $rowhash{ uc $name_by_num{$num} } = $rowary->[$num];
+    }
+    return $self->process_predicate( $where, $table, \%rowhash );
+}
+
+sub process_predicate
+{
+ # Evaluate a parsed WHERE predicate against one row.  $rowhash maps
+ # UPPER-CASED column name => value.  Returns true/false.
+ my ( $self, $pred, $table, $rowhash ) = @_;
+ # Resolve each side: column references read from the row; every
+ # other type uses the literal parsed value.
+ my $val1 = $pred->{arg1};
+ if ( $val1->{type} eq 'column' )
+ {
+ $val1 = $rowhash->{ uc $val1->{value} };
+ }
+ else
+ {
+ $val1 = $val1->{value};
+ }
+ my $val2 = $pred->{arg2};
+ if ( $val2->{type} eq 'column' )
+ {
+ $val2 = $rowhash->{ uc $val2->{value} };
+ }
+ else
+ {
+ $val2 = $val2->{value};
+ }
+ my $op = $pred->{op};
+ my $neg = $pred->{neg};
+ # Fast path: an unnegated equality on the table's key column can be
+ # answered by a direct keyed fetch when the driver supports it.
+ if ( $op eq '=' and !$neg and $table->can('fetch_one_row') )
+ {
+ my $key_col = $table->fetch_one_row( 1, 1 );
+ if ( $pred->{arg1}->{value} =~ /^$key_col$/i )
+ {
+ # Remember the fetched row so callers can short-circuit.
+ $self->{fetched_from_key} = 1;
+ $self->{fetched_value} = $table->fetch_one_row( 0, $pred->{arg2}->{value} );
+ return 1;
+ }
+ }
+ my $match = $self->is_matched( $val1, $op, $val2 ) || 0;
+ if ($neg) { $match = $match ? 0 : 1; }
+ return $match;
+}
+
+sub is_matched
+{
+    # Apply one comparison operator to two resolved values.  Numeric
+    # comparison is used only when both operands look like numbers;
+    # otherwise string comparison applies.
+    my ( $self, $val1, $op, $val2 ) = @_;
+    # IS (NULL) test: true for undef or the empty string.
+    if ( $op eq 'IS' )
+    {
+        return 1 if ( !defined $val1 or $val1 eq '' );
+        return 0;
+    }
+    $val1 = '' unless ( defined $val1 );
+    $val2 = '' unless ( defined $val2 );
+    if ( $op =~ /LIKE|CLIKE/i )
+    {
+        # Convert the SQL pattern to a regex: % -> .*  and  _ -> .
+        # (underscore is a word char, so quotemeta leaves it alone)
+        $val2 = quotemeta($val2);
+        $val2 =~ s/\\%/.*/g;
+        $val2 =~ s/_/./g;
+    }
+    if ( $op eq 'LIKE' )  { return $val1 =~ /^$val2$/s; }
+    if ( $op eq 'CLIKE' ) { return $val1 =~ /^$val2$/si; }
+    if ( DBI::looks_like_number($val1) && DBI::looks_like_number($val2) )
+    {
+        if ( $op eq '<' )  { return $val1 < $val2; }
+        if ( $op eq '>' )  { return $val1 > $val2; }
+        if ( $op eq '=' )  { return $val1 == $val2; }
+        if ( $op eq '<>' ) { return $val1 != $val2; }
+        if ( $op eq '<=' ) { return $val1 <= $val2; }
+        if ( $op eq '>=' ) { return $val1 >= $val2; }
+    }
+    else
+    {
+        if ( $op eq '<' )  { return $val1 lt $val2; }
+        if ( $op eq '>' )  { return $val1 gt $val2; }
+        if ( $op eq '=' )  { return $val1 eq $val2; }
+        if ( $op eq '<>' ) { return $val1 ne $val2; }
+        # BUG FIX: '<=' previously returned "ge" and '>=' returned "le",
+        # inverting both comparisons for string operands.
+        if ( $op eq '<=' ) { return $val1 le $val2; }
+        if ( $op eq '>=' ) { return $val1 ge $val2; }
+    }
+}
+
+sub params
+{
+    # Placeholder accessor: with an index, return that placeholder;
+    # in list context, all placeholders; in scalar context, the count.
+    # Returns 0 when the statement has no placeholders at all.
+    my ( $self, $val_num ) = @_;
+    my $params = $self->{"params"} or return 0;
+    return $params->[$val_num] if ( defined $val_num );
+    return wantarray ? @{$params} : scalar @{$params};
+}
+
+sub open_tables
+{
+    # Open the statement's single table via the subclass-provided
+    # open_table(); re-throws any open failure (chomped) and resolves a
+    # '*' or missing column list to the table's own column names.
+    my ( $self, $data, $createMode, $lockMode ) = @_;
+    my $name = $self->{table_name};
+    my $table =
+      eval { $self->open_table( $data, $name, $createMode, $lockMode ) };
+    if ($@)
+    {
+        chomp $@;
+        croak $@;
+    }
+    croak "Couldn't open table '$name'" unless $table;
+    $self->{column_names} = $table->col_names()
+      if ( !$self->{column_names} or $self->{column_names}->[0] eq '*' );
+    return $table;
+}
+
+sub row_values
+{
+    # VALUES accessor: with an index, return that value; in list
+    # context, all values; in scalar context, the count.  Returns 0
+    # when the statement carries no values at all.
+    my ( $self, $val_num ) = @_;
+    my $values = $self->{"values"} or return 0;
+    return $values->[$val_num]->{value} if ( defined $val_num );
+    return wantarray
+      ? map { $_->{"value"} } @{$values}
+      : scalar @{$values};
+}
+
+sub column_names
+{
+    # Return the statement's column names as a list, or an empty list
+    # when no explicit columns were given (or '*' was used).
+    my ($self) = @_;
+    my @names;
+    @names = @{ $self->{column_names} }
+      if ( $self->{column_names} and $self->{column_names}->[0] ne '*' );
+    return @names;
+}
+
+###############################
+package DBI::SQL::Nano::Table_;
+###############################
+
+use Carp qw(croak);
+
+sub new ($$)
+{
+ # Construct a table object from an attribute hash.  Requires
+ # 'col_names' (arrayref); derives 'col_nums' (name => index hashref)
+ # when the caller did not supply one.
+ my ( $proto, $attr ) = @_;
+ my ($self) = {%$attr};
+
+ defined( $self->{col_names} ) and "ARRAY" eq ref( $self->{col_names} )
+ or croak("attribute 'col_names' must be defined as an array");
+ exists( $self->{col_nums} ) or $self->{col_nums} = _map_colnums( $self->{col_names} );
+ defined( $self->{col_nums} ) and "HASH" eq ref( $self->{col_nums} )
+ or croak("attribute 'col_nums' must be defined as a hash");
+
+ # Two-arg bless; ref($proto) supports being called on an instance.
+ bless( $self, ( ref($proto) || $proto ) );
+ return $self;
+}
+
+sub _map_colnums
+{
+    # Build a { column_name => index } hashref from an arrayref of
+    # names; entries with a false name (undef, '', '0') are skipped.
+    my ($names) = @_;
+    my %index_of;
+    my $idx = 0;
+    for my $name ( @{$names} )
+    {
+        $index_of{$name} = $idx if $name;
+        ++$idx;
+    }
+    return \%index_of;
+}
+
+# Simple read-only accessors for the rudimentary table interface.
+sub row() { return $_[0]->{row}; }
+sub column($) { return $_[0]->{row}->[ $_[0]->column_num( $_[1] ) ]; }
+sub column_num($) { $_[0]->{col_nums}->{ $_[1] }; }
+sub col_nums() { $_[0]->{col_nums} }
+sub col_names() { $_[0]->{col_names}; }
+
+# Abstract storage interface: subclasses (or SQL::Eval::Table) must
+# override these; calling an unimplemented one is a fatal error.
+sub drop ($$) { croak "Abstract method " . ref( $_[0] ) . "::drop called" }
+sub fetch_row ($$$) { croak "Abstract method " . ref( $_[0] ) . "::fetch_row called" }
+sub push_row ($$$) { croak "Abstract method " . ref( $_[0] ) . "::push_row called" }
+sub push_names ($$$) { croak "Abstract method " . ref( $_[0] ) . "::push_names called" }
+sub truncate ($$) { croak "Abstract method " . ref( $_[0] ) . "::truncate called" }
+sub seek ($$$$) { croak "Abstract method " . ref( $_[0] ) . "::seek called" }
+
+1;
+__END__
+
+=pod
+
+=head1 NAME
+
+DBI::SQL::Nano - a very tiny SQL engine
+
+=head1 SYNOPSIS
+
+ BEGIN { $ENV{DBI_SQL_NANO}=1 } # forces use of Nano rather than SQL::Statement
+ use DBI::SQL::Nano;
+ use Data::Dumper;
+ my $stmt = DBI::SQL::Nano::Statement->new(
+ "SELECT bar,baz FROM foo WHERE qux = 1"
+ ) or die "Couldn't parse";
+ print Dumper $stmt;
+
+=head1 DESCRIPTION
+
+C<< DBI::SQL::Nano >> is meant as a I<very> minimal SQL engine for use in
+situations where SQL::Statement is not available. In most situations you are
+better off installing L<SQL::Statement> although DBI::SQL::Nano may be faster
+for some B<very> simple tasks.
+
+DBI::SQL::Nano, like SQL::Statement is primarily intended to provide a SQL
+engine for use with some pure perl DBDs including L<DBD::DBM>, L<DBD::CSV>,
+L<DBD::AnyData>, and L<DBD::Excel>. It is not of much use in and of itself.
+You can dump out the structure of a parsed SQL statement, but that is about
+it.
+
+=head1 USAGE
+
+=head2 Setting the DBI_SQL_NANO flag
+
+By default, when a C<< DBD >> uses C<< DBI::SQL::Nano >>, the module will
+look to see if C<< SQL::Statement >> is installed. If it is, SQL::Statement
+objects are used. If SQL::Statement is not available, DBI::SQL::Nano
+objects are used.
+
+In some cases, you may wish to use DBI::SQL::Nano objects even if
+SQL::Statement is available. To force usage of DBI::SQL::Nano objects
+regardless of the availability of SQL::Statement, set the environment
+variable DBI_SQL_NANO to 1.
+
+You can set the environment variable in your shell prior to running your
+script (with SET or EXPORT or whatever), or else you can set it in your
+script by putting this at the top of the script:
+
+ BEGIN { $ENV{DBI_SQL_NANO} = 1 }
+
+=head2 Supported SQL syntax
+
+ Here's a pseudo-BNF. Square brackets [] indicate optional items;
+ Angle brackets <> indicate items defined elsewhere in the BNF.
+
+ statement ::=
+ DROP TABLE [IF EXISTS] <table_name>
+ | CREATE TABLE <table_name> <col_def_list>
+ | INSERT INTO <table_name> [<insert_col_list>] VALUES <val_list>
+ | DELETE FROM <table_name> [<where_clause>]
+ | UPDATE <table_name> SET <set_clause> <where_clause>
+ | SELECT <select_col_list> FROM <table_name> [<where_clause>]
+ [<order_clause>]
+
+ the optional IF EXISTS clause ::=
+ * similar to MySQL - prevents errors when trying to drop
+ a table that doesn't exist
+
+ identifiers ::=
+ * table and column names should be valid SQL identifiers
+ * especially avoid using spaces and commas in identifiers
+ * note: there is no error checking for invalid names, some
+ will be accepted, others will cause parse failures
+
+ table_name ::=
+ * only one table (no multiple table operations)
+ * see identifier for valid table names
+
+ col_def_list ::=
+ * a parens delimited, comma-separated list of column names
+ * see identifier for valid column names
+ * column types and column constraints may be included but are ignored
+ e.g. these are all the same:
+ (id,phrase)
+ (id INT, phrase VARCHAR(40))
+ (id INT PRIMARY KEY, phrase VARCHAR(40) NOT NULL)
+ * you are *strongly* advised to put in column types even though
+ they are ignored ... it increases portability
+
+ insert_col_list ::=
+ * a parens delimited, comma-separated list of column names
+ * as in standard SQL, this is optional
+
+ select_col_list ::=
+ * a comma-separated list of column names
+ * or an asterisk denoting all columns
+
+ val_list ::=
+ * a parens delimited, comma-separated list of values which can be:
+ * placeholders (an unquoted question mark)
+ * numbers (unquoted numbers)
+ * column names (unquoted strings)
+ * nulls (unquoted word NULL)
+ * strings (delimited with single quote marks);
+ * note: leading and trailing percent mark (%) and underscore (_)
+ can be used as wildcards in quoted strings for use with
+ the LIKE and CLIKE operators
+ * note: escaped single quotation marks within strings are not
+ supported, neither are embedded commas, use placeholders instead
+
+ set_clause ::=
+ * a comma-separated list of column = value pairs
+ * see val_list for acceptable value formats
+
+ where_clause ::=
+ * a single "column/value <op> column/value" predicate, optionally
+ preceded by "NOT"
+ * note: multiple predicates combined with ORs or ANDs are not supported
+ * see val_list for acceptable value formats
+ * op may be one of:
+ < > >= <= = <> LIKE CLIKE IS
+ * CLIKE is a case insensitive LIKE
+
+ order_clause ::= column_name [ASC|DESC]
+ * a single column optional ORDER BY clause is supported
+ * as in standard SQL, if neither ASC (ascending) nor
+ DESC (descending) is specified, ASC becomes the default
+
+=head1 TABLES
+
+DBI::SQL::Nano::Statement operates on exactly one table. This table will be
+opened by a subclass that inherits from DBI::SQL::Nano::Statement and
+implements the C<< open_table >> method.
+
+ sub open_table ($$$$$)
+ {
+ ...
+ return Your::Table->new( \%attributes );
+ }
+
+DBI::SQL::Nano::Statement_ expects the table object to implement a
+rudimentary interface, just as SQL::Statement does.
+
+ package Your::Table;
+
+ use vars qw(@ISA);
+ @ISA = qw(DBI::SQL::Nano::Table);
+
+ sub drop ($$) { ... }
+ sub fetch_row ($$$) { ... }
+ sub push_row ($$$) { ... }
+ sub push_names ($$$) { ... }
+ sub truncate ($$) { ... }
+ sub seek ($$$$) { ... }
+
+The base class interfaces are provided by DBI::SQL::Nano::Table_ in case of
+relying on DBI::SQL::Nano or SQL::Eval::Table (see L<SQL::Eval> for details)
+otherwise.
+
+=head1 BUGS AND LIMITATIONS
+
+There are no known bugs in DBI::SQL::Nano::Statement. If you find one
+and want to report it, please see L<DBI> for how to report bugs.
+
+DBI::SQL::Nano::Statement is designed to provide a minimal subset for
+executing SQL statements.
+
+The most important limitation might be the restriction on one table per
+statement. This implies, that no JOINs are supported and there cannot be
+any foreign key relation between tables.
+
+The where clause evaluation of DBI::SQL::Nano::Statement is very slow
+(SQL::Statement uses a precompiled evaluation).
+
+INSERT can handle only one row per statement. To insert multiple rows,
+use placeholders as explained in DBI.
+
+The DBI::SQL::Nano parser is very limited and does not support any
+additional syntax such as brackets, comments, functions, aggregations
+etc.
+
+In contrast to SQL::Statement, temporary tables are not supported.
+
+=head1 ACKNOWLEDGEMENTS
+
+Tim Bunce provided the original idea for this module, helped me out of the
+tangled trap of namespaces, and provided help and advice all along the way.
+Although I wrote it from the ground up, it is based on Jochen Wiedmann's
+original design of SQL::Statement, so much of the credit for the API goes
+to him.
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is originally written by Jeff Zucker < jzucker AT cpan.org >
+
+This module is currently maintained by Jens Rehsack < jrehsack AT cpan.org >
+
+Copyright (C) 2010 by Jens Rehsack, all rights reserved.
+Copyright (C) 2004 by Jeff Zucker, all rights reserved.
+
+You may freely distribute and/or modify this module under the terms of
+either the GNU General Public License (GPL) or the Artistic License,
+as specified in the Perl README file.
+
+=cut
+
diff --git a/lib/DBI/Util/CacheMemory.pm b/lib/DBI/Util/CacheMemory.pm
new file mode 100644
index 0000000..f111432
--- /dev/null
+++ b/lib/DBI/Util/CacheMemory.pm
@@ -0,0 +1,117 @@
+package DBI::Util::CacheMemory;
+
+# $Id: CacheMemory.pm 10314 2007-11-26 22:25:33Z timbo $
+#
+# Copyright (c) 2007, Tim Bunce, Ireland
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+use strict;
+use warnings;
+
+=head1 NAME
+
+DBI::Util::CacheMemory - a very fast but very minimal subset of Cache::Memory
+
+=head1 DESCRIPTION
+
+Like Cache::Memory (part of the Cache distribution) but doesn't support any fancy features.
+
+This module aims to be a very fast compatible strict sub-set for simple cases,
+such as basic client-side caching for DBD::Gofer.
+
+Like Cache::Memory, and other caches in the Cache and Cache::Cache
+distributions, the data will remain in the cache until cleared, it expires,
+or the process dies. The cache object simply going out of scope will I<not>
+destroy the data.
+
+=head1 METHODS WITH CHANGES
+
+=head2 new
+
+All options except C<namespace> are ignored.
+
+=head2 set
+
+Doesn't support expiry.
+
+=head2 purge
+
+Same as clear() - deletes everything in the namespace.
+
+=head1 METHODS WITHOUT CHANGES
+
+=over
+
+=item clear
+
+=item count
+
+=item exists
+
+=item remove
+
+=back
+
+=head1 UNSUPPORTED METHODS
+
+If it's not listed above, it's not supported.
+
+=cut
+
+our $VERSION = sprintf("0.%06d", q$Revision: 10314 $ =~ /(\d+)/o);
+
+my %cache;
+
+sub new {
+ my ($class, %options ) = @_;
+ my $namespace = $options{namespace} ||= 'Default';
+ #$options{_cache} = \%cache; # can be handy for debugging/dumping
+ my $self = bless \%options => $class;
+ $cache{ $namespace } ||= {}; # init - ensure it exists
+ return $self;
+}
+
+sub set {
+ my ($self, $key, $value) = @_;
+ $cache{ $self->{namespace} }->{$key} = $value;
+}
+
+sub get {
+ my ($self, $key) = @_;
+ return $cache{ $self->{namespace} }->{$key};
+}
+
+sub exists {
+ my ($self, $key) = @_;
+ return exists $cache{ $self->{namespace} }->{$key};
+}
+
+sub remove {
+ my ($self, $key) = @_;
+ return delete $cache{ $self->{namespace} }->{$key};
+}
+
+sub purge {
+ return shift->clear;
+}
+
+sub clear {
+ $cache{ shift->{namespace} } = {};
+}
+
+sub count {
+ return scalar keys %{ $cache{ shift->{namespace} } };
+}
+
+sub size {
+ my $c = $cache{ shift->{namespace} };
+ my $size = 0;
+ while ( my ($k,$v) = each %$c ) {
+ $size += length($k) + length($v);
+ }
+ return $size;
+}
+
+1;
diff --git a/lib/DBI/Util/_accessor.pm b/lib/DBI/Util/_accessor.pm
new file mode 100644
index 0000000..7836ebe
--- /dev/null
+++ b/lib/DBI/Util/_accessor.pm
@@ -0,0 +1,65 @@
+package DBI::Util::_accessor;
+use strict;
+use Carp;
+our $VERSION = sprintf("0.%06d", q$Revision: 9478 $ =~ /(\d+)/);
+
+# inspired by Class::Accessor::Fast
+
+sub new {
+ my($proto, $fields) = @_;
+ my($class) = ref $proto || $proto;
+ $fields ||= {};
+
+ my @dubious = grep { !m/^_/ && !$proto->can($_) } keys %$fields;
+ carp "$class doesn't have accessors for fields: @dubious" if @dubious;
+
+ # make a (shallow) copy of $fields.
+ bless {%$fields}, $class;
+}
+
+sub mk_accessors {
+ my($self, @fields) = @_;
+ $self->mk_accessors_using('make_accessor', @fields);
+}
+
+sub mk_accessors_using {
+ my($self, $maker, @fields) = @_;
+ my $class = ref $self || $self;
+
+ # So we don't have to do lots of lookups inside the loop.
+ $maker = $self->can($maker) unless ref $maker;
+
+ no strict 'refs';
+ foreach my $field (@fields) {
+ my $accessor = $self->$maker($field);
+ *{$class."\:\:$field"} = $accessor
+ unless defined &{$class."\:\:$field"};
+ }
+ #my $hash_ref = \%{$class."\:\:_accessors_hash};
+ #$hash_ref->{$_}++ for @fields;
+ # XXX also copy down _accessors_hash of base class(es)
+ # so one in this class is complete
+ return;
+}
+
+sub make_accessor {
+ my($class, $field) = @_;
+ return sub {
+ my $self = shift;
+ return $self->{$field} unless @_;
+ croak "Too many arguments to $field" if @_ > 1;
+ return $self->{$field} = shift;
+ };
+}
+
+sub make_accessor_autoviv_hashref {
+ my($class, $field) = @_;
+ return sub {
+ my $self = shift;
+ return $self->{$field} ||= {} unless @_;
+ croak "Too many arguments to $field" if @_ > 1;
+ return $self->{$field} = shift;
+ };
+}
+
+1;
diff --git a/lib/DBI/W32ODBC.pm b/lib/DBI/W32ODBC.pm
new file mode 100644
index 0000000..ac2aea1
--- /dev/null
+++ b/lib/DBI/W32ODBC.pm
@@ -0,0 +1,181 @@
+package
+ DBI; # hide this non-DBI package from simple indexers
+
+# $Id: W32ODBC.pm 8696 2007-01-24 23:12:38Z timbo $
+#
+# Copyright (c) 1997,1999 Tim Bunce
+# With many thanks to Patrick Hollins for polishing.
+#
+# You may distribute under the terms of either the GNU General Public
+# License or the Artistic License, as specified in the Perl README file.
+
+=head1 NAME
+
+DBI::W32ODBC - An experimental DBI emulation layer for Win32::ODBC
+
+=head1 SYNOPSIS
+
+ use DBI::W32ODBC;
+
+ # apart from the line above everything is just the same as with
+ # the real DBI when using a basic driver with few features.
+
+=head1 DESCRIPTION
+
+This is an experimental pure perl DBI emulation layer for Win32::ODBC
+
+If you can improve this code I'd be interested in hearing about it. If
+you are having trouble using it please respect the fact that it's very
+experimental. Ideally fix it yourself and send me the details.
+
+=head2 Some Things Not Yet Implemented
+
+ Most attributes including PrintError & RaiseError.
+ type_info and table_info
+
+Volunteers welcome!
+
+=cut
+
+${'DBI::VERSION'} # hide version from PAUSE indexer
+ = "0.01";
+
+my $Revision = sprintf("12.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
+
+
+sub DBI::W32ODBC::import { } # must trick here since we're called DBI/W32ODBC.pm
+
+
+use Carp;
+
+use Win32::ODBC;
+
+@ISA = qw(Win32::ODBC);
+
+use strict;
+
+$DBI::dbi_debug = $ENV{PERL_DBI_DEBUG} || 0;
+carp "Loaded (W32ODBC) DBI.pm ${'DBI::VERSION'} (debug $DBI::dbi_debug)"
+ if $DBI::dbi_debug;
+
+
+
+sub connect {
+ my ($class, $dbname, $dbuser, $dbpasswd, $module, $attr) = @_;
+ $dbname .= ";UID=$dbuser" if $dbuser;
+ $dbname .= ";PWD=$dbpasswd" if $dbpasswd;
+ my $h = new Win32::ODBC $dbname;
+ warn "Error connecting to $dbname: ".Win32::ODBC::Error()."\n" unless $h;
+ bless $h, $class if $h; # rebless into our class
+ $h;
+}
+
+
+sub quote {
+ my ($h, $string) = @_;
+ return "NULL" if !defined $string;
+ $string =~ s/'/''/g; # standard
+ # This hack seems to be required for Access but probably breaks for
+ # other databases when using \r and \n. It would be better if we could
+ # use ODBC options to detect that we're actually using Access.
+ $string =~ s/\r/' & chr\$(13) & '/g;
+ $string =~ s/\n/' & chr\$(10) & '/g;
+ "'$string'";
+}
+
+sub do {
+ my($h, $statement, $attribs, @params) = @_;
+ Carp::carp "\$h->do() attribs unused" if $attribs;
+ my $new_h = $h->prepare($statement) or return undef; ##
+ pop @{ $h->{'___sths'} }; ## certian death assured
+ $new_h->execute(@params) or return undef; ##
+ my $rows = $new_h->rows; ##
+ $new_h->finish; ## bang bang
+ ($rows == 0) ? "0E0" : $rows;
+}
+
+# ---
+
+sub prepare {
+ my ($h, $sql) = @_;
+ ## opens a new connection with every prepare to allow
+ ## multiple, concurrent queries
+ my $new_h = new Win32::ODBC $h->{DSN}; ##
+ return undef if not $new_h; ## bail if no connection
+ bless $new_h; ## shouldn't be sub-classed...
+ $new_h->{'__prepare'} = $sql; ##
+ $new_h->{NAME} = []; ##
+ $new_h->{NUM_OF_FIELDS} = -1; ##
+ push @{ $h->{'___sths'} } ,$new_h; ## save sth in parent for mass destruction
+ return $new_h; ##
+}
+
+sub execute {
+ my ($h) = @_;
+ my $rc = $h->Sql($h->{'__prepare'});
+ return undef if $rc;
+ my @fields = $h->FieldNames;
+ $h->{NAME} = \@fields;
+ $h->{NUM_OF_FIELDS} = scalar @fields;
+ $h; # return dbh as pseudo sth
+}
+
+
+sub fetchrow_hashref { ## provide DBI compatibility
+ my $h = shift;
+ my $NAME = shift || "NAME";
+ my $row = $h->fetchrow_arrayref or return undef;
+ my %hash;
+ @hash{ @{ $h->{$NAME} } } = @$row;
+ return \%hash;
+}
+
+sub fetchrow {
+ my $h = shift;
+ return unless $h->FetchRow();
+ my $fields_r = $h->{NAME};
+ return $h->Data(@$fields_r);
+}
+sub fetch {
+ my @row = shift->fetchrow;
+ return undef unless @row;
+ return \@row;
+}
+*fetchrow_arrayref = \&fetch; ## provide DBI compatibility
+*fetchrow_array = \&fetchrow; ## provide DBI compatibility
+
+sub rows {
+ shift->RowCount;
+}
+
+sub finish {
+ shift->Close; ## uncommented this line
+}
+
+# ---
+
+sub commit {
+ shift->Transact(ODBC::SQL_COMMIT);
+}
+sub rollback {
+ shift->Transact(ODBC::SQL_ROLLBACK);
+}
+
+sub disconnect {
+ my ($h) = shift; ## this will kill all the statement handles
+ foreach (@{$h->{'___sths'}}) { ## created for a specific connection
+ $_->Close if $_->{DSN}; ##
+ } ##
+ $h->Close; ##
+}
+
+sub err {
+ (shift->Error)[0];
+}
+sub errstr {
+ scalar( shift->Error );
+}
+
+# ---
+
+1;
diff --git a/lib/Win32/DBIODBC.pm b/lib/Win32/DBIODBC.pm
new file mode 100644
index 0000000..a93f69b
--- /dev/null
+++ b/lib/Win32/DBIODBC.pm
@@ -0,0 +1,248 @@
+package # hide this package from CPAN indexer
+ Win32::ODBC;
+
+#use strict;
+
+use DBI;
+
+# once we've been loaded we don't want perl to load the real Win32::ODBC
+$INC{'Win32/ODBC.pm'} = $INC{'Win32/DBIODBC.pm'} || 1;
+
+#my $db = new Win32::ODBC("DSN=$self->{'DSN'};UID=$self->{'UID'};PWD=$self->{'PWD'};");
+
+#EMU --- my $db = new Win32::ODBC("DSN=$DSN;UID=$login;PWD=$password;");
+sub new
+{
+ shift;
+ my $connect_line= shift;
+
+# [R] self-hack to allow empty UID and PWD
+ my $temp_connect_line;
+ $connect_line=~/DSN=\w+/;
+ $temp_connect_line="$&;";
+ if ($connect_line=~/UID=\w?/)
+ {$temp_connect_line.="$&;";}
+ else {$temp_connect_line.="UID=;";};
+ if ($connect_line=~/PWD=\w?/)
+ {$temp_connect_line.="$&;";}
+ else {$temp_connect_line.="PWD=;";};
+ $connect_line=$temp_connect_line;
+# -[R]-
+
+ my $self= {};
+
+
+ $_=$connect_line;
+ /^(DSN=)(.*)(;UID=)(.*)(;PWD=)(.*)(;)$/;
+
+ #---- DBI CONNECTION VARIABLES
+
+ $self->{ODBC_DSN}=$2;
+ $self->{ODBC_UID}=$4;
+ $self->{ODBC_PWD}=$6;
+
+
+ #---- DBI CONNECTION VARIABLES
+ $self->{DBI_DBNAME}=$self->{ODBC_DSN};
+ $self->{DBI_USER}=$self->{ODBC_UID};
+ $self->{DBI_PASSWORD}=$self->{ODBC_PWD};
+ $self->{DBI_DBD}='ODBC';
+
+ #---- DBI CONNECTION
+ $self->{'DBI_DBH'}=DBI->connect($self->{'DBI_DBNAME'},
+ $self->{'DBI_USER'},$self->{'DBI_PASSWORD'},$self->{'DBI_DBD'});
+
+ warn "Error($DBI::err) : $DBI::errstr\n" if ! $self->{'DBI_DBH'};
+
+
+ #---- RETURN
+
+ bless $self;
+}
+
+
+#EMU --- $db->Sql('SELECT * FROM DUAL');
+sub Sql
+{
+ my $self= shift;
+ my $SQL_statment=shift;
+
+ # print " SQL : $SQL_statment \n";
+
+ $self->{'DBI_SQL_STATMENT'}=$SQL_statment;
+
+ my $dbh=$self->{'DBI_DBH'};
+
+ # print " DBH : $dbh \n";
+
+ my $sth=$dbh->prepare("$SQL_statment");
+
+ # print " STH : $sth \n";
+
+ $self->{'DBI_STH'}=$sth;
+
+ if ($sth)
+ {
+ $sth->execute();
+ }
+
+ #--- GET ERROR MESSAGES
+ $self->{DBI_ERR}=$DBI::err;
+ $self->{DBI_ERRSTR}=$DBI::errstr;
+
+ if ($sth)
+ {
+ #--- GET COLUMNS NAMES
+ $self->{'DBI_NAME'} = $sth->{NAME};
+ }
+
+# [R] provide compatibility with Win32::ODBC's way of identifying erraneous SQL statements
+ return ($self->{'DBI_ERR'})?1:undef;
+# -[R]-
+}
+
+
+#EMU --- $db->FetchRow())
+sub FetchRow
+{
+ my $self= shift;
+
+ my $sth=$self->{'DBI_STH'};
+ if ($sth)
+ {
+ my @row=$sth->fetchrow_array;
+ $self->{'DBI_ROW'}=\@row;
+
+ if (scalar(@row)>0)
+ {
+ #-- the row of result is not nul
+ #-- return somthing nothing will be return else
+ return 1;
+ }
+ }
+ return undef;
+}
+
+# [R] provide compatibility with Win32::ODBC's Data() method.
+sub Data
+{
+ my $self=shift;
+ my @array=@{$self->{'DBI_ROW'}};
+ foreach my $element (@array)
+ {
+ # remove padding of spaces by DBI
+ $element=~s/(\s*$)//;
+ };
+ return (wantarray())?@array:join('', @array);
+};
+# -[R]-
+
+#EMU --- %record = $db->DataHash;
+sub DataHash
+{
+ my $self= shift;
+
+ my $p_name=$self->{'DBI_NAME'};
+ my $p_row=$self->{'DBI_ROW'};
+
+ my @name=@$p_name;
+ my @row=@$p_row;
+
+ my %DataHash;
+#print @name; print "\n"; print @row;
+# [R] new code that seems to work consistent with Win32::ODBC
+ while (@name)
+ {
+ my $name=shift(@name);
+ my $value=shift(@row);
+
+ # remove padding of spaces by DBI
+ $name=~s/(\s*$)//;
+ $value=~s/(\s*$)//;
+
+ $DataHash{$name}=$value;
+ };
+# -[R]-
+
+# [R] old code that didn't appear to work
+# foreach my $name (@name)
+# {
+# $name=~s/(^\s*)|(\s*$)//;
+# my @arr=@$name;
+# foreach (@arr)
+# {
+# print "lot $name name col $_ or ROW= 0 $row[0] 1 $row[1] 2 $row[2] \n ";
+# $DataHash{$name}=shift(@row);
+# }
+# }
+# -[R]-
+
+ #--- Return Hash
+ return %DataHash;
+}
+
+
+#EMU --- $db->Error()
+sub Error
+{
+ my $self= shift;
+
+ if ($self->{'DBI_ERR'} ne '')
+ {
+ #--- Return error message
+ $self->{'DBI_ERRSTR'};
+ }
+
+ #-- else good no error message
+
+}
+
+# [R] provide compatibility with Win32::ODBC's Close() method.
+sub Close
+{
+ my $self=shift;
+
+ my $dbh=$self->{'DBI_DBH'};
+ $dbh->disconnect;
+}
+# -[R]-
+
+1;
+
+__END__
+
+# [R] to -[R]- indicate sections edited by me, Roy Lee
+
+=head1 NAME
+
+Win32::DBIODBC - Win32::ODBC emulation layer for the DBI
+
+=head1 SYNOPSIS
+
+ use Win32::DBIODBC; # instead of use Win32::ODBC
+
+=head1 DESCRIPTION
+
+This is a I<very> basic I<very> alpha quality Win32::ODBC emulation
+for the DBI. To use it just replace
+
+ use Win32::ODBC;
+
+in your scripts with
+
+ use Win32::DBIODBC;
+
+or, while experimenting, you can pre-load this module without changing your
+scripts by doing
+
+ perl -MWin32::DBIODBC your_script_name
+
+=head1 TO DO
+
+Error handling is virtually non-existent.
+
+=head1 AUTHOR
+
+Tom Horen <tho@melexis.com>
+
+=cut