Diffstat (limited to 'ndb/home')
-rwxr-xr-x  ndb/home/bin/Linuxmkisofs                            bin  503146 -> 0 bytes
-rwxr-xr-x  ndb/home/bin/Solarismkisofs                          bin  634084 -> 0 bytes
-rwxr-xr-x  ndb/home/bin/cvs2cl.pl                               1865
-rwxr-xr-x  ndb/home/bin/fix-cvs-root                              17
-rwxr-xr-x  ndb/home/bin/import-from-bk.sh                        158
-rwxr-xr-x  ndb/home/bin/ndb_deploy                                27
-rwxr-xr-x  ndb/home/bin/ndbdoxy.pl                               184
-rwxr-xr-x  ndb/home/bin/ngcalc                                    78
-rw-r--r--  ndb/home/bin/parseConfigFile.awk                       98
-rwxr-xr-x  ndb/home/bin/setup-test.sh                            272
-rw-r--r--  ndb/home/bin/signallog2html.lib/signallog2list.awk    102
-rw-r--r--  ndb/home/bin/signallog2html.lib/uniq_blocks.awk        29
-rwxr-xr-x  ndb/home/bin/signallog2html.sh                        349
-rwxr-xr-x  ndb/home/bin/stripcr                                   90
-rw-r--r--  ndb/home/lib/funcs.sh                                 294
15 files changed, 0 insertions, 3563 deletions
diff --git a/ndb/home/bin/Linuxmkisofs b/ndb/home/bin/Linuxmkisofs
deleted file mode 100755
index a531f4cca7b..00000000000
--- a/ndb/home/bin/Linuxmkisofs
+++ /dev/null
Binary files differ
diff --git a/ndb/home/bin/Solarismkisofs b/ndb/home/bin/Solarismkisofs
deleted file mode 100755
index b239eaed6ad..00000000000
--- a/ndb/home/bin/Solarismkisofs
+++ /dev/null
Binary files differ
diff --git a/ndb/home/bin/cvs2cl.pl b/ndb/home/bin/cvs2cl.pl
deleted file mode 100755
index 9e6da5acf5b..00000000000
--- a/ndb/home/bin/cvs2cl.pl
+++ /dev/null
@@ -1,1865 +0,0 @@
-#!/bin/sh
-exec perl -w -x $0 ${1+"$@"} # -*- mode: perl; perl-indent-level: 2; -*-
-#!perl -w
-
-##############################################################
-### ###
-### cvs2cl.pl: produce ChangeLog(s) from `cvs log` output. ###
-### ###
-##############################################################
-
-## $Revision: 2.38 $
-## $Date: 2001/02/12 19:54:35 $
-## $Author: kfogel $
-##
-## (C) 1999 Karl Fogel <kfogel@red-bean.com>, under the GNU GPL.
-##
-## (Extensively hacked on by Melissa O'Neill <oneill@cs.sfu.ca>.)
-##
-## cvs2cl.pl is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2, or (at your option)
-## any later version.
-##
-## cvs2cl.pl is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You may have received a copy of the GNU General Public License
-## along with cvs2cl.pl; see the file COPYING. If not, write to the
-## Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-## Boston, MA 02111-1307, USA.
-
-
-
-use strict;
-use Text::Wrap;
-use Time::Local;
-use File::Basename;
-
-
-# The Plan:
-#
-# Read in the logs for multiple files, spit out a nice ChangeLog that
-# mirrors the information entered during `cvs commit'.
-#
-# The problem presents some challenges. In an ideal world, we could
-# detect files with the same author, log message, and checkin time --
-# each <filelist, author, time, logmessage> would be a changelog entry.
-# We'd sort them; and spit them out. Unfortunately, CVS is *not atomic*
-# so checkins can span a range of times. Also, the directory structure
-# could be hierarchical.
-#
-# Another question is whether we really want to have the ChangeLog
-# exactly reflect commits. An author could issue two related commits,
-# with different log entries, reflecting a single logical change to the
-# source. GNU style ChangeLogs group these under a single author/date.
-# We try to do the same.
-#
-# So, we parse the output of `cvs log', storing log messages in a
-# multilevel hash that stores the mapping:
-# directory => author => time => message => filelist
-# As we go, we notice "nearby" commit times and store them together
-# (i.e., under the same timestamp), so they appear in the same log
-# entry.
-#
-# When we've read all the logs, we twist this mapping into
-# a time => author => message => filelist mapping for each directory.
-#
-# If we're not using the `--distributed' flag, the directory is always
-# considered to be `./', even as descend into subdirectories.
-
-
-############### Globals ################
-
-
-# What we run to generate it:
-my $Log_Source_Command = "cvs log";
-
-# In case we have to print it out:
-my $VERSION = '$Revision: 2.38 $';
-$VERSION =~ s/\S+\s+(\S+)\s+\S+/$1/;
-
-## Vars set by options:
-
-# Print debugging messages?
-my $Debug = 0;
-
-# Just show version and exit?
-my $Print_Version = 0;
-
-# Just print usage message and exit?
-my $Print_Usage = 0;
-
-# Single top-level ChangeLog, or one per subdirectory?
-my $Distributed = 0;
-
-# What file should we generate (defaults to "ChangeLog")?
-my $Log_File_Name = "ChangeLog";
-
-# Grab most recent entry date from existing ChangeLog file, just add
-# to that ChangeLog.
-my $Cumulative = 0;
-
-# Expand usernames to email addresses based on a map file?
-my $User_Map_File = "";
-
-# Output to a file or to stdout?
-my $Output_To_Stdout = 0;
-
-# Eliminate empty log messages?
-my $Prune_Empty_Msgs = 0;
-
-# Don't call Text::Wrap on the body of the message
-my $No_Wrap = 0;
-
-# Separates header from log message. Code assumes it is either " " or
-# "\n\n", so if there's ever an option to set it to something else,
-# make sure to go through all conditionals that use this var.
-my $After_Header = " ";
-
-# Format more for programs than for humans.
-my $XML_Output = 0;
-
-# Do some special tweaks for log data that was written in FSF
-# ChangeLog style.
-my $FSF_Style = 0;
-
-# Show times in UTC instead of local time
-my $UTC_Times = 0;
-
-# Show day of week in output?
-my $Show_Day_Of_Week = 0;
-
-# Show revision numbers in output?
-my $Show_Revisions = 0;
-
-# Show tags (symbolic names) in output?
-my $Show_Tags = 0;
-
-# Show branches by symbolic name in output?
-my $Show_Branches = 0;
-
-# Show only revisions on these branches or their ancestors.
-my @Follow_Branches;
-
-# Don't bother with files matching this regexp.
-my @Ignore_Files;
-
-# How exactly we match entries. We definitely want "o",
-# and user might add "i" by using --case-insensitive option.
-my $Case_Insensitive = 0;
-
-# Maybe only show log messages matching a certain regular expression.
-my $Regexp_Gate = "";
-
-# Pass this global option string along to cvs, to the left of `log':
-my $Global_Opts = "";
-
-# Pass this option string along to the cvs log subcommand:
-my $Command_Opts = "";
-
-# Read log output from stdin instead of invoking cvs log?
-my $Input_From_Stdin = 0;
-
-# Don't show filenames in output.
-my $Hide_Filenames = 0;
-
-# Max checkin duration. CVS checkin is not atomic, so we may have checkin
-# times that span a range of time. We assume that checkins will last no
-# longer than $Max_Checkin_Duration seconds, and that similarly, no
-# checkins will happen from the same users with the same message less
-# than $Max_Checkin_Duration seconds apart.
-my $Max_Checkin_Duration = 180;
-
-# What to put at the front of [each] ChangeLog.
-my $ChangeLog_Header = "";
-
-## end vars set by options.
-
-# In 'cvs log' output, one long unbroken line of equal signs separates
-# files:
-my $file_separator = "======================================="
- . "======================================";
-
-# In 'cvs log' output, a shorter line of dashes separates log messages
-# within a file:
-my $logmsg_separator = "----------------------------";
-
-
-############### End globals ############
-
-
-
-
-&parse_options ();
-&derive_change_log ();
-
-
-
-### Everything below is subroutine definitions. ###
-
-# If accumulating, grab the boundary date from pre-existing ChangeLog.
-sub maybe_grab_accumulation_date ()
-{
- if (! $Cumulative) {
- return "";
- }
-
- # else
-
- open (LOG, "$Log_File_Name")
- or die ("trouble opening $Log_File_Name for reading ($!)");
-
- my $boundary_date;
- while (<LOG>)
- {
- if (/^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/)
- {
- $boundary_date = "$1";
- last;
- }
- }
-
- close (LOG);
- return $boundary_date;
-}
-
-
-# Fills up a ChangeLog structure in the current directory.
-sub derive_change_log ()
-{
- # See "The Plan" above for a full explanation.
-
- my %grand_poobah;
-
- my $file_full_path;
- my $time;
- my $revision;
- my $author;
- my $msg_txt;
- my $detected_file_separator;
-
- # Might be adding to an existing ChangeLog
- my $accumulation_date = &maybe_grab_accumulation_date ();
- if ($accumulation_date) {
- $Log_Source_Command .= " -d\'>${accumulation_date}\'";
- }
-
- # We might be expanding usernames
- my %usermap;
-
- # In general, it's probably not very maintainable to use state
- # variables like this to tell the loop what it's doing at any given
- # moment, but this is only the first one, and if we never have more
- # than a few of these, it's okay.
- my $collecting_symbolic_names = 0;
- my %symbolic_names; # Where tag names get stored.
- my %branch_names; # We'll grab branch names while we're at it.
- my %branch_numbers; # Save some revisions for @Follow_Branches
- my @branch_roots; # For showing which files are branch ancestors.
-
- # Bleargh. Compensate for a deficiency of custom wrapping.
- if (($After_Header ne " ") and $FSF_Style)
- {
- $After_Header .= "\t";
- }
-
- if (! $Input_From_Stdin) {
- open (LOG_SOURCE, "$Log_Source_Command |")
- or die "unable to run \"${Log_Source_Command}\"";
- }
- else {
- open (LOG_SOURCE, "-") or die "unable to open stdin for reading";
- }
-
- %usermap = &maybe_read_user_map_file ();
-
- while (<LOG_SOURCE>)
- {
- # If on a new file and don't see filename, skip until we find it, and
- # when we find it, grab it.
- if ((! (defined $file_full_path)) and /^Working file: (.*)/)
- {
- $file_full_path = $1;
- if (@Ignore_Files)
- {
- my $base;
- ($base, undef, undef) = fileparse ($file_full_path);
- # Ouch, I wish trailing operators in regexps could be
- # evaluated on the fly!
- if ($Case_Insensitive) {
- if (grep ($file_full_path =~ m|$_|i, @Ignore_Files)) {
- undef $file_full_path;
- }
- }
- elsif (grep ($file_full_path =~ m|$_|, @Ignore_Files)) {
- undef $file_full_path;
- }
- }
- next;
- }
-
- # Just spin wheels if no file defined yet.
- next if (! $file_full_path);
-
- # Collect tag names in case we're asked to print them in the output.
- if (/^symbolic names:$/) {
- $collecting_symbolic_names = 1;
- next; # There's no more info on this line, so skip to next
- }
- if ($collecting_symbolic_names)
- {
- # All tag names are listed with whitespace in front in cvs log
- # output; so if see non-whitespace, then we're done collecting.
- if (/^\S/) {
- $collecting_symbolic_names = 0;
- }
- else # we're looking at a tag name, so parse & store it
- {
- # According to the Cederqvist manual, in node "Tags", tag
- # names must start with an uppercase or lowercase letter and
- # can contain uppercase and lowercase letters, digits, `-',
- # and `_'. However, it's not our place to enforce that, so
- # we'll allow anything CVS hands us to be a tag:
- /^\s+([^:]+): ([\d.]+)$/;
- my $tag_name = $1;
- my $tag_rev = $2;
-
- # A branch number either has an odd number of digit sections
- # (and hence an even number of dots), or has ".0." as the
- # second-to-last digit section. Test for these conditions.
- my $real_branch_rev = "";
- if (($tag_rev =~ /^(\d+\.\d+\.)+\d+$/) # Even number of dots...
- and (! ($tag_rev =~ /^(1\.)+1$/))) # ...but not "1.[1.]1"
- {
- $real_branch_rev = $tag_rev;
- }
- elsif ($tag_rev =~ /(\d+\.(\d+\.)+)0.(\d+)/) # Has ".0."
- {
- $real_branch_rev = $1 . $3;
- }
- # If we got a branch, record its number.
- if ($real_branch_rev)
- {
- $branch_names{$real_branch_rev} = $tag_name;
- if (@Follow_Branches) {
- if (grep ($_ eq $tag_name, @Follow_Branches)) {
- $branch_numbers{$tag_name} = $real_branch_rev;
- }
- }
- }
- else {
- # Else it's just a regular (non-branch) tag.
- push (@{$symbolic_names{$tag_rev}}, $tag_name);
- }
- }
- }
- # End of code for collecting tag names.
-
- # If have file name, but not revision, and see revision, then grab
- # it. (We collect unconditionally, even though we may or may not
- # ever use it.)
- if ((! (defined $revision)) and (/^revision (\d+\.[\d.]+)/))
- {
- $revision = $1;
-
- if (@Follow_Branches)
- {
- foreach my $branch (@Follow_Branches)
- {
- # Special case for following trunk revisions
- if (($branch =~ /^trunk$/i) and ($revision =~ /^[0-9]+\.[0-9]+$/))
- {
- goto dengo;
- }
-
- my $branch_number = $branch_numbers{$branch};
- if ($branch_number)
- {
- # Are we on one of the follow branches or an ancestor of
- # same?
- #
- # If this revision is a prefix of the branch number, or
- # possibly is less in the minormost number, OR if this
- # branch number is a prefix of the revision, then yes.
- # Otherwise, no.
- #
- # So below, we determine if any of those conditions are
- # met.
-
- # Trivial case: is this revision on the branch?
- # (Compare this way to avoid regexps that screw up Emacs
- # indentation, argh.)
- if ((substr ($revision, 0, ((length ($branch_number)) + 1)))
- eq ($branch_number . "."))
- {
- goto dengo;
- }
- # Non-trivial case: check if rev is ancestral to branch
- elsif ((length ($branch_number)) > (length ($revision)))
- {
- $revision =~ /^((?:\d+\.)+)(\d+)$/;
- my $r_left = $1; # still has the trailing "."
- my $r_end = $2;
-
- $branch_number =~ /^((?:\d+\.)+)(\d+)\.\d+$/;
- my $b_left = $1; # still has trailing "."
- my $b_mid = $2; # has no trailing "."
-
- if (($r_left eq $b_left)
- && ($r_end <= $b_mid))
- {
- goto dengo;
- }
- }
- }
- }
- }
- else # (! @Follow_Branches)
- {
- next;
- }
-
- # Else we are following branches, but this revision isn't on the
- # path. So skip it.
- undef $revision;
- dengo:
- next;
- }
-
- # If we don't have a revision right now, we couldn't possibly
- # be looking at anything useful.
- if (! (defined ($revision))) {
- $detected_file_separator = /^$file_separator$/o;
- if ($detected_file_separator) {
- # No revisions for this file; can happen, e.g. "cvs log -d DATE"
- goto CLEAR;
- }
- else {
- next;
- }
- }
-
- # If have file name but not date and author, and see date or
- # author, then grab them:
- unless (defined $time)
- {
- if (/^date: .*/)
- {
- ($time, $author) = &parse_date_and_author ($_);
- if (defined ($usermap{$author}) and $usermap{$author}) {
- $author = $usermap{$author};
- }
- }
- else {
- $detected_file_separator = /^$file_separator$/o;
- if ($detected_file_separator) {
- # No revisions for this file; can happen, e.g. "cvs log -d DATE"
- goto CLEAR;
- }
- }
- # If the date/time/author hasn't been found yet, we couldn't
- # possibly care about anything we see. So skip:
- next;
- }
-
- # A "branches: ..." line here indicates that one or more branches
- # are rooted at this revision. If we're showing branches, then we
- # want to show that fact as well, so we collect all the branches
- # that this is the latest ancestor of and store them in
- # @branch_roots. Just for reference, the format of the line we're
- # seeing at this point is:
- #
- # branches: 1.5.2; 1.5.4; ...;
- #
- # Okay, here goes:
-
- if (/^branches:\s+(.*);$/)
- {
- if ($Show_Branches)
- {
- my $lst = $1;
- $lst =~ s/(1\.)+1;|(1\.)+1$//; # ignore the trivial branch 1.1.1
- if ($lst) {
- @branch_roots = split (/;\s+/, $lst);
- }
- else {
- undef @branch_roots;
- }
- next;
- }
- else
- {
- # Ugh. This really bothers me. Suppose we see a log entry
- # like this:
- #
- # ----------------------------
- # revision 1.1
- # date: 1999/10/17 03:07:38; author: jrandom; state: Exp;
- # branches: 1.1.2;
- # Intended first line of log message begins here.
- # ----------------------------
- #
- # The question is, how we can tell the difference between that
- # log message and a *two*-line log message whose first line is
- #
- # "branches: 1.1.2;"
- #
- # See the problem? The output of "cvs log" is inherently
- # ambiguous.
- #
- # For now, we punt: we liberally assume that people don't
- # write log messages like that, and just toss a "branches:"
- # line if we see it but are not showing branches. I hope no
- # one ever loses real log data because of this.
- next;
- }
- }
-
- # If have file name, time, and author, then we're just grabbing
- # log message texts:
- $detected_file_separator = /^$file_separator$/o;
- if ($detected_file_separator && ! (defined $revision)) {
- # No revisions for this file; can happen, e.g. "cvs log -d DATE"
- goto CLEAR;
- }
- unless ($detected_file_separator || /^$logmsg_separator$/o)
- {
- $msg_txt .= $_; # Normally, just accumulate the message...
- next;
- }
- # ... until a msg separator is encountered:
- # Ensure the message contains something:
- if ((! $msg_txt)
- || ($msg_txt =~ /^\s*\.\s*$|^\s*$/)
- || ($msg_txt =~ /\*\*\* empty log message \*\*\*/))
- {
- if ($Prune_Empty_Msgs) {
- goto CLEAR;
- }
- # else
- $msg_txt = "[no log message]\n";
- }
-
- ### Store it all in the Grand Poobah:
- {
- my $dir_key; # key into %grand_poobah
- my %qunk; # complicated little jobbie, see below
-
- # Each revision of a file has a little data structure (a `qunk')
- # associated with it. That data structure holds not only the
- # file's name, but any additional information about the file
- # that might be needed in the output, such as the revision
- # number, tags, branches, etc. The reason to have these things
- # arranged in a data structure, instead of just appending them
- # textually to the file's name, is that we may want to do a
- # little rearranging later as we write the output. For example,
- # all the files on a given tag/branch will go together, followed
- # by the tag in parentheses (so trunk or otherwise non-tagged
- # files would go at the end of the file list for a given log
- # message). This rearrangement is a lot easier to do if we
- # don't have to reparse the text.
- #
- # A qunk looks like this:
- #
- # {
- # filename => "hello.c",
- # revision => "1.4.3.2",
- # time => a timegm() return value (moment of commit)
- # tags => [ "tag1", "tag2", ... ],
- # branch => "branchname" # There should be only one, right?
- # branchroots => [ "branchtag1", "branchtag2", ... ]
- # }
-
- if ($Distributed) {
- # Just the basename, don't include the path.
- ($qunk{'filename'}, $dir_key, undef) = fileparse ($file_full_path);
- }
- else {
- $dir_key = "./";
- $qunk{'filename'} = $file_full_path;
- }
-
- # This may someday be used in a more sophisticated calculation
- # of what other files are involved in this commit. For now, we
- # don't use it, because the common-commit-detection algorithm is
- # hypothesized to be "good enough" as it stands.
- $qunk{'time'} = $time;
-
- # We might be including revision numbers and/or tags and/or
- # branch names in the output. Most of the code from here to
- # loop-end deals with organizing these in qunk.
-
- $qunk{'revision'} = $revision;
-
- # Grab the branch, even though we may or may not need it:
- $qunk{'revision'} =~ /((?:\d+\.)+)\d+/;
- my $branch_prefix = $1;
- $branch_prefix =~ s/\.$//; # strip off final dot
- if ($branch_names{$branch_prefix}) {
- $qunk{'branch'} = $branch_names{$branch_prefix};
- }
-
- # If there's anything in the @branch_roots array, then this
- # revision is the root of at least one branch. We'll display
- # them as branch names instead of revision numbers, the
- # substitution for which is done directly in the array:
- if (@branch_roots) {
- my @roots = map { $branch_names{$_} } @branch_roots;
- $qunk{'branchroots'} = \@roots;
- }
-
- # Save tags too.
- if (defined ($symbolic_names{$revision})) {
- $qunk{'tags'} = $symbolic_names{$revision};
- delete $symbolic_names{$revision};
- }
-
- # Add this file to the list
- # (We use many spoonfuls of autovivication magic. Hashes and arrays
- # will spring into existence if they aren't there already.)
-
- &debug ("(pushing log msg for ${dir_key}$qunk{'filename'})\n");
-
- # Store with the files in this commit. Later we'll loop through
- # again, making sure that revisions with the same log message
- # and nearby commit times are grouped together as one commit.
- push (@{$grand_poobah{$dir_key}{$author}{$time}{$msg_txt}}, \%qunk);
- }
-
- CLEAR:
- # Make way for the next message
- undef $msg_txt;
- undef $time;
- undef $revision;
- undef $author;
- undef @branch_roots;
-
- # Maybe even make way for the next file:
- if ($detected_file_separator) {
- undef $file_full_path;
- undef %branch_names;
- undef %branch_numbers;
- undef %symbolic_names;
- }
- }
-
- close (LOG_SOURCE);
-
- ### Process each ChangeLog
-
- while (my ($dir,$authorhash) = each %grand_poobah)
- {
- &debug ("DOING DIR: $dir\n");
-
- # Here we twist our hash around, from being
- # author => time => message => filelist
- # in %$authorhash to
- # time => author => message => filelist
- # in %changelog.
- #
- # This is also where we merge entries. The algorithm proceeds
- # through the timeline of the changelog with a sliding window of
- # $Max_Checkin_Duration seconds; within that window, entries that
- # have the same log message are merged.
- #
- # (To save space, we zap %$authorhash after we've copied
- # everything out of it.)
-
- my %changelog;
- while (my ($author,$timehash) = each %$authorhash)
- {
- my $lasttime;
- my %stamptime;
- foreach my $time (sort {$main::a <=> $main::b} (keys %$timehash))
- {
- my $msghash = $timehash->{$time};
- while (my ($msg,$qunklist) = each %$msghash)
- {
- my $stamptime = $stamptime{$msg};
- if ((defined $stamptime)
- and (($time - $stamptime) < $Max_Checkin_Duration)
- and (defined $changelog{$stamptime}{$author}{$msg}))
- {
- push(@{$changelog{$stamptime}{$author}{$msg}}, @$qunklist);
- }
- else {
- $changelog{$time}{$author}{$msg} = $qunklist;
- $stamptime{$msg} = $time;
- }
- }
- }
- }
- undef (%$authorhash);
-
- ### Now we can write out the ChangeLog!
-
- my ($logfile_here, $logfile_bak, $tmpfile);
-
- if (! $Output_To_Stdout) {
- $logfile_here = $dir . $Log_File_Name;
- $logfile_here =~ s/^\.\/\//\//; # fix any leading ".//" problem
- $tmpfile = "${logfile_here}.cvs2cl$$.tmp";
- $logfile_bak = "${logfile_here}.bak";
-
- open (LOG_OUT, ">$tmpfile") or die "Unable to open \"$tmpfile\"";
- }
- else {
- open (LOG_OUT, ">-") or die "Unable to open stdout for writing";
- }
-
- print LOG_OUT $ChangeLog_Header;
-
- if ($XML_Output) {
- print LOG_OUT "<?xml version=\"1.0\"?>\n\n"
- . "<changelog xmlns=\"http://www.red-bean.com/xmlns/cvs2cl/\">\n\n";
- }
-
- foreach my $time (sort {$main::b <=> $main::a} (keys %changelog))
- {
- my $authorhash = $changelog{$time};
- while (my ($author,$mesghash) = each %$authorhash)
- {
- # If XML, escape in outer loop to avoid compound quoting:
- if ($XML_Output) {
- $author = &xml_escape ($author);
- }
-
- while (my ($msg,$qunklist) = each %$mesghash)
- {
- my $files = &pretty_file_list ($qunklist);
- my $header_line; # date and author
- my $body; # see below
- my $wholething; # $header_line + $body
-
- # Set up the date/author line.
- # kff todo: do some more XML munging here, on the header
- # part of the entry:
- my ($ignore,$min,$hour,$mday,$mon,$year,$wday)
- = $UTC_Times ? gmtime($time) : localtime($time);
-
- # XML output includes everything else, we might as well make
- # it always include Day Of Week too, for consistency.
- if ($Show_Day_Of_Week or $XML_Output) {
- $wday = ("Sunday", "Monday", "Tuesday", "Wednesday",
- "Thursday", "Friday", "Saturday")[$wday];
- $wday = ($XML_Output) ? "<weekday>${wday}</weekday>\n" : " $wday";
- }
- else {
- $wday = "";
- }
-
- if ($XML_Output) {
- $header_line =
- sprintf ("<date>%4u-%02u-%02u</date>\n"
- . "${wday}"
- . "<time>%02u:%02u</time>\n"
- . "<author>%s</author>\n",
- $year+1900, $mon+1, $mday, $hour, $min, $author);
- }
- else {
- $header_line =
- sprintf ("%4u-%02u-%02u${wday} %02u:%02u %s\n\n",
- $year+1900, $mon+1, $mday, $hour, $min, $author);
- }
-
- # Reshape the body according to user preferences.
- if ($XML_Output)
- {
- $msg = &preprocess_msg_text ($msg);
- $body = $files . $msg;
- }
- elsif ($No_Wrap)
- {
- $msg = &preprocess_msg_text ($msg);
- $files = wrap ("\t", " ", "$files");
- $msg =~ s/\n(.*)/\n\t$1/g;
- unless ($After_Header eq " ") {
- $msg =~ s/^(.*)/\t$1/g;
- }
- $body = $files . $After_Header . $msg;
- }
- else # do wrapping, either FSF-style or regular
- {
- if ($FSF_Style)
- {
- $files = wrap ("\t", " ", "$files");
-
- my $files_last_line_len = 0;
- if ($After_Header eq " ")
- {
- $files_last_line_len = &last_line_len ($files);
- $files_last_line_len += 1; # for $After_Header
- }
-
- $msg = &wrap_log_entry
- ($msg, "\t", 69 - $files_last_line_len, 69);
- $body = $files . $After_Header . $msg;
- }
- else # not FSF-style
- {
- $msg = &preprocess_msg_text ($msg);
- $body = $files . $After_Header . $msg;
- $body = wrap ("\t", " ", "$body");
- }
- }
-
- $wholething = $header_line . $body;
-
- if ($XML_Output) {
- $wholething = "<entry>\n${wholething}</entry>\n";
- }
-
- # One last check: make sure it passes the regexp test, if the
- # user asked for that. We have to do it here, so that the
- # test can match against information in the header as well
- # as in the text of the log message.
-
- # How annoying to duplicate so much code just because I
- # can't figure out a way to evaluate scalars on the trailing
- # operator portion of a regular expression. Grrr.
- if ($Case_Insensitive) {
- unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/oi)) {
- print LOG_OUT "${wholething}\n";
- }
- }
- else {
- unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/o)) {
- print LOG_OUT "${wholething}\n";
- }
- }
- }
- }
- }
-
- if ($XML_Output) {
- print LOG_OUT "</changelog>\n";
- }
-
- close (LOG_OUT);
-
- if (! $Output_To_Stdout)
- {
- # If accumulating, append old data to new before renaming. But
- # don't append the most recent entry, since it's already in the
- # new log due to CVS's idiosyncratic interpretation of "log -d".
- if ($Cumulative && -f $logfile_here)
- {
- open (NEW_LOG, ">>$tmpfile")
- or die "trouble appending to $tmpfile ($!)";
-
- open (OLD_LOG, "<$logfile_here")
- or die "trouble reading from $logfile_here ($!)";
-
- my $started_first_entry = 0;
- my $passed_first_entry = 0;
- while (<OLD_LOG>)
- {
- if (! $passed_first_entry)
- {
- if ((! $started_first_entry)
- && /^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/) {
- $started_first_entry = 1;
- }
- elsif (/^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/) {
- $passed_first_entry = 1;
- print NEW_LOG $_;
- }
- }
- else {
- print NEW_LOG $_;
- }
- }
-
- close (NEW_LOG);
- close (OLD_LOG);
- }
-
- if (-f $logfile_here) {
- rename ($logfile_here, $logfile_bak);
- }
- rename ($tmpfile, $logfile_here);
- }
- }
-}
-
-
-sub parse_date_and_author ()
-{
- # Parses the date/time and author out of a line like:
- #
- # date: 1999/02/19 23:29:05; author: apharris; state: Exp;
-
- my $line = shift;
-
- my ($year, $mon, $mday, $hours, $min, $secs, $author) = $line =~
- m#(\d+)/(\d+)/(\d+)\s+(\d+):(\d+):(\d+);\s+author:\s+([^;]+);#
- or die "Couldn't parse date ``$line''";
- die "Bad date or Y2K issues" unless ($year > 1969 and $year < 2258);
- # Kinda arbitrary, but useful as a sanity check
- my $time = timegm($secs,$min,$hours,$mday,$mon-1,$year-1900);
-
- return ($time, $author);
-}
-
-
-# Here we take a bunch of qunks and convert them into printed
-# summary that will include all the information the user asked for.
-sub pretty_file_list ()
-{
- if ($Hide_Filenames and (! $XML_Output)) {
- return "";
- }
-
- my $qunksref = shift;
- my @qunkrefs = @$qunksref;
- my @filenames;
- my $beauty = ""; # The accumulating header string for this entry.
- my %non_unanimous_tags; # Tags found in a proper subset of qunks
- my %unanimous_tags; # Tags found in all qunks
- my %all_branches; # Branches found in any qunk
- my $common_dir = undef; # Dir prefix common to all files ("" if none)
- my $fbegun = 0; # Did we begin printing filenames yet?
-
- # First, loop over the qunks gathering all the tag/branch names.
- # We'll put them all in non_unanimous_tags, and take out the
- # unanimous ones later.
- foreach my $qunkref (@qunkrefs)
- {
- # Keep track of whether all the files in this commit were in the
- # same directory, and memorize it if so. We can make the output a
- # little more compact by mentioning the directory only once.
- if ((scalar (@qunkrefs)) > 1)
- {
- if (! (defined ($common_dir)))
- {
- my ($base, $dir);
- ($base, $dir, undef) = fileparse ($$qunkref{'filename'});
-
- if ((! (defined ($dir))) # this first case is sheer paranoia
- or ($dir eq "")
- or ($dir eq "./")
- or ($dir eq ".\\"))
- {
- $common_dir = "";
- }
- else
- {
- $common_dir = $dir;
- }
- }
- elsif ($common_dir ne "")
- {
- # Already have a common dir prefix, so how much of it can we preserve?
- $common_dir = &common_path_prefix ($$qunkref{'filename'}, $common_dir);
- }
- }
- else # only one file in this entry anyway, so common dir not an issue
- {
- $common_dir = "";
- }
-
- if (defined ($$qunkref{'branch'})) {
- $all_branches{$$qunkref{'branch'}} = 1;
- }
- if (defined ($$qunkref{'tags'})) {
- foreach my $tag (@{$$qunkref{'tags'}}) {
- $non_unanimous_tags{$tag} = 1;
- }
- }
- }
-
- # Any tag held by all qunks will be printed specially... but only if
- # there are multiple qunks in the first place!
- if ((scalar (@qunkrefs)) > 1) {
- foreach my $tag (keys (%non_unanimous_tags)) {
- my $everyone_has_this_tag = 1;
- foreach my $qunkref (@qunkrefs) {
- if ((! (defined ($$qunkref{'tags'})))
- or (! (grep ($_ eq $tag, @{$$qunkref{'tags'}})))) {
- $everyone_has_this_tag = 0;
- }
- }
- if ($everyone_has_this_tag) {
- $unanimous_tags{$tag} = 1;
- delete $non_unanimous_tags{$tag};
- }
- }
- }
-
- if ($XML_Output)
- {
- # If outputting XML, then our task is pretty simple, because we
- # don't have to detect common dir, common tags, branch prefixing,
- # etc. We just output exactly what we have, and don't worry about
- # redundancy or readability.
-
- foreach my $qunkref (@qunkrefs)
- {
- my $filename = $$qunkref{'filename'};
- my $revision = $$qunkref{'revision'};
- my $tags = $$qunkref{'tags'};
- my $branch = $$qunkref{'branch'};
- my $branchroots = $$qunkref{'branchroots'};
-
- $filename = &xml_escape ($filename); # probably paranoia
- $revision = &xml_escape ($revision); # definitely paranoia
-
- $beauty .= "<file>\n";
- $beauty .= "<name>${filename}</name>\n";
- $beauty .= "<revision>${revision}</revision>\n";
- if ($branch) {
- $branch = &xml_escape ($branch); # more paranoia
- $beauty .= "<branch>${branch}</branch>\n";
- }
- foreach my $tag (@$tags) {
- $tag = &xml_escape ($tag); # by now you're used to the paranoia
- $beauty .= "<tag>${tag}</tag>\n";
- }
- foreach my $root (@$branchroots) {
- $root = &xml_escape ($root); # which is good, because it will continue
- $beauty .= "<branchroot>${root}</branchroot>\n";
- }
- $beauty .= "</file>\n";
- }
-
- # Theoretically, we could go home now. But as long as we're here,
- # let's print out the common_dir and utags, as a convenience to
- # the receiver (after all, earlier code calculated that stuff
- # anyway, so we might as well take advantage of it).
-
- if ((scalar (keys (%unanimous_tags))) > 1) {
- foreach my $utag ((keys (%unanimous_tags))) {
- $utag = &xml_escape ($utag); # the usual paranoia
- $beauty .= "<utag>${utag}</utag>\n";
- }
- }
- if ($common_dir) {
- $common_dir = &xml_escape ($common_dir);
- $beauty .= "<commondir>${common_dir}</commondir>\n";
- }
-
- # That's enough for XML, time to go home:
- return $beauty;
- }
-
- # Else not XML output, so complexly compactify for chordate
- # consumption. At this point we have enough global information
- # about all the qunks to organize them non-redundantly for output.
-
- if ($common_dir) {
- # Note that $common_dir still has its trailing slash
- $beauty .= "$common_dir: ";
- }
-
- if ($Show_Branches)
- {
- # For trailing revision numbers.
- my @brevisions;
-
- foreach my $branch (keys (%all_branches))
- {
- foreach my $qunkref (@qunkrefs)
- {
- if ((defined ($$qunkref{'branch'}))
- and ($$qunkref{'branch'} eq $branch))
- {
- if ($fbegun) {
- # kff todo: comma-delimited in XML too? Sure.
- $beauty .= ", ";
- }
- else {
- $fbegun = 1;
- }
- my $fname = substr ($$qunkref{'filename'}, length ($common_dir));
- $beauty .= $fname;
- $$qunkref{'printed'} = 1; # Just setting a mark bit, basically
-
- if ($Show_Tags && (defined @{$$qunkref{'tags'}})) {
- my @tags = grep ($non_unanimous_tags{$_}, @{$$qunkref{'tags'}});
- if (@tags) {
- $beauty .= " (tags: ";
- $beauty .= join (', ', @tags);
- $beauty .= ")";
- }
- }
-
- if ($Show_Revisions) {
- # Collect the revision numbers' last components, but don't
- # print them -- they'll get printed with the branch name
- # later.
- $$qunkref{'revision'} =~ /.+\.([\d]+)$/;
- push (@brevisions, $1);
-
- # todo: we're still collecting branch roots, but we're not
- # showing them anywhere. If we do show them, it would be
- # nifty to just call them revision "0" on the branch.
- # Yeah, that's the ticket.
- }
- }
- }
- $beauty .= " ($branch";
- if (@brevisions) {
- if ((scalar (@brevisions)) > 1) {
- $beauty .= ".[";
- $beauty .= (join (',', @brevisions));
- $beauty .= "]";
- }
- else {
- $beauty .= ".$brevisions[0]";
- }
- }
- $beauty .= ")";
- }
- }
-
- # Okay; any qunks that were done according to branch are taken care
- # of, and marked as printed. Now print everyone else.
-
- foreach my $qunkref (@qunkrefs)
- {
- next if (defined ($$qunkref{'printed'})); # skip if already printed
-
- if ($fbegun) {
- $beauty .= ", ";
- }
- else {
- $fbegun = 1;
- }
- $beauty .= substr ($$qunkref{'filename'}, length ($common_dir));
- # todo: Shlomo's change was this:
- # $beauty .= substr ($$qunkref{'filename'},
- # (($common_dir eq "./") ? "" : length ($common_dir)));
- $$qunkref{'printed'} = 1; # Set a mark bit.
-
- if ($Show_Revisions || $Show_Tags)
- {
- my $started_addendum = 0;
-
- if ($Show_Revisions) {
- $started_addendum = 1;
- $beauty .= " (";
- $beauty .= "$$qunkref{'revision'}";
- }
- if ($Show_Tags && (defined $$qunkref{'tags'})) {
- my @tags = grep ($non_unanimous_tags{$_}, @{$$qunkref{'tags'}});
- if ((scalar (@tags)) > 0) {
- if ($started_addendum) {
- $beauty .= ", ";
- }
- else {
- $beauty .= " (tags: ";
- }
- $beauty .= join (', ', @tags);
- $started_addendum = 1;
- }
- }
- if ($started_addendum) {
- $beauty .= ")";
- }
- }
- }
-
- # Unanimous tags always come last.
- if ($Show_Tags && %unanimous_tags)
- {
- $beauty .= " (utags: ";
- $beauty .= join (', ', keys (%unanimous_tags));
- $beauty .= ")";
- }
-
- # todo: still have to take care of branch_roots?
-
- $beauty = "* $beauty:";
-
- return $beauty;
-}
-
-
-sub common_path_prefix ()
-{
- my $path1 = shift;
- my $path2 = shift;
-
- my ($dir1, $dir2);
- (undef, $dir1, undef) = fileparse ($path1);
- (undef, $dir2, undef) = fileparse ($path2);
-
- # Transmogrify Windows filenames to look like Unix.
- # (It is far more likely that someone is running cvs2cl.pl under
- # Windows than that they would genuinely have backslashes in their
- # filenames.)
- $dir1 =~ tr#\\#/#;
- $dir2 =~ tr#\\#/#;
-
- my $accum1 = "";
- my $accum2 = "";
- my $last_common_prefix = "";
-
- while ($accum1 eq $accum2)
- {
- $last_common_prefix = $accum1;
- last if ($accum1 eq $dir1);
- my ($tmp1) = split (/\//, (substr ($dir1, length ($accum1))));
- my ($tmp2) = split (/\//, (substr ($dir2, length ($accum2))));
- $accum1 .= "$tmp1/" if ((defined ($tmp1)) and $tmp1);
- $accum2 .= "$tmp2/" if ((defined ($tmp2)) and $tmp2);
- }
-
- return $last_common_prefix;
-}
-
-
-sub preprocess_msg_text ()
-{
- my $text = shift;
-
- # Strip out carriage returns (as they probably result from DOSsy editors).
- $text =~ s/\r\n/\n/g;
-
- # If it *looks* like two newlines, make it *be* two newlines:
- $text =~ s/\n\s*\n/\n\n/g;
-
- if ($XML_Output)
- {
- $text = &xml_escape ($text);
- $text = "<msg>${text}</msg>\n";
- }
- elsif (! $No_Wrap)
- {
- # Strip off lone newlines, but only for lines that don't begin with
- # whitespace or a mail-quoting character, since we want to preserve
- # that kind of formatting. Also don't strip newlines that follow a
- # period; we handle those specially next. And don't strip
- # newlines that precede an open paren.
- 1 while ($text =~ s/(^|\n)([^>\s].*[^.\n])\n([^>\n])/$1$2 $3/g);
-
- # If a newline follows a period, make sure that when we bring up the
- # bottom sentence, it begins with two spaces.
- 1 while ($text =~ s/(^|\n)([^>\s].*)\n([^>\n])/$1$2 $3/g);
- }
-
- return $text;
-}
-
-
-sub last_line_len ()
-{
- my $files_list = shift;
- my @lines = split (/\n/, $files_list);
- my $last_line = pop (@lines);
- return length ($last_line);
-}
-
-
-# A custom wrap function, sensitive to some common constructs used in
-# log entries.
-sub wrap_log_entry ()
-{
- my $text = shift; # The text to wrap.
- my $left_pad_str = shift; # String to pad with on the left.
-
- # These do NOT take left_pad_str into account:
- my $length_remaining = shift; # Amount left on current line.
- my $max_line_length = shift; # Amount left for a blank line.
-
- my $wrapped_text = ""; # The accumulating wrapped entry.
- my $user_indent = ""; # Inherited user_indent from prev line.
-
- my $first_time = 1; # First iteration of the loop?
- my $suppress_line_start_match = 0; # Set to disable line start checks.
-
- my @lines = split (/\n/, $text);
- while (@lines) # Don't use `foreach' here, it won't work.
- {
- my $this_line = shift (@lines);
- chomp $this_line;
-
- if ($this_line =~ /^(\s+)/) {
- $user_indent = $1;
- }
- else {
- $user_indent = "";
- }
-
- # If it matches any of the line-start regexps, print a newline now...
- if ($suppress_line_start_match)
- {
- $suppress_line_start_match = 0;
- }
- elsif (($this_line =~ /^(\s*)\*\s+[a-zA-Z0-9]/)
- || ($this_line =~ /^(\s*)\* [a-zA-Z0-9_\.\/\+-]+/)
- || ($this_line =~ /^(\s*)\([a-zA-Z0-9_\.\/\+-]+(\)|,\s*)/)
- || ($this_line =~ /^(\s+)(\S+)/)
- || ($this_line =~ /^(\s*)- +/)
- || ($this_line =~ /^()\s*$/)
- || ($this_line =~ /^(\s*)\*\) +/)
- || ($this_line =~ /^(\s*)[a-zA-Z0-9](\)|\.|\:) +/))
- {
- # Make a line break immediately, unless header separator is set
- # and this line is the first line in the entry, in which case
- # we're getting the blank line for free already and shouldn't
- # add an extra one.
- unless (($After_Header ne " ") and ($first_time))
- {
- if ($this_line =~ /^()\s*$/) {
- $suppress_line_start_match = 1;
- $wrapped_text .= "\n${left_pad_str}";
- }
-
- $wrapped_text .= "\n${left_pad_str}";
- }
-
- $length_remaining = $max_line_length - (length ($user_indent));
- }
-
- # Now that any user_indent has been preserved, strip off leading
- # whitespace, so up-folding has no ugly side-effects.
- $this_line =~ s/^\s*//;
-
- # Accumulate the line, and adjust parameters for next line.
- my $this_len = length ($this_line);
- if ($this_len == 0)
- {
- # Blank lines should cancel any user_indent level.
- $user_indent = "";
- $length_remaining = $max_line_length;
- }
- elsif ($this_len >= $length_remaining) # Line too long, try breaking it.
- {
- # Walk backwards from the end. At first acceptable spot, break
- # a new line.
- my $idx = $length_remaining - 1;
- if ($idx < 0) { $idx = 0 };
- while ($idx > 0)
- {
- if (substr ($this_line, $idx, 1) =~ /\s/)
- {
- my $line_now = substr ($this_line, 0, $idx);
- my $next_line = substr ($this_line, $idx);
- $this_line = $line_now;
-
- # Clean whitespace off the end.
- chomp $this_line;
-
- # The current line is ready to be printed.
- $this_line .= "\n${left_pad_str}";
-
- # Make sure the next line is allowed full room.
- $length_remaining = $max_line_length - (length ($user_indent));
-
- # Strip next_line, but then preserve any user_indent.
- $next_line =~ s/^\s*//;
-
- # Sneak a peek at the user_indent of the upcoming line, so
- # $next_line (which will now precede it) can inherit that
- # indent level. Otherwise, use whatever user_indent level
- # we currently have, which might be none.
- my $next_next_line = shift (@lines);
- if ((defined ($next_next_line)) && ($next_next_line =~ /^(\s+)/)) {
- $next_line = $1 . $next_line if (defined ($1));
- # $length_remaining = $max_line_length - (length ($1));
- $next_next_line =~ s/^\s*//;
- }
- else {
- $next_line = $user_indent . $next_line;
- }
- if (defined ($next_next_line)) {
- unshift (@lines, $next_next_line);
- }
- unshift (@lines, $next_line);
-
- # Our new next line might, coincidentally, begin with one of
- # the line-start regexps, so we temporarily turn off
- # sensitivity to that until we're past the line.
- $suppress_line_start_match = 1;
-
- last;
- }
- else
- {
- $idx--;
- }
- }
-
- if ($idx == 0)
- {
- # We bottomed out because the line is longer than the
- # available space. But that could be because the space is
- # small, or because the line is longer than even the maximum
- # possible space. Handle both cases below.
-
- if ($length_remaining == ($max_line_length - (length ($user_indent))))
- {
- # The line is simply too long -- there is no hope of ever
- # breaking it nicely, so just insert it verbatim, with
- # appropriate padding.
- $this_line = "\n${left_pad_str}${this_line}";
- }
- else
- {
- # Can't break it here, but may be able to on the next round...
- unshift (@lines, $this_line);
- $length_remaining = $max_line_length - (length ($user_indent));
- $this_line = "\n${left_pad_str}";
- }
- }
- }
- else # $this_len < $length_remaining, so tack on what we can.
- {
- # Leave a note for the next iteration.
- $length_remaining = $length_remaining - $this_len;
-
- if ($this_line =~ /\.$/)
- {
- $this_line .= " ";
- $length_remaining -= 2;
- }
- else # not a sentence end
- {
- $this_line .= " ";
- $length_remaining -= 1;
- }
- }
-
- # Unconditionally indicate that loop has run at least once.
- $first_time = 0;
-
- $wrapped_text .= "${user_indent}${this_line}";
- }
-
- # One last bit of padding.
- $wrapped_text .= "\n";
-
- return $wrapped_text;
-}
-
-
-sub xml_escape ()
-{
- my $txt = shift;
- $txt =~ s/&/&amp;/g;
- $txt =~ s/</&lt;/g;
- $txt =~ s/>/&gt;/g;
- return $txt;
-}
-
-
-sub maybe_read_user_map_file ()
-{
- my %expansions;
-
- if ($User_Map_File)
- {
- open (MAPFILE, "<$User_Map_File")
- or die ("Unable to open $User_Map_File ($!)");
-
- while (<MAPFILE>)
- {
- next if /^\s*#/; # Skip comment lines.
- next if not /:/; # Skip lines without colons.
-
- # It is now safe to split on ':'.
- my ($username, $expansion) = split ':';
- chomp $expansion;
- $expansion =~ s/^'(.*)'$/$1/;
- $expansion =~ s/^"(.*)"$/$1/;
-
- # If it looks like the expansion has a real name already, then
- # we toss the username we got from CVS log. Otherwise, keep
- # it to use in combination with the email address.
-
- if ($expansion =~ /^\s*<{0,1}\S+@.*/) {
- # Also, add angle brackets if none present
- if (! ($expansion =~ /<\S+@\S+>/)) {
- $expansions{$username} = "$username <$expansion>";
- }
- else {
- $expansions{$username} = "$username $expansion";
- }
- }
- else {
- $expansions{$username} = $expansion;
- }
- }
-
- close (MAPFILE);
- }
-
- return %expansions;
-}
-
-
-sub parse_options ()
-{
- # Check this internally before setting the global variable.
- my $output_file;
-
- # If this gets set, we encountered unknown options and will exit at
- # the end of this subroutine.
- my $exit_with_admonishment = 0;
-
- while (my $arg = shift (@ARGV))
- {
- if ($arg =~ /^-h$|^-help$|^--help$|^--usage$|^-?$/) {
- $Print_Usage = 1;
- }
- elsif ($arg =~ /^--debug$/) { # unadvertised option, heh
- $Debug = 1;
- }
- elsif ($arg =~ /^--version$/) {
- $Print_Version = 1;
- }
- elsif ($arg =~ /^-g$|^--global-opts$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- # Don't assume CVS is called "cvs" on the user's system:
- $Log_Source_Command =~ s/(^\S*)/$1 $narg/;
- }
- elsif ($arg =~ /^-l$|^--log-opts$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $Log_Source_Command .= " $narg";
- }
- elsif ($arg =~ /^-f$|^--file$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $output_file = $narg;
- }
- elsif ($arg =~ /^--accum$/) {
- $Cumulative = 1;
- }
- elsif ($arg =~ /^--fsf$/) {
- $FSF_Style = 1;
- }
- elsif ($arg =~ /^-U$|^--usermap$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $User_Map_File = $narg;
- }
- elsif ($arg =~ /^-W$|^--window$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $Max_Checkin_Duration = $narg;
- }
- elsif ($arg =~ /^-I$|^--ignore$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- push (@Ignore_Files, $narg);
- }
- elsif ($arg =~ /^-C$|^--case-insensitive$/) {
- $Case_Insensitive = 1;
- }
- elsif ($arg =~ /^-R$|^--regexp$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $Regexp_Gate = $narg;
- }
- elsif ($arg =~ /^--stdout$/) {
- $Output_To_Stdout = 1;
- }
- elsif ($arg =~ /^--version$/) {
- $Print_Version = 1;
- }
- elsif ($arg =~ /^-d$|^--distributed$/) {
- $Distributed = 1;
- }
- elsif ($arg =~ /^-P$|^--prune$/) {
- $Prune_Empty_Msgs = 1;
- }
- elsif ($arg =~ /^-S$|^--separate-header$/) {
- $After_Header = "\n\n";
- }
- elsif ($arg =~ /^--no-wrap$/) {
- $No_Wrap = 1;
- }
- elsif ($arg =~ /^--gmt$|^--utc$/) {
- $UTC_Times = 1;
- }
- elsif ($arg =~ /^-w$|^--day-of-week$/) {
- $Show_Day_Of_Week = 1;
- }
- elsif ($arg =~ /^-r$|^--revisions$/) {
- $Show_Revisions = 1;
- }
- elsif ($arg =~ /^-t$|^--tags$/) {
- $Show_Tags = 1;
- }
- elsif ($arg =~ /^-b$|^--branches$/) {
- $Show_Branches = 1;
- }
- elsif ($arg =~ /^-F$|^--follow$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- push (@Follow_Branches, $narg);
- }
- elsif ($arg =~ /^--stdin$/) {
- $Input_From_Stdin = 1;
- }
- elsif ($arg =~ /^--header$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $ChangeLog_Header = &slurp_file ($narg);
- if (! defined ($ChangeLog_Header)) {
- $ChangeLog_Header = "";
- }
- }
- elsif ($arg =~ /^--xml$/) {
- $XML_Output = 1;
- }
- elsif ($arg =~ /^--hide-filenames$/) {
- $Hide_Filenames = 1;
- $After_Header = "";
- }
- else {
- # Just add a filename as argument to the log command
- $Log_Source_Command .= " $arg";
- }
- }
-
- ## Check for contradictions...
-
- if ($Output_To_Stdout && $Distributed) {
- print STDERR "cannot pass both --stdout and --distributed\n";
- $exit_with_admonishment = 1;
- }
-
- if ($Output_To_Stdout && $output_file) {
- print STDERR "cannot pass both --stdout and --file\n";
- $exit_with_admonishment = 1;
- }
-
- if ($XML_Output && $Cumulative) {
- print STDERR "cannot pass both --xml and --accum\n";
- $exit_with_admonishment = 1;
- }
-
- # Or if any other error message has already been printed out, we
- # just leave now:
- if ($exit_with_admonishment) {
- &usage ();
- exit (1);
- }
- elsif ($Print_Usage) {
- &usage ();
- exit (0);
- }
- elsif ($Print_Version) {
- &version ();
- exit (0);
- }
-
- ## Else no problems, so proceed.
-
- if ($output_file) {
- $Log_File_Name = $output_file;
- }
-}
-
-
-sub slurp_file ()
-{
- my $filename = shift || die ("no filename passed to slurp_file()");
- my $retstr;
-
- open (SLURPEE, "<${filename}") or die ("unable to open $filename ($!)");
- my $saved_sep = $/;
- undef $/;
- $retstr = <SLURPEE>;
- $/ = $saved_sep;
- close (SLURPEE);
- return $retstr;
-}
-
-
-sub debug ()
-{
- if ($Debug) {
- my $msg = shift;
- print STDERR $msg;
- }
-}
-
-
-sub version ()
-{
- print "cvs2cl.pl version ${VERSION}; distributed under the GNU GPL.\n";
-}
-
-
-sub usage ()
-{
- &version ();
- print <<'END_OF_INFO';
-Generate GNU-style ChangeLogs in CVS working copies.
-
-Notes about the output format(s):
-
- The default output of cvs2cl.pl is designed to be compact, formally
- unambiguous, but still easy for humans to read. It is largely
- self-explanatory, I hope; the one abbreviation that might not be
- obvious is "utags". That stands for "universal tags" -- a
- universal tag is one held by all the files in a given change entry.
-
- If you need output that's easy for a program to parse, use the
- --xml option. Note that with XML output, just about all available
- information is included with each change entry, whether you asked
- for it or not, on the theory that your parser can ignore anything
- it's not looking for.
-
-Notes about the options and arguments (the actual options are listed
-last in this usage message):
-
- * The -I and -F options may appear multiple times.
-
- * To follow trunk revisions, use "-F trunk" ("-F TRUNK" also works).
- This is okay because no one would ever, ever be crazy enough to name a
- branch "trunk", right? Right.
-
- * For the -U option, the UFILE should be formatted like
- CVSROOT/users. That is, each line of UFILE looks like this
- jrandom:jrandom@red-bean.com
- or maybe even like this
- jrandom:'Jesse Q. Random <jrandom@red-bean.com>'
- Don't forget to quote the portion after the colon if necessary.
-
- * Many people want to filter by date. To do so, invoke cvs2cl.pl
- like this:
- cvs2cl.pl -l "-d'DATESPEC'"
- where DATESPEC is any date specification valid for "cvs log -d".
- (Note that CVS 1.10.7 and below requires there be no space between
- -d and its argument).
-
-Options/Arguments:
-
- -h, -help, --help, or -? Show this usage and exit
- --version Show version and exit
- -r, --revisions Show revision numbers in output
- -b, --branches Show branch names in revisions when possible
- -t, --tags Show tags (symbolic names) in output
- --stdin Read from stdin, don't run cvs log
- --stdout Output to stdout not to ChangeLog
- -d, --distributed Put ChangeLogs in subdirs
- -f FILE, --file FILE Write to FILE instead of "ChangeLog"
- --fsf Use this if log data is in FSF ChangeLog style
- -W SECS, --window SECS Window of time within which log entries unify
- -U UFILE, --usermap UFILE Expand usernames to email addresses from UFILE
- -R REGEXP, --regexp REGEXP Include only entries that match REGEXP
- -I REGEXP, --ignore REGEXP Ignore files whose names match REGEXP
- -C, --case-insensitive Any regexp matching is done case-insensitively
- -F BRANCH, --follow BRANCH Show only revisions on or ancestral to BRANCH
- -S, --separate-header Blank line between each header and log message
- --no-wrap Don't auto-wrap log message (recommend -S also)
- --gmt, --utc Show times in GMT/UTC instead of local time
- --accum Add to an existing ChangeLog (incompat w/ --xml)
- -w, --day-of-week Show day of week
- --header FILE Get ChangeLog header from FILE ("-" means stdin)
- --xml Output XML instead of ChangeLog format
- --hide-filenames Don't show filenames (ignored for XML output)
- -P, --prune Don't show empty log messages
- -g OPTS, --global-opts OPTS Invoke like this "cvs OPTS log ..."
- -l OPTS, --log-opts OPTS Invoke like this "cvs ... log OPTS"
- FILE1 [FILE2 ...] Show only log information for the named FILE(s)
-
-See http://www.red-bean.com/cvs2cl for maintenance and bug info.
-END_OF_INFO
-}
-
-__END__
-
-=head1 NAME
-
-cvs2cl.pl - produces GNU-style ChangeLogs in CVS working copies, by
- running "cvs log" and parsing the output. Shared log entries are
- unified in an intuitive way.
-
-=head1 DESCRIPTION
-
-This script generates GNU-style ChangeLog files from CVS log
-information. Basic usage: just run it inside a working copy and a
-ChangeLog will appear. It requires repository access (i.e., 'cvs log'
-must work). Run "cvs2cl.pl --help" to see more advanced options.
-
-See http://www.red-bean.com/cvs2cl for updates, and for instructions
-on getting anonymous CVS access to this script.
-
-Maintainer: Karl Fogel <kfogel@red-bean.com>
-Please report bugs to <bug-cvs2cl@red-bean.com>.
-
-=head1 README
-
-This script generates GNU-style ChangeLog files from CVS log
-information. Basic usage: just run it inside a working copy and a
-ChangeLog will appear. It requires repository access (i.e., 'cvs log'
-must work). Run "cvs2cl.pl --help" to see more advanced options.
-
-See http://www.red-bean.com/cvs2cl for updates, and for instructions
-on getting anonymous CVS access to this script.
-
-Maintainer: Karl Fogel <kfogel@red-bean.com>
-Please report bugs to <bug-cvs2cl@red-bean.com>.
-
-=head1 PREREQUISITES
-
-This script requires C<Text::Wrap>, C<Time::Local>, and
-C<File::Basename>.
-It also seems to require C<Perl 5.004_04> or higher.
-
-=pod OSNAMES
-
-any
-
-=pod SCRIPT CATEGORIES
-
-Version_Control/CVS
-
-=cut
-
-
--*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*-
-
-Note about a bug-slash-opportunity:
------------------------------------
-
-There's a bug in Text::Wrap, which affects cvs2cl. This script
-reveals it:
-
- #!/usr/bin/perl -w
-
- use Text::Wrap;
-
- my $test_text =
- "This script demonstrates a bug in Text::Wrap. The very long line
- following this paragraph will be relocated relative to the surrounding
- text:
-
- ====================================================================
-
- See? When the bug happens, we'll get the line of equal signs below
- this paragraph, even though it should be above.";
-
-
- # Print out the test text with no wrapping:
- print "$test_text";
- print "\n";
- print "\n";
-
- # Now print it out wrapped, and see the bug:
- print wrap ("\t", " ", "$test_text");
- print "\n";
- print "\n";
-
-If the line of equal signs were one shorter, then the bug doesn't
-happen. Interesting.
-
-Anyway, rather than fix this in Text::Wrap, we might as well write a
-new wrap() which has the following much-needed features:
-
-* initial indentation, like current Text::Wrap()
-* subsequent line indentation, like current Text::Wrap()
-* user chooses among: force-break long words, leave them alone, or die()?
-* preserve existing indentation: chopped chunks from an indented line
- are indented by same (like this line, not counting the asterisk!)
-* optional list of things to preserve on line starts, default ">"
-
-Note that the last two are essentially the same concept, so unify in
-implementation and give a good interface to controlling them.
-
-And how about:
-
-Optionally, when encounter a line pre-indented by same as previous
-line, then strip the newline and refill, but indent by the same.
-Yeah...
diff --git a/ndb/home/bin/fix-cvs-root b/ndb/home/bin/fix-cvs-root
deleted file mode 100755
index 2c4f158f825..00000000000
--- a/ndb/home/bin/fix-cvs-root
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /bin/sh
-
-# change all CVS/Root to current CVSROOT
-
-[ "$CVSROOT" ] || { echo "no CVSROOT in environment" >&2; exit 1; }
-
-echo "changing all CVS/Root files under `pwd`"
-sleep 1
-
-find . -path '*/CVS/Root' -print |
-while read file; do
- echo "$file"
- chmod +w $file || exit 1
- echo $CVSROOT >$file || exit 1
-done
-
-echo "done"
diff --git a/ndb/home/bin/import-from-bk.sh b/ndb/home/bin/import-from-bk.sh
deleted file mode 100755
index 4e3957be6d5..00000000000
--- a/ndb/home/bin/import-from-bk.sh
+++ /dev/null
@@ -1,158 +0,0 @@
-#! /bin/sh
-
-# XXX does not delete files
-# XXX does not handle nested new dirs
-# this script screams for perl, no time now
-# look for bk2cvs on the net
-
-PATH=/usr/local/bin:$PATH; export PATH
-LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH; export LD_LIBRARY_PATH
-
-batch=n
-if [ "$1" = "-batch" ]; then
- batch=y
- shift
-fi
-
-say() {
- echo "$*"
-}
-
-die() {
- case $# in
- 0) set -- "command failed" ;;
- esac
- say "$* -- aborted" >&2
- exit 1
-}
-
-usage() {
- die "usage: $0 [-batch] top -- copy from mysql/ndb to another NDB_TOP"
-}
-
-doit() {
- cmd="$*"
- if [ $batch = n ]; then
- echo -n "$cmd [y]"
- read junk
- sh -c "$cmd"
- return 0
- else
- echo "$cmd"
- sh -c "$cmd"
- return $?
- fi
-}
-
-say "======================"
-say "`date`"
-
-case $# in
-1) [ -d $1/src/CVS ] || die "$1 is not an NDB_TOP"
- top=$1 ;;
-*) usage ;;
-esac
-
-if ! fgrep ndb_kernel_version.h $top/include/kernel/CVS/Entries >/dev/null 2>&1; then
- die "$top is not an NDB_TOP"
-fi
-
-if find $top -path '*/CVS/Tag' -print | grep . >/dev/null; then
- die "$top: contains CVS/Tag files, not accepted"
-fi
-
-if [ ! -f include/SCCS/s.ndb_version.h ]; then
- die "current dir ($PWD) is not an NDB_TOP"
-fi
-
-doit "bk pull" || exit 1
-doit "bk -r clean"
-doit "bk -r get -q"
-
-files=`bk -r. sfiles -g |
- fgrep -v ' ' |
- fgrep -v /.cvsignore`
-
-n=0
-files2=
-for f in $files; do
- if [ ! -f $f ]; then
- die "$f: no such file"
- fi
- if [ -w $f ]; then
- say "$f: is writable, accept anyway"
- fi
- files2="$files2 $f"
- n=$((n+1))
-done
-files=$files2
-say "$n files..."
-
-adddirs= addfiles= updfiles=
-for f in $files; do
- d=`dirname $f`
- b=`basename $f`
- if [ ! -f $top/$d/CVS/Entries ]; then
- found=n
- for x in $adddirs; do
- if [ $x = $d ]; then found=y; break; fi
- done
- if [ $found = n ]; then
- say "$d: to create dir"
- adddirs="$adddirs $d"
- fi
- addfiles="$addfiles $f"
- say "$f: to create"
- elif ! fgrep "/$b/" $top/$d/CVS/Entries >/dev/null; then
- addfiles="$addfiles $f"
- say "$f: to create"
- else
- cmp $f $top/$f >/dev/null
- case $? in
- 0) continue ;;
- 1) ;;
- *) die "$f: unknown error" ;;
- esac
- updfiles="$updfiles $f"
- say "$f: to update"
- fi
-done
-
-for d in $adddirs; do
- doit "cd $top && mkdir -p $d" || die
-done
-
-for f in $addfiles $updfiles; do
- doit "cp -fp $f $top/$f" || die
-done
-
-for d in $adddirs; do
- # fix 1 level up
- d2=`dirname $d`
- if [ ! -d $top/$d2/CVS ]; then
- doit "cd $top && cvs add $d2" || die
- fi
- doit "cd $top && cvs add $d" || die
-done
-
-for f in $addfiles; do
- kb=
- if echo $f | perl -nle "print(-B $_)" | grep 1 >/dev/null; then
- kb="-kb"
- fi
- doit "cd $top && cvs add $kb $f" || die
-done
-
-tag=import_bk_`date +%Y_%m_%d`
-
-doit "cd $top && cvs commit -m $tag" || die
-doit "cd $top && cvs tag -F $tag" || die
-
-env="NDB_TOP=$top; export NDB_TOP"
-env="$env; USER_FLAGS='-DAPI_TRACE -fmessage-length=0'; export USER_FLAGS"
-doit "$env; cd $top && ./configure"
-doit "$env; cd $top && sh config/GuessConfig.sh"
-doit "$env; cd $top && make clean nuke-deps vim-tags"
-doit "$env; cd $top && make" || die
-
-say "imported ok"
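A hedged usage sketch for import-from-bk.sh: it is run from inside the BitKeeper NDB_TOP (the directory holding include/SCCS/s.ndb_version.h) and is given the CVS NDB_TOP as its only argument; both paths below are illustrative:

    cd /bk/mysql/ndb                        # BitKeeper tree (illustrative)
    import-from-bk.sh /cvs/ndb-top          # interactive, confirms each command
    import-from-bk.sh -batch /cvs/ndb-top   # non-interactive variant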
diff --git a/ndb/home/bin/ndb_deploy b/ndb/home/bin/ndb_deploy
deleted file mode 100755
index 773fc9b8fd7..00000000000
--- a/ndb/home/bin/ndb_deploy
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-if [ $# -eq 0 ]
-then
- for i in $DEPLOY_DST
- do
- rsync -r -v --exclude '*.a' $NDB_TOP/bin $NDB_TOP/lib $i/
- done
-else
- while [ $# -gt 0 ]
- do
- arg=$1
- shift;
- if [ `echo $arg | grep -c lib` -eq 0 ]
- then
- dst=bin/
- else
- dst=lib/
- fi
-
- for i in $DEPLOY_DST
- do
- rsync -v $arg $i/$dst
- done
- done
-fi
-
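A hedged example of driving ndb_deploy; DEPLOY_DST is assumed to hold a space-separated list of rsync destinations, and the host names are made up:

    DEPLOY_DST="host1:/ndb host2:/ndb"; export DEPLOY_DST
    ndb_deploy                                # sync all of $NDB_TOP/bin and $NDB_TOP/lib
    ndb_deploy bin/ndb lib/libNDB_API.so      # single files; a name containing "lib" goes to lib/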
diff --git a/ndb/home/bin/ndbdoxy.pl b/ndb/home/bin/ndbdoxy.pl
deleted file mode 100755
index 89b7de8440e..00000000000
--- a/ndb/home/bin/ndbdoxy.pl
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/local/bin/perl
-#
-# ndbdoxy.pl Executes doxygen on a checked out version of NDB Cluster
-#
-# Written by Lars Thalmann, 2003.
-
-use strict;
-umask 000;
-
-# -----------------------------------------------------------------------------
-# Settings
-# -----------------------------------------------------------------------------
-
-my $root = "/home/elathal/public_html/cvsdoxy";
-
-$ENV{LD_LIBRARY_PATH} = "/usr/local/lib:/opt/as/local/lib";
-$ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} . ":/opt/as/forte6/SUNWspro/lib";
-$ENV{PATH} = $ENV{PATH} . ":/usr/local/bin:/opt/as/local/bin";
-$ENV{PATH} = $ENV{PATH} . ":/opt/as/local/teTeX/bin/sparc-sun-solaris2.8";
-
-my $DOXYGEN = "doxygen";
-my $PDFLATEX = "pdflatex";
-my $MAKEINDEX = "makeindex";
-
-# -----------------------------------------------------------------------------
-# Argument handling
-# -----------------------------------------------------------------------------
-
-if (@ARGV != 3) {
- print<<END;
-Usage:
- ndbdoxy.pl <module> <title> <version>
-
- where
-  <module>	is the cvsdoxy module to doxygenify
-  <title>	is the title of the report
-  <version>	is the version of NDB Cluster
-END
- exit;
-}
-my $module = $ARGV[0];
-my $title = $ARGV[1];
-my $version = $ARGV[2];
-my $destdir = ".";
-
-# -----------------------------------------------------------------------------
-# Execute Doxygen -g
-# -----------------------------------------------------------------------------
-
-if (-r "${root}/doxyfiles/${module}.doxyfile") {
- system("cd ${destdir}; \
- cp ${root}/doxyfiles/${module}.doxyfile Doxyfile");
-} elsif (-r "${root}/doxyfiles/default.doxyfile") {
- system("cd ${destdir}; \
- cp ${root}/doxyfiles/default.doxyfile Doxyfile");
-} else {
- system("cd ${destdir}; $DOXYGEN -g");
-}
-
-# -----------------------------------------------------------------------------
-# HTML Footer
-# -----------------------------------------------------------------------------
-
-if (-r "${root}/doxyfiles/htmlfooter") {
- system("cd ${destdir}; \
- cp ${root}/doxyfiles/htmlfooter footer.html");
-
- open (INFILE, "< ${destdir}/footer.html")
- or die "Error opening ${destdir}/footer.html.\n";
- open (OUTFILE, "> ${destdir}/footer.html.new")
- or die "Error opening ${destdir}/footer.html.new.\n";
- while (<INFILE>) {
- if (/(.*)DATE(.*)$/) {
- print OUTFILE $1 . localtime() . $2;
- } else {
- print OUTFILE;
- }
- }
- close INFILE;
- close OUTFILE;
-
- system("mv ${destdir}/footer.html.new ${destdir}/footer.html");
-} else {
-    print("Warning: No ${root}/doxyfiles/htmlfooter\n");
-}
-
-# -----------------------------------------------------------------------------
-# Execute Doxygen
-# -----------------------------------------------------------------------------
-
-system("cd ${destdir}; $DOXYGEN");
-
-# -----------------------------------------------------------------------------
-# Adjust refman.tex
-# -----------------------------------------------------------------------------
-
-open (INFILE, "< ${destdir}/latex/refman.tex")
- or die "Error opening ${destdir}/latex/refman.tex.\n";
-open (OUTFILE, "> ${destdir}/latex/refman.tex.new")
- or die "Error opening ${destdir}/latex/refman.tex.new.\n";
-
-while (<INFILE>)
-{
- if (/(.*)Reference Manual(.*)$/) {
- print OUTFILE $1 .
- "\\mbox{}\\vspace{-3cm}\\mbox{}" .
- "\\hrule\\bigskip\\bigskip\\bigskip\\bigskip" .
- "\\Huge{" . $title . "}" . $2;
- } elsif (/(.*)Generated by Doxygen 1.2.1[0-9](.*)$/) {
- print OUTFILE $1 .
- "\\begin{center}" .
- "\\LARGE{MySQL AB}" .
- "\\end{center}".
- "\\hfill\\bigskip\\bigskip\\bigskip\\hrule" .
- "\\bigskip\\bigskip\\bigskip\\bigskip\\bigskip" .
- "\\bigskip\\bigskip\\bigskip\\bigskip\\bigskip" .
- "\\bigskip\\bigskip NDB Cluster Release " . $version .
- "\\bigskip\\bigskip\\bigskip\\bigskip\\bigskip\\hfill" .
- $2;
- } elsif (/\\chapter\{File Index\}/) {
- print OUTFILE "\%\\chapter{File Index}\n";
- } elsif (/\\input{files}/) {
- print OUTFILE "\%\\input{files}\n";
- } elsif (/\\chapter\{Page Index\}/) {
- print OUTFILE "\%\\chapter{Page Index}\n";
- } elsif (/\\input{pages}/) {
- print OUTFILE "\%\\input{pages}\n";
- } else {
- print OUTFILE;
- }
-}
-
-close INFILE;
-close OUTFILE;
-
-system("mv ${destdir}/latex/refman.tex.new ${destdir}/latex/refman.tex");
-
-# -----------------------------------------------------------------------------
-# Adjust doxygen.sty
-# -----------------------------------------------------------------------------
-
-open (INFILE, "< ${destdir}/latex/doxygen.sty")
- or die "Error opening INFILE.\n";
-open (OUTFILE, "> ${destdir}/latex/doxygen.sty.new")
- or die "Error opening OUTFILE.\n";
-
-while (<INFILE>)
-{
- if (/\\rfoot/) {
- print OUTFILE "\\rfoot[\\fancyplain{}{\\bfseries\\small \\copyright~Copyright 2003 MySQL AB\\hfill support-cluster\@mysql.com}]{}\n";
- } elsif (/\\lfoot/) {
- print OUTFILE "\\lfoot[]{\\fancyplain{}{\\bfseries\\small support-cluster\@mysql.com\\hfill \\copyright~Copyright 2003 MySQL AB}}\n";
- } else {
- print OUTFILE;
- }
-}
-
-close INFILE;
-close OUTFILE;
-
-system("mv ${destdir}/latex/doxygen.sty.new ${destdir}/latex/doxygen.sty");
-
-# -----------------------------------------------------------------------------
-# Other
-# -----------------------------------------------------------------------------
-
-#system("cd ${root}/tmp/${module}; \
-# mkdir html.tar; \
-# cd html.tar; \
-# cp -r ../html ${module}; \
-# tar cf ${module}.html.tar ${module}; \
-# /usr/local/bin/gzip ${module}.html.tar; \
-# /bin/rm -rf ${root}/tmp/${module}/html.tar/${module}");
-
-#system("cd ${destdir}/latex/; \
-# $PDFLATEX refman.tex \
-# $MAKEINDEX refman.idx \
-# $PDFLATEX refman.tex \
-# mv -f refman.pdf ${module}.pdf");
-
-print<<END;
-Execute:
- latex refman; makeindex refman; latex refman
-END
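An illustrative ndbdoxy.pl run; the module name, title and version are made-up values, and doxygen is executed in the current directory:

    cd <checked-out module source>
    ndbdoxy.pl ndbapi "NDB API Reference" 3.4.0
    cd latex && latex refman && makeindex refman && latex refman   # as the script prints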
diff --git a/ndb/home/bin/ngcalc b/ndb/home/bin/ngcalc
deleted file mode 100755
index a289d384db9..00000000000
--- a/ndb/home/bin/ngcalc
+++ /dev/null
@@ -1,78 +0,0 @@
-#! /usr/local/bin/perl
-
-use strict;
-use Getopt::Long;
-
-sub usage {
- print <<END;
-ngcalc -- calculate node groups and table fragments
-usage: ngcalc [ options ] f1 f2 ...
--g num number of node groups (default 2)
--r num number of replicas (default 2)
--n list comma-separated list of db nodes (default 1,2,...)
-fX number of fragments per node group in table X (e.g. 1,2,8)
-  (all replicas count as the same fragment)
-END
- exit(1);
-};
-
-use vars qw($cnoOfNodeGroups $cnoReplicas $nodeArray);
-
-$cnoOfNodeGroups = 2;
-$cnoReplicas = 2;
-GetOptions(
- "g=i" => \$cnoOfNodeGroups,
- "r=i" => \$cnoReplicas,
- "n=s" => \$nodeArray,
-) or &usage;
-
-my @tableList = @ARGV;
-
-$cnoOfNodeGroups > 0 or &usage;
-$cnoReplicas > 0 or &usage;
-if (! defined($nodeArray)) {
- $nodeArray = join(',', 1..($cnoOfNodeGroups*$cnoReplicas));
-}
-$nodeArray =~ /^\d+(,\d+)*$/ or &usage;
-my @nodeArray = split(/,/, $nodeArray);
-@nodeArray == $cnoOfNodeGroups*$cnoReplicas or &usage;
-
-my @nodeGroupRecord;
-for (my $i = 0; $i < $cnoOfNodeGroups; $i++) {
- my $rec = {};
- my $nodes = [];
- for (my $j = 0; $j < $cnoReplicas; $j++) {
- push(@$nodes, $nodeArray[$i * $cnoReplicas + $j]);
- }
- $rec->{nodesInGroup} = $nodes;
- $rec->{nodeCount} = $cnoReplicas;
- $rec->{nextReplicaNode} = 0;
- $nodeGroupRecord[$i] = $rec;
- print "NG $i: ", join(" ", @{$rec->{nodesInGroup}}), "\n";
-}
-
-# see Dbdih::execCREATE_FRAGMENTATION_REQ
-
-my $c_nextNodeGroup = 0;
-for (my $t = 0; $t < @tableList; $t++) {
- use integer;
- my $f = $tableList[$t];
- my $ng = $c_nextNodeGroup++;
- $c_nextNodeGroup = 0 if $c_nextNodeGroup == $cnoOfNodeGroups;
- my $noOfFragments = $f * $cnoOfNodeGroups;
- my @fragments;
- for (my $fragNo = 0; $fragNo < $noOfFragments; $fragNo++) {
- my $rec = $nodeGroupRecord[$ng];
- my $max = $rec->{nodeCount};
- my $ind = $rec->{nextReplicaNode};
- $rec->{nextReplicaNode} = ($ind + 1 >= $max ? 0 : $ind + 1);
- for (my $replicaNo = 0; $replicaNo < $cnoReplicas; $replicaNo++) {
- my $nodeId = $rec->{nodesInGroup}[$ind++];
- push(@fragments, $nodeId);
- $ind = ($ind == $max ? 0 : $ind);
- }
- $ng++;
- $ng = ($ng == $cnoOfNodeGroups ? 0 : $ng);
- }
- printf "%02d %s\n", $t, join(" ", @fragments);
-}
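A worked example for ngcalc, traced by hand from the script above (two node groups, two replicas, default node numbering, one table with one fragment per node group), so the output shown is an expectation rather than captured program output:

    ngcalc -g 2 -r 2 1
    # NG 0: 1 2
    # NG 1: 3 4
    # 00 1 2 3 4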
diff --git a/ndb/home/bin/parseConfigFile.awk b/ndb/home/bin/parseConfigFile.awk
deleted file mode 100644
index 6903949156c..00000000000
--- a/ndb/home/bin/parseConfigFile.awk
+++ /dev/null
@@ -1,98 +0,0 @@
-BEGIN{
- where=0;
- n_hosts=0;
- n_api=0;
- n_ndb=0;
- n_mgm=0;
- n_ports=0;
-}
-/COMPUTERS/ {
- where=1;
-}
-/\[[ \t]*COMPUTER[ \t]*\]/ {
- where=1;
-}
-/PROCESSES/ {
- where=2;
-}
-/Type: MGMT/ {
- if(where!=1){
- where=2;
- n_mgm++;
- }
-}
-/\[[ \t]*MGM[ \t]*\]/ {
- where=2;
- n_mgm++;
-}
-/Type: DB/ {
- if(where!=1){
- where=3;
- n_ndb++;
- }
-}
-/\[[ \t]*DB[ \t]*\]/ {
- where=3;
- n_ndb++;
-}
-/Type: API/ {
- if(where!=1){
- where=4;
- n_api++;
- }
-}
-/\[[ \t]*API[ \t]*\]/ {
- where=4;
- n_api++;
-}
-/HostName:/ {
- host_names[host_ids[n_hosts]]=$2;
-}
-
-/FileSystemPath:/ {
- if (where==3){
- ndb_fs[ndb_ids[n_ndb]]=$2;
- }
-}
-
-/Id:/{
- if(where==1){
- n_hosts++;
- host_ids[n_hosts]=$2;
- }
- if(where==2){
- mgm_ids[n_mgm]=$2;
- }
- if(where==3){
- ndb_ids[n_ndb]=$2;
- }
- if(where==4){
- api_ids[n_api]=$2;
- }
-}
-/ExecuteOnComputer:/{
- if(where==2){
- mgm_hosts[mgm_ids[n_mgm]]=host_names[$2];
- }
- if(where==3){
- ndb_hosts[ndb_ids[n_ndb]]=host_names[$2];
- }
- if(where==4){
- api_hosts[api_ids[n_api]]=host_names[$2];
- }
-}
-END {
- for(i=1; i<=n_mgm; i++){
- printf("mgm_%d=%s\n", mgm_ids[i], mgm_hosts[mgm_ids[i]]);
- }
- for(i=1; i<=n_ndb; i++){
- printf("ndb_%d=%s\n", ndb_ids[i], ndb_hosts[ndb_ids[i]]);
- printf("ndbfs_%d=%s\n", ndb_ids[i], ndb_fs[ndb_ids[i]]);
- }
- for(i=1; i<=n_api; i++){
- printf("api_%d=%s\n", api_ids[i], api_hosts[api_ids[i]]);
- }
- printf("mgm_nodes=%d\n", n_mgm);
- printf("ndb_nodes=%d\n", n_ndb);
- printf("api_nodes=%d\n", n_api);
-}
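The awk script emits shell assignments that setup-test.sh (below) sources with '.'; for a config with one management node (Id 1), two db nodes (Ids 2, 3) and one api node (Id 4), its output would look roughly like this (host names and the file system path are illustrative):

    mgm_1=host-a
    ndb_2=host-b
    ndbfs_2=/space/ndbfs
    ndb_3=host-c
    ndbfs_3=/space/ndbfs
    api_4=host-a
    mgm_nodes=1
    ndb_nodes=2
    api_nodes=1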
diff --git a/ndb/home/bin/setup-test.sh b/ndb/home/bin/setup-test.sh
deleted file mode 100755
index 61097c30027..00000000000
--- a/ndb/home/bin/setup-test.sh
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/bin/sh
-
-# NAME
-# setup-test.sh - Set up and start a test run
-#
-# SYNOPSIS
-# setup-test.sh [ -x <xterm> ] [ -n <ndb dir> ] [ -r <run dir> ]
-#
-# DESCRIPTION
-# Starts the management server, db nodes and api nodes of a test run, each in an xterm on its configured host.
-#
-# OPTIONS
-#
-# EXAMPLES
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh shell script functions
-#
-# DIAGNOSTICS
-#
-# VERSION
-# 1.01
-#
-# AUTHOR
-# Jonas Oreland
-#
-#
-
-progname=`basename $0`
-synopsis="setup-test.sh [-x xterm] [ -n <ndb dir>] [ -r <run dir>]"
-
-: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
-
-: ${RUN_NDB_NODE_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
- # You may have to experiment a bit
- # to get quoting right (if you need it).
-
-
-. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
-
-# defaults for options related variables
-#
-
-verbose=yes
-options=""
-ndb_dir=$NDB_TOP
-if [ -z "$ndb_dir" ]
-then
- ndb_dir=`pwd`
-fi
-
-local_dir=`pwd`
-own_host=`hostname`
-uniq_id=$$.$$
-
-_xterm=$XTERM
-_rlogin="ssh -X"
-
-# used if error when parsing the options environment variable
-#
-env_opterr="options environment variable: <<$options>>"
-
-
-# Option parsing, for the options variable as well as the command line.
-#
-# We want to be able to set options in an environment variable,
-# as well as on the command line. In order not to have to repeat
-# the same getopts information twice, we loop two times over the
-# getopts while loop. The first time, we process options from
-# the options environment variable, the second time we process
-# options from the command line.
-#
-# The things to change are the actual options and what they do.
-#
-#
-for optstring in "$options" "" # 1. options variable 2. cmd line
-do
- while getopts n:r:x: i $optstring # optstring empty => no arg => cmd line
- do
- case $i in
-
- n) ndb_dir=$OPTARG;; # Ndb dir
- r) run_dir=$OPTARG;; # Run dir
- x) _xterm=$OPTARG;;
- \?) syndie $env_opterr;; # print synopsis and exit
-
- esac
- done
-
- [ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmdline options
-
- env_opterr= # Round 2 should not use the value
-
-done
-shift `expr $OPTIND - 1`
-
-# --- option parsing done ---
-
-ndb_dir=`abspath $ndb_dir`
-run_dir=`abspath $run_dir`
-
-trace "Verifying arguments"
-
-if [ ! -d $ndb_dir/bin ] || [ ! -d $ndb_dir/lib ]
-then
-    msg "Ndb home path seems incorrect: either $ndb_dir/bin or $ndb_dir/lib not found"
- exit 1004
-fi
-
-ndb_bin=$ndb_dir/bin/ndb
-mgm_bin=$ndb_dir/bin/mgmtsrvr
-api_lib=$ndb_dir/lib/libNDB_API.so
-
-if [ ! -x $ndb_bin ]
-then
-    msg "Ndb path seems incorrect: ndb binary not found: $ndb_bin"
- exit 1004
-fi
-
-if [ ! -x $mgm_bin ]
-then
-    msg "Ndb path seems incorrect: management server binary not found: $mgm_bin"
- exit 1004
-fi
-
-init_config=$run_dir/mgm.1/initconfig.txt
-local_config=$run_dir/mgm.1/localcfg.txt
-if [ ! -r $init_config ] || [ ! -r $local_config ]
-then
-    msg "Run path seems incorrect: $init_config or $local_config not found"
- exit 1004
-fi
-
-trace "Parsing $init_config"
-awk -f $NDB_PROJ_HOME/bin/parseConfigFile.awk $init_config > /tmp/run-test.$uniq_id
-. /tmp/run-test.$uniq_id
-cat /tmp/run-test.$uniq_id
-rm -f /tmp/run-test.$uniq_id
-
-trace "Parsing $local_config"
-MgmPort=`grep -v "OwnProcessId" $local_config | cut -d " " -f 2`
-
-trace "Verifying that the mgm port is free"
-telnet $mgm_1 $MgmPort > /tmp/mgm_port.$uniq_id 2>&1 <<EOF
-EOF
-
-if [ 0 -lt `grep -c -i connected /tmp/mgm_port.$uniq_id` ]
-then
- rm /tmp/mgm_port.$uniq_id
- msg "There is already something using port $mgm_1:$MgmPort"
- exit 1003
-fi
-rm /tmp/mgm_port.$uniq_id
-
-fixhost(){
- if [ "$1" != localhost ]
- then
- echo $1
- else
- uname -n
- fi
-}
-
-do_xterm(){
- title=$1
- shift
- xterm -fg black -title "$title" -e $*
-}
-
-save_profile(){
- cp $HOME/.profile /tmp/.profile.$uniq_id
-}
-
-wait_restore_profile(){
- while [ -r /tmp/.profile.$uniq_id ]
- do
- sleep 1
- done
-}
-
-start_mgm(){
- trace "Starting Management server on: $mgm_1"
- save_profile
- mgm_1=`fixhost $mgm_1`
-
- (
- echo "PATH=$ndb_dir/bin:\$PATH"
- echo "LD_LIBRARY_PATH=$ndb_dir/lib:\$LD_LIBRARY_PATH"
- echo "export PATH LD_LIBRARY_PATH"
- echo "cd $run_dir/mgm.1"
- echo "ulimit -Sc unlimited"
- echo "mv /tmp/.profile.$uniq_id $HOME/.profile"
- ) >> $HOME/.profile
-    do_xterm "Mgm on $mgm_1" ${_rlogin} $mgm_1 &
- wait_restore_profile
-}
-
-start_ndb_node(){
- node_id=$1
- dir=$run_dir/ndb.$1
- ndb_host=`eval echo "\$"ndb_$node_id`
- ndb_host=`fixhost $ndb_host`
- ndb_fs=`eval echo "\$"ndbfs_$node_id`
-
- trace "Starting Ndb node $node_id on $ndb_host"
- save_profile
-
- (
- echo "PATH=$ndb_dir/bin:\$PATH"
- echo "LD_LIBRARY_PATH=$ndb_dir/lib:\$LD_LIBRARY_PATH"
- echo "mkdir -p $ndb_fs"
- echo "export PATH LD_LIBRARY_PATH"
- echo "cd $dir"
- echo "ulimit -Sc unlimited"
- echo "mv /tmp/.profile.$uniq_id $HOME/.profile"
- ) >> $HOME/.profile
- do_xterm "Ndb: $node_id on $ndb_host" ${_rlogin} $ndb_host &
- wait_restore_profile
-}
-
-start_api_node(){
- node_id=$1
- dir=$run_dir/api.$1
- api_host=`eval echo "\$"api_$node_id`
- api_host=`fixhost $api_host`
-
- trace "Starting api node $node_id on $api_host"
- save_profile
-
- (
- echo "PATH=$ndb_dir/bin:\$PATH"
- echo "LD_LIBRARY_PATH=$ndb_dir/lib:\$LD_LIBRARY_PATH"
- echo "export PATH LD_LIBRARY_PATH NDB_PROJ_HOME"
- echo "cd $dir"
- echo "ulimit -Sc unlimited"
- echo "mv /tmp/.profile.$uniq_id $HOME/.profile"
- ) >> $HOME/.profile
- do_xterm "API: $node_id on $api_host" ${_rlogin} $api_host &
- wait_restore_profile
-}
-
-for_each_ndb_node(){
- i=1
- j=`expr $mgm_nodes + 1`
- while [ $i -le $ndb_nodes ]
- do
- $* $j
- j=`expr $j + 1`
- i=`expr $i + 1`
- done
-}
-
-for_each_api_node(){
- i=1
- j=`expr $mgm_nodes + $ndb_nodes + 1`
- while [ $i -le $api_nodes ]
- do
- $* $j
- j=`expr $j + 1`
- i=`expr $i + 1`
- done
-}
-
-start_mgm
-for_each_ndb_node start_ndb_node
-for_each_api_node start_api_node
-
-exit 0
-
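A hedged invocation sketch for setup-test.sh; the directory names are illustrative, and NDB_PROJ_HOME must already point at the tree that provides lib/funcs.sh and bin/parseConfigFile.awk:

    NDB_PROJ_HOME=$HOME/ndb-proj; export NDB_PROJ_HOME
    setup-test.sh -n $HOME/ndb-build -r $HOME/test-runs/run1
    # expects $HOME/test-runs/run1/mgm.1/initconfig.txt and localcfg.txt,
    # plus $HOME/ndb-build/bin/ndb and bin/mgmtsrvr, to exist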
diff --git a/ndb/home/bin/signallog2html.lib/signallog2list.awk b/ndb/home/bin/signallog2html.lib/signallog2list.awk
deleted file mode 100644
index 9839f314556..00000000000
--- a/ndb/home/bin/signallog2html.lib/signallog2list.awk
+++ /dev/null
@@ -1,102 +0,0 @@
-BEGIN{
- PRINT=0;
- SIGNAL_ARRAY[0]="";
- BLOCK_ID=0;
- SIGNAL_ID=-22;
-}
-{
- SIGNAL_ARRAY[SIGNAL_ID]=SIGNAL_ID;
-}
-
-/^---- Send ----- Signal ----------------/ {
- DIRECTION="S";
- SENDER="";
- SENDPROCESS="";
- RECEIVER="";
- RECPROCESS="";
- SIGNAL="";
- RECSIGID="?";
- SIGID="?";
- DELAY="N/A";
-}
-
-/^---- Send delay Signal/ {
- DIRECTION="SD";
- SENDER="";
- SENDPROCESS="";
- RECEIVER="";
- RECPROCESS="";
- SIGNAL="";
- RECSIGID="?";
- SIGID="?";
- DELAY=$5;
-
- LEN=length(DELAY);
- DELAY=substr(DELAY,2,LEN);
-}
-
-/^---- Received - Signal ----------------/ {
- DIRECTION="R";
- SENDER="";
- SENDPROCESS="";
- RECEIVER="";
- RECPROCESS="";
- SIGNAL="";
- RECSIGID="?";
- SIGID="?";
- DELAY="N/A";
-}
-
-/r.bn:/{
-
- RECEIVER=$3;
- RECPROCESS=$5;
-
- if(DIRECTION == "R"){
- SIGNAL=$10;
- RECSIGID=$7;
- }
- else
- SIGNAL=$8;
-}
-
-/s.bn:/{
-
- SENDER=$3;
- SIGID=$7;
-
- if(SIGID == SIGNAL_ARRAY[SIGID]){
- PRINT=1;
- if(DIRECTION == "R"){
- SIGNAL_ARRAY[RECSIGID]=RECSIGID;
- };
- }
-
- SENDPROCESS=$5;
-
- LEN=length(RECEIVER);
- RECEIVER=substr(RECEIVER,2,LEN-3);
-
- if(BLOCK_ID == "ALL" || RECEIVER==BLOCK_ID){PRINT=1; }
-
- LEN=length(SENDER);
- SENDER=substr(SENDER,2,LEN-3);
- if(BLOCK_ID == "ALL" || SENDER == BLOCK_ID){ PRINT=1;}
-
- LEN=length(SIGNAL);
- SIGNAL=substr(SIGNAL,2,LEN-2);
-
- LEN=length(SENDPROCESS);
- SENDPROCESS=substr(SENDPROCESS,1,LEN-1);
-
- LEN=length(RECPROCESS);
- RECPROCESS=substr(RECPROCESS,1,LEN-1);
-
- if( PRINT == 1){
- print DIRECTION" "SENDPROCESS" "SENDER" "RECPROCESS" "RECEIVER" "SIGNAL" "SIGID" "RECSIGID" "DELAY;
- }
-
- PRINT=0;
-}
-
-
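Each line written to the list file carries nine space-separated fields in the order of the print statement above: direction, sending process, sender block, receiving process, receiver block, signal name, signal id, received signal id, delay. A purely hypothetical line:

    R 2 DBTC 2 DBLQH LQHKEYREQ 117 345 N/A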
diff --git a/ndb/home/bin/signallog2html.lib/uniq_blocks.awk b/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
deleted file mode 100644
index 43f48d1cde1..00000000000
--- a/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
+++ /dev/null
@@ -1,29 +0,0 @@
-BEGIN{
- NAMES[""]="";
- ORDER[0]="";
- NUM=0;
-}
-
-{
- if(NAMES[$2$3]!=$2$3){
- NAMES[$2$3]=$2$3;
- ORDER[NUM]=$2$3;
- NUM++;
- }
-
- if(NAMES[$4$5]!=$4$5){
- NAMES[$4$5]=$4$5;
- ORDER[NUM]=$4$5;
- NUM++;
- }
-
-
-}
-END{
- for(i=0; i<NUM; i++){
- LIST=ORDER[i]" "LIST;
-
- }
- print LIST;
-}
-
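uniq_blocks.awk produces the column headers for the HTML table: it concatenates fields 2+3 and 4+5 of every list-file line and prints each unique combination once, most recently seen first. For the hypothetical list line shown above it would emit:

    2DBLQH 2DBTC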
diff --git a/ndb/home/bin/signallog2html.sh b/ndb/home/bin/signallog2html.sh
deleted file mode 100755
index 5665275807c..00000000000
--- a/ndb/home/bin/signallog2html.sh
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/bin/sh
-# NAME
-# signallog2html.sh
-#
-# SYNOPSIS
-# signallog2html.sh [ -b <block_name | ALL> ] [ -s <signal_id> ] -f signal_log_file
-#
-# DESCRIPTION
-# Creates a signal sequence diagram in HTML format that can be
-# viewed from a web browser. The HTML file is created from a signal
-# log file and it contains a big table with jpeg files in every
-#	log file and it contains a big table with JPEG images in every
-# could be one of the following: CMVMI MISSRA NDBFS NDBCNTR DBACC
-# DBDICT DBLQH DBDIH DBTC DBTUP QMGR ALL. The signal_id is a
-# number. If no block_name or signal_id is given the default
-#	number. If no block_name or signal_id is given, the default
-#
-#
-#
-# OPTIONS
-#
-# EXAMPLES
-#
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh General shell script functions.
-# uniq_blocks.awk Creates a list of unique blocks
-# in the signal_log_file.
-# signallog2list.awk Creates a list file from the signal_log_file.
-# empty.JPG Jpeg file, must exist in the HTML file
-# directory for viewing.
-# left_line.JPG
-# line.JPG
-# right_line.JPG
-# self_line.JPG
-#
-#
-# SEE ALSO
-#
-# DIAGNOSTICS
-#
-# VERSION
-# 1.0
-#
-# DATE
-# 011029
-#
-# AUTHOR
-# Jan Markborg
-#
-
-progname=`basename $0`
-synopsis="signallog2html.sh [ -b <block_name | ALL> ] [ -s <signal_id> ] -f signal_log_file"
-block_name=""
-signal_id=""
-verbose=yes
-signal_log_file=""
-
-: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
-
-: ${NDB_LOCAL_BUILD_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
- # You may have to experiment a bit
- # to get quoting right (if you need it).
-
-
-. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
-
-# defaults for options related variables
-#
-report_date=`date '+%Y-%m-%d'`
-
-# Option parsing for the the command line.
-#
-
-while getopts f:b:s: i
-do
- case $i in
- f) signal_log_file=$OPTARG;;
- b) block_name=$OPTARG;;
- s) signal_id=$OPTARG;;
- \?) syndie ;; # print synopsis and exit
- esac
-done
-
-# -- Verify
-trace "Verifying signal_log_file $signal_log_file"
-
-if [ x$signal_log_file = "x" ]
-then
-    syndie "No signal_log_file given, option -f is required"
-fi
-
-
-if [ ! -r $signal_log_file ]
-then
- syndie "Invalid signal_log_file name: $signal_log_file not found"
-fi
-
-
-
-if [ -n "$block_name" ]
-then
-
- trace "Verifying block_name"
- case $block_name in
- CMVMI| MISSRA| NDBFS| NDBCNTR| DBACC| DBDICT| DBLQH| DBDIH| DBTC| DBTUP| QMGR);;
- ALL) trace "Signals to/from every block will be traced!";;
- *) syndie "Unknown block name: $block_name";;
- esac
-fi
-
-if [ "$block_name" = "" -a "$signal_id" = "" ]
-then
- block_name=ALL
- trace "block_name = $block_name"
-fi
-
-trace "Arguments OK"
-
-###
-#
-# General html functions
-header(){
- cat <<EOF
-<html><head><title>$*</title></head>
-<body>
-EOF
-}
-
-footer(){
- cat <<EOF
-</body></html>
-EOF
-}
-
-heading(){
- h=$1; shift
- cat <<EOF
-<h$h>$*</h$h>
-EOF
-}
-
-table(){
- echo "<table $*>"
-}
-
-table_header(){
- echo "<th>$*</th>"
-}
-
-end_table(){
- echo "</table>"
-}
-
-row(){
- echo "<tr>"
-}
-
-end_row(){
- echo "</tr>"
-}
-
-c_column(){
- cat <<EOF
-<td valign=center align=center>$*</td>
-EOF
-}
-
-bold(){
- cat <<EOF
-<b>$*</b>
-EOF
-}
-
-column(){
- cat <<EOF
-<td align=left>$*</td>
-EOF
-}
-
-para(){
- cat <<EOF
-<p></p>
-EOF
-}
-
-hr(){
- cat <<EOF
-<hr>
-EOF
-}
-
-img_column(){
- cat <<EOF
-<td><center><$* height=100% width=100%></center></td>
-EOF
-}
-
-# Check the direction of the arrow.
-# usage: arrowDirection "$columnarray" "$sendnode$sendblock" "$recnode$recblock"
-arrowDirection(){
-if [ $2 = $3 ]
-then
- arrow=SELF
- return;
-else
- for x in $1
- do
- if [ $x = $2 ]
- then
- arrow=RIGHT
- break
- elif [ $x = $3 ]
- then
- arrow=LEFT
- break
- fi
- done
-fi
-}
-
-drawImages(){
-for x in $columnarray
-do
- case $arrow in
- SELF)
- if [ $x = $sendnode$sendblock ]
- then
- img_column img SRC=\"self_line.JPG\"
- else
- img_column img SRC=\"empty.JPG\"
- fi;;
-
- RIGHT)
- if [ $x = $recnode$recblock ]
- then
- img_column img SRC=\"right_line.JPG\"
- weHavePassedRec=1
- elif [ $x = $sendnode$sendblock ]
- then
- img_column img SRC=\"empty.JPG\"
- weHavePassedSen=1
- elif [ $weHavePassedRec = 1 -o $weHavePassedSen = 0 ]
- then
- img_column img SRC=\"empty.JPG\"
- elif [ $weHavePassedRec = 0 -a $weHavePassedSen = 1 ]
- then
- img_column img SRC=\"line.JPG\"
- fi;;
-
- LEFT)
- if [ $x = $recnode$recblock ]
- then
- img_column img SRC=\"empty.JPG\"
- weHaveJustPassedRec=1
- weHavePassedRec=1
- continue
- fi
- if [ $x = $sendnode$sendblock -a $weHaveJustPassedRec = 1 ]
- then
- img_column img SRC=\"left_line.JPG\"
- weHaveJustPassedRec=0
- weHavePassedSen=1
- continue
- fi
- if [ $x = $sendnode$sendblock ]
- then
- img_column img SRC=\"line.JPG\"
- weHavePassedSen=1
- continue
- fi
- if [ $weHaveJustPassedRec = 1 ]
- then
- img_column img SRC=\"left_line.JPG\"
- weHaveJustPassedRec=0
- continue
- fi
- if [ $weHavePassedSen = 1 -o $weHavePassedRec = 0 ]
- then
- img_column img SRC=\"empty.JPG\"
- continue
- fi
-
- if [ $weHavePassedRec = 1 -a $weHavePassedSen = 0 ]
- then
- img_column img SRC=\"line.JPG\"
- continue
-
- fi
- column ERROR;;
-
- *)
- echo ERROR;;
- esac
-done
-column $signal
-}
-
-### Main
-trace "Making HTML file"
-(
- header "Signal sequence diagram $report_date"
- heading 1 "Signal sequence diagram $report_date"
-
- trace "Making list file"
-	# make a signal list file from the signal log file.
-	awk -f /home/ndb/bin/signallog2html.lib/signallog2list.awk SIGNAL_ID=$signal_id BLOCK_ID=$block_name $signal_log_file > $signal_log_file.list
-
- COLUMNS=`awk -f /home/ndb/bin/signallog2html.lib/uniq_blocks.awk $signal_log_file.list | wc -w`
-
- table "border=0 cellspacing=0 cellpadding=0 cols=`expr $COLUMNS + 1`"
-
- columnarray=`awk -f /home/ndb/bin/signallog2html.lib/uniq_blocks.awk $signal_log_file.list`
-
- row
- column #make an empty first column!
- for col in $columnarray
- do
- table_header $col
- done
-
- grep "" $signal_log_file.list | \
- while read direction sendnode sendblock recnode recblock signal sigid recsigid delay
- do
- if [ $direction = "R" ]
- then
- row
- weHavePassedRec=0
- weHavePassedSen=0
- weHaveJustPassedRec=0
- arrow=""
-
- # calculate the direction of the arrow.
- arrowDirection "$columnarray" "$sendnode$sendblock" "$recnode$recblock"
-
- # Draw the arrow images.
- drawImages
- end_row
- fi
- done
- end_table
-
- footer
-) > $signal_log_file.html
-
-exit 0
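An illustrative run, assuming a signal log captured from a db node (the file name is made up):

    signallog2html.sh -b DBTC -f Signal.log
    # writes Signal.log.list and Signal.log.html; the JPEG arrow images
    # (empty.JPG, line.JPG, left_line.JPG, right_line.JPG, self_line.JPG)
    # must sit next to the HTML file for the diagram to render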
diff --git a/ndb/home/bin/stripcr b/ndb/home/bin/stripcr
deleted file mode 100755
index 540418f88cf..00000000000
--- a/ndb/home/bin/stripcr
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/sh
-
-
-# NAME
-# stripcr - a program for removing carriage return chars from dos-files.
-#
-# SYNOPSIS
-# stripcr [file...]
-#
-# DESCRIPTION
-# stripcr deletes all CR characters from the given files.
-# The files are edited in place.
-# If no files are given, stdin and stdout are used instead.
-#
-# OPTIONS
-# -s extension Make a copy of the original of each file, and
-# give it the given extension (.bak, .orig, -bak, ...).
-#
-# EXAMPLES
-# stripcr file.txt innerloop.cc
-#	stripcr -s .bak *.cc
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-#	$NDB_PROJ_HOME/lib/funcs.sh	Some useful functions for safe execution
-# of commands, printing, and tracing.
-#
-# VERSION
-# 1.0
-#
-# AUTHOR
-# Jonas Mölsä
-#
-
-
-progname=`basename $0`
-synopsis="stripcr [-s extension] [file...]"
-
-
-: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
-
-: ${STRIPCR_OPTIONS:=--} # If undefined, set to --, to keep getopts happy.
- # You may have to experiment, to get quoting right.
-
-. $NDB_PROJ_HOME/lib/funcs.sh
-
-
-# defaults for options related variables
-#
-extension=
-options="$STRIPCR_OPTIONS"
-
-# used if error when parsing the options environment variable
-#
-env_opterr="options environment variable: <<$options>>"
-
-
-
-# We want to be able to set options in an environment variable,
-# as well as on the command line. In order not to have to repeat
-# the same getopts information twice, we loop two times over the
-# getopts while loop. The first time, we process options from
-# the options environment variable, the second time we process
-# options from the command line.
-#
-# The things to change are the actual options and what they do.
-#
-#
-for optstring in "$options" "" # 1. options variable 2. cmd line
-do
- while getopts s: i $optstring # optstring empty => no arg => cmd line
- do
- case $i in
-
- s) extension="$OPTARG";;
- \?) syndie $env_opterr;; # print synopsis and exit
-
- esac
- done
-
- [ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmd line options
-
- env_opterr= # Round 2 should not use the value
-done
-shift `expr $OPTIND - 1`
-
-
-safe perl -i$extension -lpe 'tr/\r//d' $*
diff --git a/ndb/home/lib/funcs.sh b/ndb/home/lib/funcs.sh
deleted file mode 100644
index b7d8914035e..00000000000
--- a/ndb/home/lib/funcs.sh
+++ /dev/null
@@ -1,294 +0,0 @@
-# NAME
-# safe, safe_eval, die, rawdie, syndie, msg, errmsg,
-# rawmsg, rawerrmsg, trace, errtrace, is_wordmatch
-# - functions for safe execution and convenient printing and tracing
-#
-# abspath - make a path absolute
-#
-# SYNOPSIS
-# . funcs.sh
-#
-# is_wordmatch requires perl.
-#
-# DESCRIPTION
-# Funcs.sh is a collection of somewhat related functions.
-# The main categories and their respective functions are:
-# Controlled execution - safe, safe_eval
-# Exiting with a message - die, rawdie, syndie
-# Printing messages - msg, errmsg, rawmsg, rawerrmsg
-# Tracing - trace, errtrace
-# Pattern matching - is_wordmatch
-#
-#
-# ENVIRONMENT
-# These variables are not exported, but they are still visible
-# to, and used by, these functions.
-#
-# progname basename of $0
-#	verbose		empty or non-empty, used for tracing
-# synopsis string describing the syntax of $progname
-#
-# VERSION
-# 2.0
-#
-# AUTHOR
-#	Jonas Mölsä
-# Jonas Oreland - added abspath
-
-
-
-
-
-# Safely executes the given command and exits
-# with the given command's exit code if != 0,
-# else the return value ("the function's exit
-# code") is 0. Eg: safely cd $install_dir
-#
-safely ()
-{
- "$@"
- safely_code__=$?
- [ $safely_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safely_code__.";
- exit $safely_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-# Safely_eval executes "eval command" and exits
-# with the given command's exit code if != 0,
-# else the return value ("the function's exit
-# code") is 0.
-#
-# Safely_eval is just like safely, but safely_eval does
-# "eval command" instead of just "command"
-#
-# Safely_eval even works with pipes etc., but you have to quote
-# the special characters. Eg: safely_eval ls \| wc \> tst.txt 2\>\&1
-#
-#
-safely_eval ()
-{
- eval "$@"
- safely_eval_code__=$?
- [ $safely_eval_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safely_eval_code__.";
- exit $safely_eval_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-
-
-#
-# safe and safe_eval are deprecated, use safely and safely_eval instead
-#
-
-# Safe executes the given command and exits
-# with the given command's exit code if != 0,
-# else the return value ("the function's exit
-# code") is 0.
-#
-safe ()
-{
- "$@"
- safe_code__=$?
- [ $safe_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safe_code__.";
- exit $safe_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-# Safe_eval executes "eval command" and exits
-# with the given command's exit code if != 0,
-# else the return value ("the function's exit
-# code") is 0.
-#
-# Safe_eval is just like safe, but safe_eval does
-# "eval command" instead of just "command"
-#
-# Safe_eval even works with pipes etc., but you have to quote
-# the special characters. Eg: safe_eval ls \| wc \> tst.txt 2\>\&1
-#
-#
-safe_eval ()
-{
- eval "$@"
- safe_eval_code__=$?
- [ $safe_eval_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safe_eval_code__.";
- exit $safe_eval_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-
-
-# die prints the supplied message to stderr,
-# prefixed with the program name, and exits
-# with the exit code given by "-e num" or
-# 1, if no -e option is present.
-#
-die ()
-{
- die_code__=1
- [ "X$1" = X-e ] && { die_code__=$2; shift 2; }
- [ "X$1" = X-- ] && shift
- errmsg "$@"
- exit $die_code__
-}
-
-
-
-# rawdie prints the supplied message to stderr.
-# It then exits with the exit code given with "-e num"
-# or 1, if no -e option is present.
-#
-rawdie ()
-{
- rawdie_code__=1
- [ "X$1" = X-e ] && { rawdie_code__=$2; shift 2; }
- [ "X$1" = X-- ] && shift
- rawerrmsg "$@"
- exit $rawdie_code__
-}
-
-
-
-
-# Syndie prints the supplied message (if present) to stderr,
-# prefixed with the program name, on the first line.
-# On the second line, it prints $synopsis.
-# It then exits with the exit code given with "-e num"
-# or 1, if no -e option is present.
-#
-syndie ()
-{
- syndie_code__=1
- [ "X$1" = X-e ] && { syndie_code__=$2; shift 2; }
- [ "X$1" = X-- ] && shift
- [ -n "$*" ] && msg "$*"
- rawdie -e $syndie_code__ "Synopsis: $synopsis"
-}
-
-
-
-
-# msg prints the supplied message to stdout,
-# prefixed with the program name.
-#
-msg ()
-{
- echo "${progname:-<no program name set>}:" "$@"
-}
-
-
-
-# errmsg prints the supplied message to stderr,
-# prefixed with the program name.
-#
-errmsg ()
-{
- echo "${progname:-<no program name set>}:" "$@" >&2
-}
-
-
-
-rawmsg () { echo "$*"; } # print the supplied message to stdout
-rawerrmsg () { echo "$*" >&2; } # print the supplied message to stderr
-
-
-
-# trace prints the supplied message to stdout if verbose is non-null
-#
-trace ()
-{
- [ -n "$verbose" ] && msg "$@"
-}
-
-
-# errtrace prints the supplied message to stderr if verbose is non-null
-#
-errtrace ()
-{
- [ -n "$verbose" ] && msg "$@" >&2
-}
-
-
-
-# SYNTAX
-# is_wordmatch candidatelist wordlist
-#
-# DESCRIPTION
-# is_wordmatch returns true if any of the words (candidates)
-# in candidatelist is present in wordlist, otherwise it
-# returns false.
-#
-# EXAMPLES
-# is_wordmatch "tuareg nixdorf low content" "xx yy zz low fgj turn roff sd"
-# returns true, since "low" in candidatelist is present in wordlist.
-#
-# is_wordmatch "tuareg nixdorf low content" "xx yy zz slow fgj turn roff sd"
-# returns false, since none of the words in candidatelist occurs in wordlist.
-#
-# is_wordmatch "tuareg nixdorf low content" "xx yy zz low fgj tuareg roff"
-# returns true, since "low" and "tuareg" in candidatelist occurs in wordlist.
-#
-is_wordmatch ()
-{
- is_wordmatch_pattern__=`echo $1 |
- sed 's/^/\\\\b/;
- s/[ ][ ]*/\\\\b|\\\\b/g;
- s/$/\\\\b/;'`
- shift
- echo "$*" |
- perl -lne "m/$is_wordmatch_pattern__/ || exit 1"
-}
-
-#
-# abspath
-#
-# Stolen from http://oase-shareware.org/shell/shelltips/script_programmer.html
-#
-abspath()
-{
- __abspath_D=`dirname "$1"`
- __abspath_B=`basename "$1"`
- echo "`cd \"$__abspath_D\" 2>/dev/null && pwd || echo \"$__abspath_D\"`/$__abspath_B"
-}
-
-#
-#
-# NdbExit
-#
-#
-NdbExit()
-{
- echo "NdbExit: $1"
- exit $1
-}
-
-NdbGetExitCode()
-{
- __res__=`echo $* | awk '{if($1=="NdbExit:") print $2;}'`
-   if [ -n "$__res__" ]
- then
- echo $__res__
- else
- echo 255
- fi
-}
-
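A minimal sketch of how a script in this tree uses funcs.sh, mirroring the preamble of setup-test.sh and stripcr above; the tool name, option letter and messages are illustrative:

    #!/bin/sh
    progname=`basename $0`
    synopsis="mytool [-v] file"
    verbose=

    : ${NDB_PROJ_HOME:?}            # exit early if the project home is not set
    . $NDB_PROJ_HOME/lib/funcs.sh   # provides safely, die, syndie, msg, trace, abspath

    while getopts v i
    do
        case $i in
        v)  verbose=yes;;
        \?) syndie;;                # print synopsis and exit
        esac
    done
    shift `expr $OPTIND - 1`

    [ $# -eq 1 ] || syndie "exactly one file expected"
    trace "working on `abspath $1`"
    safely cp "$1" "$1.bak"         # exits with cp's code if the copy fails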