author    Ben Pfaff <blp@nicira.com>    2010-02-12 11:26:54 -0800
committer Ben Pfaff <blp@nicira.com>    2010-02-15 12:54:52 -0800
commit    1e19e50e8d3a1822bc82e81409748f173079ccff (patch)
tree      775a9ec89a05b0ea3fcc5e255d26de6678d64304
parent    8e71cf88b78eb704d452b1a4abf8014de340438e (diff)
ovsdb: Implement ovsdb-tool commands "compact" and "convert".
Partial fix for bug #2391.
 ovsdb/file.c          | 171
 ovsdb/file.h          |  14
 ovsdb/ovsdb-tool.1.in |  23
 ovsdb/ovsdb-tool.c    |  71
 tests/automake.mk     |   2
 tests/ovsdb-file.at   |  49
 tests/ovsdb-tool.at   | 266
 tests/ovsdb.at        |   2
 8 files changed, 530 insertions(+), 68 deletions(-)
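
For orientation, here is a minimal usage sketch of the two subcommands this
patch adds, following the syntax documented in the ovsdb-tool.1.in hunk
below (the database and schema file names are illustrative):

    # Compact a database in place, or write the compacted copy to a new
    # file (the target must not already exist).
    ovsdb-tool compact mydb.db
    ovsdb-tool compact mydb.db mydb.compacted

    # Reinterpret a database under an almost-compatible schema, in place
    # or into a new file.
    ovsdb-tool convert mydb.db new-schema.json
    ovsdb-tool convert mydb.db new-schema.json mydb.converted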
diff --git a/ovsdb/file.c b/ovsdb/file.c
index d75582c4a..65a535b33 100644
--- a/ovsdb/file.c
+++ b/ovsdb/file.c
@@ -50,14 +50,58 @@ static struct ovsdb_error *ovsdb_file_txn_commit(struct json *,
const char *comment,
bool durable,
struct ovsdb_log *);
+
+static struct ovsdb_error *ovsdb_file_open__(const char *file_name,
+ const struct ovsdb_schema *,
+ bool read_only, struct ovsdb **);
static struct ovsdb_error *ovsdb_file_txn_from_json(struct ovsdb *,
const struct json *,
+ bool converting,
struct ovsdb_txn **);
static void ovsdb_file_replica_create(struct ovsdb *, struct ovsdb_log *);
+/* Opens database 'file_name' and stores a pointer to the new database in
+ * '*dbp'. If 'read_only' is false, then the database will be locked and
+ * changes to the database will be written to disk. If 'read_only' is true,
+ * the database will not be locked and changes to the database will persist
+ * only as long as the "struct ovsdb".
+ *
+ * On success, returns NULL. On failure, returns an ovsdb_error (which the
+ * caller must destroy) and sets '*dbp' to NULL. */
struct ovsdb_error *
ovsdb_file_open(const char *file_name, bool read_only, struct ovsdb **dbp)
{
+ return ovsdb_file_open__(file_name, NULL, read_only, dbp);
+}
+
+/* Opens database 'file_name' with an alternate schema. The specified 'schema'
+ * is used to interpret the data in 'file_name', ignoring the schema actually
+ * stored in the file. Data in the file for tables or columns that do not
+ * exist in 'schema' are ignored, but the ovsdb file format must otherwise be
+ * observed, including column constraints.
+ *
+ * This function can be useful for upgrading or downgrading databases to
+ * "almost-compatible" formats.
+ *
+ * The database will not be locked. Changes to the database will persist only
+ * as long as the "struct ovsdb".
+ *
+ * On success, stores a pointer to the new database in '*dbp' and returns a
+ * null pointer. On failure, returns an ovsdb_error (which the caller must
+ * destroy) and sets '*dbp' to NULL. */
+struct ovsdb_error *
+ovsdb_file_open_as_schema(const char *file_name,
+ const struct ovsdb_schema *schema,
+ struct ovsdb **dbp)
+{
+ return ovsdb_file_open__(file_name, schema, true, dbp);
+}
+
+static struct ovsdb_error *
+ovsdb_file_open__(const char *file_name,
+ const struct ovsdb_schema *alternate_schema,
+ bool read_only, struct ovsdb **dbp)
+{
enum ovsdb_log_open_mode open_mode;
struct ovsdb_schema *schema;
struct ovsdb_error *error;
@@ -79,12 +123,16 @@ ovsdb_file_open(const char *file_name, bool read_only, struct ovsdb **dbp)
file_name);
}
- error = ovsdb_schema_from_json(json, &schema);
- if (error) {
- json_destroy(json);
- return ovsdb_wrap_error(error,
- "failed to parse \"%s\" as ovsdb schema",
- file_name);
+ if (alternate_schema) {
+ schema = ovsdb_schema_clone(alternate_schema);
+ } else {
+ error = ovsdb_schema_from_json(json, &schema);
+ if (error) {
+ json_destroy(json);
+ return ovsdb_wrap_error(error,
+ "failed to parse \"%s\" as ovsdb schema",
+ file_name);
+ }
}
json_destroy(json);
@@ -92,7 +140,8 @@ ovsdb_file_open(const char *file_name, bool read_only, struct ovsdb **dbp)
while ((error = ovsdb_log_read(log, &json)) == NULL && json) {
struct ovsdb_txn *txn;
- error = ovsdb_file_txn_from_json(db, json, &txn);
+ error = ovsdb_file_txn_from_json(db, json, alternate_schema != NULL,
+ &txn);
json_destroy(json);
if (error) {
break;
@@ -119,7 +168,46 @@ ovsdb_file_open(const char *file_name, bool read_only, struct ovsdb **dbp)
}
static struct ovsdb_error *
+ovsdb_file_update_row_from_json(struct ovsdb_row *row, bool converting,
+ const struct json *json)
+{
+ struct ovsdb_table_schema *schema = row->table->schema;
+ struct ovsdb_error *error;
+ struct shash_node *node;
+
+ if (json->type != JSON_OBJECT) {
+ return ovsdb_syntax_error(json, NULL, "row must be JSON object");
+ }
+
+ SHASH_FOR_EACH (node, json_object(json)) {
+ const char *column_name = node->name;
+ const struct ovsdb_column *column;
+ struct ovsdb_datum datum;
+
+ column = ovsdb_table_schema_get_column(schema, column_name);
+ if (!column) {
+ if (converting) {
+ continue;
+ }
+ return ovsdb_syntax_error(json, "unknown column",
+ "No column %s in table %s.",
+ column_name, schema->name);
+ }
+
+ error = ovsdb_datum_from_json(&datum, &column->type, node->data, NULL);
+ if (error) {
+ return error;
+ }
+ ovsdb_datum_swap(&row->fields[column->index], &datum);
+ ovsdb_datum_destroy(&datum, &column->type);
+ }
+
+ return NULL;
+}
+
+static struct ovsdb_error *
ovsdb_file_txn_row_from_json(struct ovsdb_txn *txn, struct ovsdb_table *table,
+ bool converting,
const struct uuid *row_uuid, struct json *json)
{
const struct ovsdb_row *row = ovsdb_table_get_row(table, row_uuid);
@@ -132,15 +220,15 @@ ovsdb_file_txn_row_from_json(struct ovsdb_txn *txn, struct ovsdb_table *table,
ovsdb_txn_row_delete(txn, row);
return NULL;
} else if (row) {
- return ovsdb_row_from_json(ovsdb_txn_row_modify(txn, row),
- json, NULL, NULL);
+ return ovsdb_file_update_row_from_json(ovsdb_txn_row_modify(txn, row),
+ converting, json);
} else {
struct ovsdb_error *error;
struct ovsdb_row *new;
new = ovsdb_row_create(table);
*ovsdb_row_get_uuid_rw(new) = *row_uuid;
- error = ovsdb_row_from_json(new, json, NULL, NULL);
+ error = ovsdb_file_update_row_from_json(new, converting, json);
if (error) {
ovsdb_row_destroy(new);
}
@@ -153,7 +241,8 @@ ovsdb_file_txn_row_from_json(struct ovsdb_txn *txn, struct ovsdb_table *table,
static struct ovsdb_error *
ovsdb_file_txn_table_from_json(struct ovsdb_txn *txn,
- struct ovsdb_table *table, struct json *json)
+ struct ovsdb_table *table,
+ bool converting, struct json *json)
{
struct shash_node *node;
@@ -172,8 +261,8 @@ ovsdb_file_txn_table_from_json(struct ovsdb_txn *txn,
uuid_string);
}
- error = ovsdb_file_txn_row_from_json(txn, table, &row_uuid,
- txn_row_json);
+ error = ovsdb_file_txn_row_from_json(txn, table, converting,
+ &row_uuid, txn_row_json);
if (error) {
return error;
}
@@ -184,7 +273,7 @@ ovsdb_file_txn_table_from_json(struct ovsdb_txn *txn,
static struct ovsdb_error *
ovsdb_file_txn_from_json(struct ovsdb *db, const struct json *json,
- struct ovsdb_txn **txnp)
+ bool converting, struct ovsdb_txn **txnp)
{
struct ovsdb_error *error;
struct shash_node *node;
@@ -204,7 +293,8 @@ ovsdb_file_txn_from_json(struct ovsdb *db, const struct json *json,
table = shash_find_data(&db->tables, table_name);
if (!table) {
if (!strcmp(table_name, "_date")
- || !strcmp(table_name, "_comment")) {
+ || !strcmp(table_name, "_comment")
+ || converting) {
continue;
}
@@ -213,7 +303,8 @@ ovsdb_file_txn_from_json(struct ovsdb *db, const struct json *json,
goto error;
}
- error = ovsdb_file_txn_table_from_json(txn, table, txn_table_json);
+ error = ovsdb_file_txn_table_from_json(txn, table, converting,
+ txn_table_json);
if (error) {
goto error;
}
@@ -225,6 +316,54 @@ error:
ovsdb_txn_abort(txn);
return error;
}
+
+/* Saves a snapshot of 'db''s current contents as 'file_name'. If 'comment' is
+ * nonnull, then it is added along with the data contents and can be viewed
+ * with "ovsdb-tool show-log".
+ *
+ * 'locking' is passed along to ovsdb_log_open() untouched. */
+struct ovsdb_error *
+ovsdb_file_save_copy(const char *file_name, int locking,
+ const char *comment, const struct ovsdb *db)
+{
+ const struct shash_node *node;
+ struct ovsdb_file_txn ftxn;
+ struct ovsdb_error *error;
+ struct ovsdb_log *log;
+ struct json *json;
+
+ error = ovsdb_log_open(file_name, OVSDB_LOG_CREATE, locking, &log);
+ if (error) {
+ return error;
+ }
+
+ /* Write schema. */
+ json = ovsdb_schema_to_json(db->schema);
+ error = ovsdb_log_write(log, json);
+ json_destroy(json);
+ if (error) {
+ goto exit;
+ }
+
+ /* Write data. */
+ ovsdb_file_txn_init(&ftxn);
+ SHASH_FOR_EACH (node, &db->tables) {
+ const struct ovsdb_table *table = node->data;
+ const struct ovsdb_row *row;
+
+ HMAP_FOR_EACH (row, struct ovsdb_row, hmap_node, &table->rows) {
+ ovsdb_file_txn_add_row(&ftxn, NULL, row);
+ }
+ }
+ error = ovsdb_file_txn_commit(ftxn.json, comment, true, log);
+
+exit:
+ ovsdb_log_close(log);
+ if (error) {
+ remove(file_name);
+ }
+ return error;
+}
/* Replica implementation. */
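
Taken together, the two new entry points above let a caller rewrite a
database under a chosen schema. A minimal sketch of how they compose,
assuming the headers touched by this patch; the helper name and file names
are illustrative, and compact_or_convert() in ovsdb-tool.c below is the
real caller:

    #include "file.h"
    #include "ovsdb.h"
    #include "ovsdb-error.h"

    /* Sketch: reinterpret "old.db" under 'new_schema' and write a compacted
     * snapshot of it as "new.db".  Mirrors compact_or_convert() below, minus
     * the lock files and the in-place rename. */
    struct ovsdb_error *
    convert_to_copy(const struct ovsdb_schema *new_schema)
    {
        struct ovsdb *db;
        struct ovsdb_error *error;

        /* Read the old file, using 'new_schema' instead of the schema stored
         * in the file; data for unknown tables and columns is ignored. */
        error = ovsdb_file_open_as_schema("old.db", new_schema, &db);
        if (error) {
            return error;
        }

        /* Write a fresh snapshot: the schema record first, then a single
         * transaction containing every surviving row. */
        error = ovsdb_file_save_copy("new.db", false, "converted copy", db);
        ovsdb_destroy(db);
        return error;
    }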
diff --git a/ovsdb/file.h b/ovsdb/file.h
index 2a2747798..40701720a 100644
--- a/ovsdb/file.h
+++ b/ovsdb/file.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009 Nicira Networks
+/* Copyright (c) 2009, 2010 Nicira Networks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,11 +18,23 @@
#include <stdbool.h>
#include "compiler.h"
+#include "log.h"
struct ovsdb;
+struct ovsdb_schema;
struct ovsdb_error *ovsdb_file_open(const char *file_name, bool read_only,
struct ovsdb **)
WARN_UNUSED_RESULT;
+struct ovsdb_error *ovsdb_file_open_as_schema(const char *file_name,
+ const struct ovsdb_schema *,
+ struct ovsdb **)
+ WARN_UNUSED_RESULT;
+
+struct ovsdb_error *ovsdb_file_save_copy(const char *file_name, int locking,
+ const char *comment,
+ const struct ovsdb *)
+ WARN_UNUSED_RESULT;
+
#endif /* ovsdb/file.h */
diff --git a/ovsdb/ovsdb-tool.1.in b/ovsdb/ovsdb-tool.1.in
index 04e629686..c2c2ce32d 100644
--- a/ovsdb/ovsdb-tool.1.in
+++ b/ovsdb/ovsdb-tool.1.in
@@ -40,6 +40,29 @@ existing \fIdb\fR.
\fIschema\fR must contain an OVSDB schema in JSON format. Refer to
the OVSDB specification for details.
.
+.IP "\fBcompact\fI db \fR[\fItarget\fR]"
+Reads \fIdb\fR and writes a compacted version. If \fItarget\fR is
+specified, the compacted version is written as a new file named
+\fItarget\fR, which must not already exist. If \fItarget\fR is
+omitted, then the compacted version of the database replaces \fIdb\fR
+in-place.
+.
+.IP "\fBconvert\fI db schema \fR[\fItarget\fR]"
+Reads \fIdb\fR, translating it into the schema specified in
+\fIschema\fR, and writes out the new interpretation. If \fItarget\fR
+is specified, the translated version is written as a new file named
+\fItarget\fR, which must not already exist. If \fItarget\fR is
+omitted, then the translated version of the database replaces \fIdb\fR
+in-place.
+.IP
+This command can do simple ``upgrades'' and ``downgrades'' on a
+database's schema. The data in \fIdb\fR must be valid when
+interpreted under \fIschema\fR, with only one exception: data in
+\fIdb\fR for tables and columns that do not exist in \fIschema\fR are
+ignored. Columns that exist in \fIschema\fR but not in \fIdb\fR are
+set to their default values. All of \fIschema\fR's constraints apply
+in full.
+.
.IP "\fBquery\fI db transaction\fR"
Opens \fIdb\fR, executes \fItransaction\fR on it, and prints the
results. The \fItransaction\fR must be a JSON array in the format of
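
To make the convert description above concrete, the tests added later in
this patch drop the "name" column by supplying a pared-down schema; the
workflow looks like this (file names follow the tests):

    $ cat > new-schema <<'EOF'
    {"name": "ordinals",
     "tables": {
       "ordinals": {
         "columns": {
           "number": {"type": "integer"}}}}}
    EOF
    $ ovsdb-tool convert db new-schema   # rows keep "number", lose "name"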
diff --git a/ovsdb/ovsdb-tool.c b/ovsdb/ovsdb-tool.c
index f419fb852..26f9003bd 100644
--- a/ovsdb/ovsdb-tool.c
+++ b/ovsdb/ovsdb-tool.c
@@ -25,10 +25,12 @@
#include "command-line.h"
#include "compiler.h"
#include "file.h"
+#include "lockfile.h"
#include "log.h"
#include "json.h"
#include "ovsdb.h"
#include "ovsdb-error.h"
+#include "socket-util.h"
#include "table.h"
#include "timeval.h"
#include "util.h"
@@ -109,6 +111,7 @@ usage(void)
"usage: %s [OPTIONS] COMMAND [ARG...]\n"
" create DB SCHEMA create DB with the given SCHEMA\n"
" compact DB [DST] compact DB in-place (or to DST)\n"
+ " convert DB SCHEMA [DST] convert DB to SCHEMA (to DST)\n"
" extract-schema DB print DB's schema on stdout\n"
" query DB TRNS execute read-only transaction on DB\n"
" transact DB TRNS execute read/write transaction on DB\n"
@@ -174,6 +177,72 @@ do_create(int argc OVS_UNUSED, char *argv[])
}
static void
+compact_or_convert(const char *src_name, const char *dst_name,
+ const struct ovsdb_schema *new_schema,
+ const char *comment)
+{
+ struct lockfile *src_lock;
+ struct lockfile *dst_lock;
+ bool in_place = dst_name == NULL;
+ struct ovsdb *db;
+ int retval;
+
+ /* Get (temporary) destination. */
+ if (in_place) {
+ dst_name = xasprintf("%s.tmp", src_name);
+ }
+
+ /* Lock source and (temporary) destination. */
+ retval = lockfile_lock(src_name, INT_MAX, &src_lock);
+ if (retval) {
+ ovs_fatal(retval, "%s: failed to lock lockfile", src_name);
+ }
+
+ retval = lockfile_lock(dst_name, INT_MAX, &dst_lock);
+ if (retval) {
+ ovs_fatal(retval, "%s: failed to lock lockfile", dst_name);
+ }
+
+ /* Save a copy. */
+ check_ovsdb_error(new_schema
+ ? ovsdb_file_open_as_schema(src_name, new_schema, &db)
+ : ovsdb_file_open(src_name, true, &db));
+ check_ovsdb_error(ovsdb_file_save_copy(dst_name, false, comment, db));
+ ovsdb_destroy(db);
+
+ /* Replace source. */
+ if (in_place) {
+ if (rename(dst_name, src_name)) {
+ ovs_fatal(errno, "failed to rename \"%s\" to \"%s\"",
+ dst_name, src_name);
+ }
+ fsync_parent_dir(dst_name);
+ } else {
+ lockfile_unlock(src_lock);
+ }
+
+ lockfile_unlock(dst_lock);
+}
+
+static void
+do_compact(int argc OVS_UNUSED, char *argv[])
+{
+ compact_or_convert(argv[1], argv[2], NULL, "compacted by ovsdb-tool");
+}
+
+static void
+do_convert(int argc OVS_UNUSED, char *argv[])
+{
+ const char *schema_file_name = argv[2];
+ struct ovsdb_schema *new_schema;
+
+ check_ovsdb_error(ovsdb_schema_from_file(schema_file_name, &new_schema));
+ compact_or_convert(argv[1], argv[3], new_schema,
+ "converted by ovsdb-tool");
+ ovsdb_schema_destroy(new_schema);
+}
+
+static void
transact(bool read_only, const char *db_file_name, const char *transaction)
{
struct json *request, *result;
@@ -337,6 +406,8 @@ do_help(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
static const struct command all_commands[] = {
{ "create", 2, 2, do_create },
+ { "compact", 1, 2, do_compact },
+ { "convert", 2, 3, do_convert },
{ "query", 2, 2, do_query },
{ "transact", 2, 2, do_transact },
{ "show-log", 1, 1, do_show_log },
diff --git a/tests/automake.mk b/tests/automake.mk
index fe493fd36..920117f0b 100644
--- a/tests/automake.mk
+++ b/tests/automake.mk
@@ -33,7 +33,7 @@ TESTSUITE_AT = \
tests/ovsdb-transaction.at \
tests/ovsdb-execution.at \
tests/ovsdb-trigger.at \
- tests/ovsdb-file.at \
+ tests/ovsdb-tool.at \
tests/ovsdb-server.at \
tests/ovsdb-monitor.at \
tests/ovsdb-idl.at \
diff --git a/tests/ovsdb-file.at b/tests/ovsdb-file.at
deleted file mode 100644
index 4f8f7ee40..000000000
--- a/tests/ovsdb-file.at
+++ /dev/null
@@ -1,49 +0,0 @@
-AT_BANNER([OVSDB -- file storage])
-
-# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
-#
-# Creates a database with the given SCHEMA and runs each of the
-# TRANSACTIONS (which should be a quoted list of quoted strings)
-# against it with ovsdb-tool one at a time.
-#
-# Checks that the overall output is OUTPUT, but UUIDs in the output
-# are replaced by markers of the form <N> where N is a number. The
-# first unique UUID is replaced by <0>, the next by <1>, and so on.
-# If a given UUID appears more than once it is always replaced by the
-# same marker.
-#
-# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
-m4_define([OVSDB_CHECK_EXECUTION],
- [AT_SETUP([$1])
- AT_KEYWORDS([ovsdb file positive $5])
- AT_DATA([schema], [$2
-])
- touch .db.~lock~
- AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
- m4_foreach([txn], [$3],
- [AT_CHECK([ovsdb-tool transact db 'txn'], [0], [stdout], [ignore])
-cat stdout >> output
-])
- AT_CHECK([perl $srcdir/uuidfilt.pl output], [0], [$4])
- AT_CLEANUP])
-
-EXECUTION_EXAMPLES
-
-AT_SETUP([transaction comments])
-AT_KEYWORDS([ovsdb file positive])
-AT_DATA([schema], [ORDINAL_SCHEMA
-])
-touch .db.~lock~
-AT_CHECK([ovsdb-tool create db schema], [0], [], [ignore])
-AT_CHECK([[ovsdb-tool transact db '
- ["ordinals",
- {"op": "insert",
- "table": "ordinals",
- "row": {"name": "five", "number": 5}},
- {"op": "comment",
- "comment": "add row for 5"}]']], [0], [stdout], [ignore])
-AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
- [[[{"uuid":["uuid","<0>"]},{}]
-]])
-AT_CHECK([grep -q "add row for 5" db])
-AT_CLEANUP
diff --git a/tests/ovsdb-tool.at b/tests/ovsdb-tool.at
new file mode 100644
index 000000000..5e25fe5ae
--- /dev/null
+++ b/tests/ovsdb-tool.at
@@ -0,0 +1,266 @@
+AT_BANNER([OVSDB -- ovsdb-tool])
+
+# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
+#
+# Creates a database with the given SCHEMA and runs each of the
+# TRANSACTIONS (which should be a quoted list of quoted strings)
+# against it with ovsdb-tool one at a time.
+#
+# Checks that the overall output is OUTPUT, but UUIDs in the output
+# are replaced by markers of the form <N> where N is a number. The
+# first unique UUID is replaced by <0>, the next by <1>, and so on.
+# If a given UUID appears more than once it is always replaced by the
+# same marker.
+#
+# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
+m4_define([OVSDB_CHECK_EXECUTION],
+ [AT_SETUP([$1])
+ AT_KEYWORDS([ovsdb file positive $5])
+ AT_DATA([schema], [$2
+])
+ touch .db.~lock~
+ AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
+ m4_foreach([txn], [$3],
+ [AT_CHECK([ovsdb-tool transact db 'txn'], [0], [stdout], [ignore])
+cat stdout >> output
+])
+ AT_CHECK([perl $srcdir/uuidfilt.pl output], [0], [$4])
+ AT_CLEANUP])
+
+EXECUTION_EXAMPLES
+
+AT_SETUP([transaction comments])
+AT_KEYWORDS([ovsdb file positive])
+AT_DATA([schema], [ORDINAL_SCHEMA
+])
+touch .db.~lock~
+AT_CHECK([ovsdb-tool create db schema], [0], [], [ignore])
+AT_CHECK([[ovsdb-tool transact db '
+ ["ordinals",
+ {"op": "insert",
+ "table": "ordinals",
+ "row": {"name": "five", "number": 5}},
+ {"op": "comment",
+ "comment": "add row for 5"}]']], [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [[[{"uuid":["uuid","<0>"]},{}]
+]])
+AT_CHECK([grep -q "add row for 5" db])
+AT_CLEANUP
+
+AT_SETUP([ovsdb-tool compact])
+AT_KEYWORDS([ovsdb file positive])
+AT_DATA([schema], [ORDINAL_SCHEMA
+])
+touch .db.~lock~
+AT_CHECK([ovsdb-tool create db schema], [0], [], [ignore])
+dnl Do a bunch of random transactions that put crap in the database log.
+AT_CHECK(
+ [[for pair in 'zero 0' 'one 1' 'two 2' 'three 3' 'four 4' 'five 5'; do
+ set -- $pair
+ ovsdb-tool transact db '
+ ["ordinals",
+ {"op": "insert",
+ "table": "ordinals",
+ "row": {"name": "'$1'", "number": '$2'}},
+ {"op": "comment",
+ "comment": "add row for '"$pair"'"}]'
+ ovsdb-tool transact db '
+ ["ordinals",
+ {"op": "delete",
+ "table": "ordinals",
+ "where": [["number", "==", '$2']]},
+ {"op": "comment",
+ "comment": "delete row for '"$2"'"}]'
+ ovsdb-tool transact db '
+ ["ordinals",
+ {"op": "insert",
+ "table": "ordinals",
+ "row": {"name": "'$1'", "number": '$2'}},
+ {"op": "comment",
+ "comment": "add back row for '"$pair"'"}]'
+ done]],
+ [0], [stdout], [ignore])
+dnl Check that all the crap is in fact in the database log.
+AT_CHECK([[perl $srcdir/uuidfilt.pl db | grep -v ^OVSDB | sed 's/"_date":[0-9]*/"_date":0/']], [0],
+ [[{"name":"ordinals","tables":{"ordinals":{"columns":{"name":{"type":"string"},"number":{"type":"integer"}}}}}
+{"ordinals":{"<0>":{"name":"zero"}},"_comment":"add row for zero 0","_date":0}
+{"ordinals":{"<0>":null},"_comment":"delete row for 0","_date":0}
+{"ordinals":{"<1>":{"name":"zero"}},"_comment":"add back row for zero 0","_date":0}
+{"ordinals":{"<2>":{"number":1,"name":"one"}},"_comment":"add row for one 1","_date":0}
+{"ordinals":{"<2>":null},"_comment":"delete row for 1","_date":0}
+{"ordinals":{"<3>":{"number":1,"name":"one"}},"_comment":"add back row for one 1","_date":0}
+{"ordinals":{"<4>":{"number":2,"name":"two"}},"_comment":"add row for two 2","_date":0}
+{"ordinals":{"<4>":null},"_comment":"delete row for 2","_date":0}
+{"ordinals":{"<5>":{"number":2,"name":"two"}},"_comment":"add back row for two 2","_date":0}
+{"ordinals":{"<6>":{"number":3,"name":"three"}},"_comment":"add row for three 3","_date":0}
+{"ordinals":{"<6>":null},"_comment":"delete row for 3","_date":0}
+{"ordinals":{"<7>":{"number":3,"name":"three"}},"_comment":"add back row for three 3","_date":0}
+{"ordinals":{"<8>":{"number":4,"name":"four"}},"_comment":"add row for four 4","_date":0}
+{"ordinals":{"<8>":null},"_comment":"delete row for 4","_date":0}
+{"ordinals":{"<9>":{"number":4,"name":"four"}},"_comment":"add back row for four 4","_date":0}
+{"ordinals":{"<10>":{"number":5,"name":"five"}},"_comment":"add row for five 5","_date":0}
+{"ordinals":{"<10>":null},"_comment":"delete row for 5","_date":0}
+{"ordinals":{"<11>":{"number":5,"name":"five"}},"_comment":"add back row for five 5","_date":0}
+]])
+dnl Dump out and check the actual database contents.
+AT_CHECK([[ovsdb-server --unixctl=$PWD/unixctl --remote=punix:socket --run "ovsdb-client dump unix:socket ordinals" db]],
+ [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [_uuid name number
+------------------------------------ ----- ------
+<0> five 5 @&t@
+<1> four 4 @&t@
+<2> one 1 @&t@
+<3> three 3 @&t@
+<4> two 2 @&t@
+<5> zero 0 @&t@
+])
+dnl Now compact the database in-place.
+touch .db.tmp.~lock~
+AT_CHECK([[ovsdb-tool compact db]], [0], [], [ignore])
+dnl We can't fully re-check the contents of the database log, because the
+dnl order of the records is not predictable, but there should only be 4 lines
+dnl in it now.
+AT_CAPTURE_FILE([db])
+AT_CHECK([wc -l < db], [0], [4
+])
+dnl And check that the dumped data is the same too:
+AT_CHECK([[ovsdb-server --unixctl=$PWD/unixctl --remote=punix:socket --run "ovsdb-client dump unix:socket ordinals" db]],
+ [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [_uuid name number
+------------------------------------ ----- ------
+<0> five 5 @&t@
+<1> four 4 @&t@
+<2> one 1 @&t@
+<3> three 3 @&t@
+<4> two 2 @&t@
+<5> zero 0 @&t@
+])
+AT_CLEANUP
+
+AT_SETUP([ovsdb-tool convert -- removing a column])
+AT_KEYWORDS([ovsdb file positive])
+AT_DATA([schema], [ORDINAL_SCHEMA
+])
+AT_DATA([new-schema],
+ [[{"name": "ordinals",
+ "tables": {
+ "ordinals": {
+ "columns": {
+ "number": {"type": "integer"}}}}}
+]])
+touch .db.~lock~
+AT_CHECK([ovsdb-tool create db schema], [0], [], [ignore])
+dnl Put some data in the database.
+AT_CHECK(
+ [[for pair in 'zero 0' 'one 1' 'two 2' 'three 3' 'four 4' 'five 5'; do
+ set -- $pair
+ ovsdb-tool transact db '
+ ["ordinals",
+ {"op": "insert",
+ "table": "ordinals",
+ "row": {"name": "'$1'", "number": '$2'}},
+ {"op": "comment",
+ "comment": "add row for '"$pair"'"}]'
+ done]],
+ [0], [stdout], [ignore])
+dnl Dump out and check the actual database contents.
+AT_CHECK([[ovsdb-server --unixctl=$PWD/unixctl --remote=punix:socket --run "ovsdb-client dump unix:socket ordinals" db]],
+ [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [_uuid name number
+------------------------------------ ----- ------
+<0> five 5 @&t@
+<1> four 4 @&t@
+<2> one 1 @&t@
+<3> three 3 @&t@
+<4> two 2 @&t@
+<5> zero 0 @&t@
+])
+dnl Now convert the database in-place.
+touch .db.tmp.~lock~
+AT_CHECK([[ovsdb-tool convert db new-schema]], [0], [], [ignore])
+dnl We can't fully re-check the contents of the database log, because the
+dnl order of the records is not predictable, but there should only be 4 lines
+dnl in it now.
+AT_CAPTURE_FILE([db])
+AT_CHECK([wc -l < db], [0], [4
+])
+dnl And check that the dumped data is the same except for the removed column:
+AT_CHECK([[ovsdb-server --unixctl=$PWD/unixctl --remote=punix:socket --run "ovsdb-client dump unix:socket ordinals" db]],
+ [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [_uuid number
+------------------------------------ ------
+<0> 0 @&t@
+<1> 1 @&t@
+<2> 2 @&t@
+<3> 3 @&t@
+<4> 4 @&t@
+<5> 5 @&t@
+])
+AT_CLEANUP
+
+AT_SETUP([ovsdb-tool convert -- adding a column])
+AT_KEYWORDS([ovsdb file positive])
+AT_DATA([schema],
+ [[{"name": "ordinals",
+ "tables": {
+ "ordinals": {
+ "columns": {
+ "number": {"type": "integer"}}}}}
+]])
+AT_DATA([new-schema], [ORDINAL_SCHEMA
+])
+touch .db.~lock~
+AT_CHECK([ovsdb-tool create db schema], [0], [], [ignore])
+dnl Put some data in the database.
+AT_CHECK(
+ [[for number in 0 1 2 3 4 5; do
+ ovsdb-tool transact db '
+ ["ordinals",
+ {"op": "insert",
+ "table": "ordinals",
+ "row": {"number": '$number'}},
+ {"op": "comment",
+ "comment": "add row for '"$number"'"}]'
+ done]],
+ [0], [stdout], [ignore])
+dnl Dump out and check the actual database contents.
+AT_CHECK([[ovsdb-server --unixctl=$PWD/unixctl --remote=punix:socket --run "ovsdb-client dump unix:socket ordinals" db]],
+ [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [_uuid number
+------------------------------------ ------
+<0> 0 @&t@
+<1> 1 @&t@
+<2> 2 @&t@
+<3> 3 @&t@
+<4> 4 @&t@
+<5> 5 @&t@
+])
+dnl Now convert the database in-place.
+touch .db.tmp.~lock~
+AT_CHECK([[ovsdb-tool convert db new-schema]], [0], [], [ignore])
+dnl We can't fully re-check the contents of the database log, because the
+dnl order of the records is not predictable, but there should only be 4 lines
+dnl in it now.
+AT_CAPTURE_FILE([db])
+AT_CHECK([wc -l < db], [0], [4
+])
+dnl And check that the dumped data is the same except for the added column:
+AT_CHECK([[ovsdb-server --unixctl=$PWD/unixctl --remote=punix:socket --run "ovsdb-client dump unix:socket ordinals" db]],
+ [0], [stdout], [ignore])
+AT_CHECK([perl $srcdir/uuidfilt.pl stdout], [0],
+ [_uuid name number
+------------------------------------ ---- ------
+<0> "" 0 @&t@
+<1> "" 1 @&t@
+<2> "" 2 @&t@
+<3> "" 3 @&t@
+<4> "" 4 @&t@
+<5> "" 5 @&t@
+])
+AT_CLEANUP
diff --git a/tests/ovsdb.at b/tests/ovsdb.at
index 275c90d6d..141417aad 100644
--- a/tests/ovsdb.at
+++ b/tests/ovsdb.at
@@ -48,7 +48,7 @@ m4_include([tests/ovsdb-query.at])
m4_include([tests/ovsdb-transaction.at])
m4_include([tests/ovsdb-execution.at])
m4_include([tests/ovsdb-trigger.at])
-m4_include([tests/ovsdb-file.at])
+m4_include([tests/ovsdb-tool.at])
m4_include([tests/ovsdb-server.at])
m4_include([tests/ovsdb-monitor.at])
m4_include([tests/ovsdb-idl.at])