Diffstat (limited to 'lang/csharp/src/HashDatabaseConfig.cs')
-rw-r--r--   lang/csharp/src/HashDatabaseConfig.cs | 131
1 files changed, 114 insertions, 17 deletions
diff --git a/lang/csharp/src/HashDatabaseConfig.cs b/lang/csharp/src/HashDatabaseConfig.cs
index 468c6962..7f36b4da 100644
--- a/lang/csharp/src/HashDatabaseConfig.cs
+++ b/lang/csharp/src/HashDatabaseConfig.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -16,9 +16,8 @@ namespace BerkeleyDB {
public class HashDatabaseConfig : DatabaseConfig {
/* Fields for db->set_flags() */
/// <summary>
- /// Policy for duplicate data items in the database; that is, insertion
- /// when the key of the key/data pair being inserted already exists in
- /// the database will be successful.
+ /// Policy for duplicate data items in the database. Allows a key/data pair
+ /// to be inserted into the database even if the key already exists.
/// </summary>
/// <remarks>
/// <para>
@@ -31,19 +30,19 @@ namespace BerkeleyDB {
/// duplicate comparison function. If the application does not specify a
/// comparison function using
/// <see cref="DuplicateCompare"/>, a default lexical
- /// comparison will be used.
+ /// comparison is used.
/// </para>
- /// <para>
+ /// <para>
/// <see cref="DuplicatesPolicy.SORTED"/> is preferred to
/// <see cref="DuplicatesPolicy.UNSORTED"/> for performance reasons.
/// <see cref="DuplicatesPolicy.UNSORTED"/> should only be used by
/// applications wanting to order duplicate data items manually.
/// </para>
- /// <para>
+ /// <para>
/// If the database already exists, the value of Duplicates must be the
- /// same as the existing database or an error will be returned.
+ /// same as the existing database or an error is returned.
/// </para>
- /// </remarks>
+ /// </remarks>
public DuplicatesPolicy Duplicates;
internal new uint flags {
get {
@@ -59,7 +58,7 @@ namespace BerkeleyDB {
/// <remarks>
/// If the database does not already exist and
/// <see cref="CreatePolicy.NEVER"/> is set,
- /// <see cref="HashDatabase.Open"/> will fail.
+ /// <see cref="HashDatabase.Open"/> fails.
/// </remarks>
public CreatePolicy Creation;
internal new uint openFlags {
@@ -71,6 +70,46 @@ namespace BerkeleyDB {
}
/// <summary>
+ /// The path of the directory where blobs are stored.
+ /// <para>
+ /// If the database is opened within a <see cref="DatabaseEnvironment"/>,
+ /// this path setting is ignored during
+ /// <see cref="HashDatabase.Open"/>. Use
+ /// <see cref="HashDatabase.BlobDir"/> to identify the current storage
+ /// location of blobs after opening the database.
+ /// </para>
+ /// </summary>
+ public string BlobDir;
+
+ internal bool blobThresholdIsSet;
+ private uint blobThreshold;
+ /// <summary>
+ /// The size, in bytes, used to determine whether a data item is
+ /// stored as a blob.
+ /// <para>
+ /// Any data item that is equal to or larger in size than the
+ /// threshold value is automatically stored as a blob.
+ /// </para>
+ /// <para>
+ /// If the threshold value is 0, blobs are never used by the
+ /// database.
+ /// </para>
+ /// <para>
+ /// It is illegal to enable blob support in a database that is
+ /// configured as an in-memory database, or that is configured with
+ /// checksums, encryption, duplicates, sorted duplicates, compression,
+ /// multiversion concurrency control, or transactional read operations
+ /// with degree 1 isolation.
+ /// </para>
+ /// </summary>
+ public uint BlobThreshold {
+ get { return blobThreshold; }
+ set {
+ blobThresholdIsSet = true;
+ blobThreshold = value;
+ }
+ }
+
+ /// <summary>
/// The Hash key comparison function.
/// </summary>
/// <remarks>
@@ -95,7 +134,7 @@ namespace BerkeleyDB {
private uint ffactor;
/// <summary>
/// The desired density within the hash table. If no value is specified,
- /// the fill factor will be selected dynamically as pages are filled.
+ /// the fill factor is selected dynamically as pages are filled.
/// </summary>
/// <remarks>
/// <para>
@@ -109,7 +148,7 @@ namespace BerkeleyDB {
/// (pagesize - 32) / (average_key_size + average_data_size + 8)
/// </para>
/// <para>
- /// If the database already exists, this setting will be ignored.
+ /// If the database already exists, this setting is ignored.
/// </para>
/// </remarks>
public uint FillFactor {
@@ -149,12 +188,12 @@ namespace BerkeleyDB {
/// setting <see cref="Duplicates"/> to
/// <see cref="DuplicatesPolicy.SORTED"/>.
/// </para>
- /// <para>
+ /// <para>
/// If no comparison function is specified, the data items are compared
/// lexically, with shorter data items collating before longer data
/// items.
/// </para>
- /// <para>
+ /// <para>
/// If the database already exists when <see cref="HashDatabase.Open"/>
/// is called, the delegate must be the same as that historically used
/// to create the database or corruption can occur.
@@ -162,6 +201,63 @@ namespace BerkeleyDB {
/// </remarks>
public EntryComparisonDelegate DuplicateCompare;
+ internal bool partitionIsSet;
+ private PartitionDelegate partitionFunc;
+ /// <summary>
+ /// Return the application-specified partitioning function.
+ /// </summary>
+ public PartitionDelegate Partition { get { return partitionFunc; } }
+ private DatabaseEntry[] partitionKeys;
+ /// <summary>
+ /// Return an array of type DatabaseEntry in which each entry
+ /// describes the range of keys stored in one of the database's
+ /// partitions. The array covers the entire database.
+ /// </summary>
+ public DatabaseEntry[] PartitionKeys { get { return partitionKeys; } }
+ private uint nparts;
+ /// <summary>
+ /// Return the number of partitions to create.
+ /// </summary>
+ public uint NParts { get { return nparts; } }
+ private bool SetPartition(uint parts, DatabaseEntry[] partKeys,
+ PartitionDelegate partFunc) {
+ partitionIsSet = true;
+ nparts = parts;
+ partitionKeys = partKeys;
+ partitionFunc = partFunc;
+ if (nparts < 2)
+ partitionIsSet = false;
+ else if (partitionKeys == null && partitionFunc == null)
+ partitionIsSet = false;
+ return partitionIsSet;
+ }
+ /// <summary>
+ /// Enable database partitioning using the specified partition keys.
+ /// Return true if partitioning is successfully enabled; otherwise
+ /// return false.
+ /// <param name="keys">
+ /// An array of DatabaseEntry in which each entry defines the range
+ /// of key values to be stored in one of the partitions
+ /// </param>
+ /// </summary>
+ public bool SetPartitionByKeys(DatabaseEntry[] keys) {
+ uint parts = (keys == null ? 0 : ((uint)keys.Length + 1));
+ return (SetPartition(parts, keys, null));
+ }
+ /// <summary>
+ /// Enable database partitioning using the specified number of
+ /// partitions and partition function.
+ /// Return true if the specified number of partitions are successfully
+ /// enabled; otherwise return false.
+ /// <param name="parts">The number of partitions to create</param>
+ /// <param name="partFunc">The partitioning function</param>
+ /// </summary>
+ public bool SetPartitionByCallback(
+ uint parts, PartitionDelegate partFunc) {
+ return (SetPartition(parts, null, partFunc));
+ }
+
internal bool nelemIsSet;
private uint nelems;
/// <summary>
@@ -171,12 +267,12 @@ namespace BerkeleyDB {
/// <para>
/// In order for the estimate to be used when creating the database,
/// <see cref="FillFactor"/> must also be set. If the estimate or fill
- /// factor are not set or are set too low, hash tables will still expand
+ /// factor is not set or is set too low, hash tables still expand
/// gracefully as keys are entered, although a slight performance
/// degradation may be noticed.
/// </para>
/// <para>
- /// If the database already exists, this setting will be ignored.
+ /// If the database already exists, this setting is ignored.
/// </para>
/// </remarks>
public uint TableSize {
@@ -191,6 +287,7 @@ namespace BerkeleyDB {
/// Instantiate a new HashDatabaseConfig object
/// </summary>
public HashDatabaseConfig() {
+ blobThresholdIsSet = false;
Duplicates = DuplicatesPolicy.NONE;
HashComparison = null;
fillFactorIsSet = false;
@@ -198,4 +295,4 @@ namespace BerkeleyDB {
Creation = CreatePolicy.NEVER;
}
}
-} \ No newline at end of file
+}
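
A minimal sketch of how the duplicates options documented above might be used from application code. The file name, the integer encoding of the data items, and the CompareInts delegate are illustrative assumptions, not part of this change; only Duplicates, DuplicateCompare, and HashDatabase.Open come from the library.

    using System;
    using BerkeleyDB;

    class DuplicatesExample {
        // Compare duplicate data items as 4-byte integers instead of lexically.
        // Illustrative only; any total ordering over the data items works.
        static int CompareInts(DatabaseEntry a, DatabaseEntry b) {
            int x = BitConverter.ToInt32(a.Data, 0);
            int y = BitConverter.ToInt32(b.Data, 0);
            return x.CompareTo(y);
        }

        static void Main() {
            var cfg = new HashDatabaseConfig();
            cfg.Creation = CreatePolicy.IF_NEEDED;

            // Keep multiple data items per key, in sorted order.
            cfg.Duplicates = DuplicatesPolicy.SORTED;
            // Without this delegate a default lexical comparison is used.
            cfg.DuplicateCompare = CompareInts;

            HashDatabase db = HashDatabase.Open("dups.db", cfg);
            // ... Put() the same key repeatedly with different data items ...
            db.Close();
        }
    }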
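
A sketch of configuring the new blob settings together with the existing sizing hints. BlobThreshold, BlobDir, FillFactor, and TableSize are the fields from this file; the 4KB page size, key/data sizes, and file names are invented for illustration.

    using BerkeleyDB;

    class BlobConfigExample {
        static void Main() {
            var cfg = new HashDatabaseConfig();
            cfg.Creation = CreatePolicy.IF_NEEDED;

            // Data items of 4KB or more are stored as blobs.
            cfg.BlobThreshold = 4096;
            // Blob storage directory; ignored if the database is opened
            // inside a DatabaseEnvironment.
            cfg.BlobDir = "blob_files";

            // Sizing hints; both are ignored if the database already exists.
            // Fill factor approximation from the docs above:
            //   (pagesize - 32) / (average_key_size + average_data_size + 8)
            cfg.FillFactor = (4096 - 32) / (16 + 64 + 8);  // roughly 46
            cfg.TableSize = 100000;  // estimated number of elements

            HashDatabase db = HashDatabase.Open("blobs.db", cfg);
            // ... Put() large data items; they are stored as blobs ...
            db.Close();
        }
    }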
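
A sketch of enabling partitioning with the new SetPartitionByCallback method. The four-way split on the first key byte is an invented policy, and the PartitionDelegate signature (key in, partition number out) is assumed rather than shown in this diff; SetPartitionByKeys works analogously with an array of boundary keys.

    using BerkeleyDB;

    class PartitionExample {
        // Route each record to one of four partitions based on the first
        // byte of its key. Purely illustrative.
        static uint ByFirstByte(DatabaseEntry key) {
            return key.Data.Length == 0 ? 0u : (uint)(key.Data[0] % 4);
        }

        static void Main() {
            var cfg = new HashDatabaseConfig();
            cfg.Creation = CreatePolicy.IF_NEEDED;

            // Fewer than 2 partitions, or neither keys nor a callback,
            // leaves partitioning disabled and returns false.
            if (!cfg.SetPartitionByCallback(4, ByFirstByte))
                throw new System.InvalidOperationException(
                    "partitioning was not enabled");

            HashDatabase db = HashDatabase.Open("partitioned.db", cfg);
            // ... records are spread across the partition files ...
            db.Close();
        }
    }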