/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
using System.Collections.Generic;
using System.Text;
namespace BerkeleyDB {
/// <summary>
/// A class representing configuration parameters for
/// <see cref="HashDatabase"/>.
/// </summary>
public class HashDatabaseConfig : DatabaseConfig {
    /* Fields for db->set_flags() */

    /// <summary>
    /// Policy for duplicate data items in the database, i.e. whether a
    /// key/data pair may be inserted into the database even when the key
    /// already exists.
    /// </summary>
    /// <remarks>
    /// <para>
    /// The ordering of duplicates in the database for
    /// <see cref="DuplicatesPolicy.UNSORTED"/> is determined by the order
    /// of insertion, unless the ordering is otherwise specified by use of
    /// a cursor operation or a duplicate sort function. The ordering of
    /// duplicates for <see cref="DuplicatesPolicy.SORTED"/> is determined
    /// by the duplicate comparison function. If the application does not
    /// specify a comparison function using
    /// <see cref="DuplicateCompare"/>, a default lexical comparison is
    /// used.
    /// </para>
    /// <para>
    /// <see cref="DuplicatesPolicy.SORTED"/> is preferred to
    /// <see cref="DuplicatesPolicy.UNSORTED"/> for performance reasons;
    /// UNSORTED should only be used by applications wanting to order
    /// duplicate data items manually.
    /// </para>
    /// <para>
    /// If the database already exists, the value of Duplicates must be
    /// the same as the existing database or an error is returned.
    /// </para>
    /// </remarks>
    public DuplicatesPolicy Duplicates;

    /* The combined db->set_flags() value: base flags plus the
     * duplicates-policy bits. */
    internal new uint flags {
        get { return base.flags | (uint)Duplicates; }
    }

    /// <summary>
    /// The policy for how to handle database creation.
    /// </summary>
    /// <remarks>
    /// If the database does not already exist and
    /// <see cref="CreatePolicy.NEVER"/> is set, opening the database
    /// fails.
    /// </remarks>
    public CreatePolicy Creation;

    /* The combined open() flags: base open flags plus the create-policy
     * bits. */
    internal new uint openFlags {
        get { return base.openFlags | (uint)Creation; }
    }

    /// <summary>
    /// The path of the directory where blobs are stored.
    /// <para>
    /// If the database is opened within an environment, this path setting
    /// is ignored during open. Query the opened database's blob directory
    /// to identify the current storage location of blobs.
    /// </para>
    /// </summary>
    public string BlobDir;

    /* True once BlobThreshold has been explicitly assigned. */
    internal bool blobThresholdIsSet;
    private uint thresholdBytes;
    /// <summary>
    /// The size, in bytes, used to determine when a data item is stored
    /// as a blob.
    /// <para>
    /// Any data item equal to or larger in size than the threshold value
    /// is automatically stored as a blob.
    /// </para>
    /// <para>
    /// If the threshold value is 0, blobs are never used by the database.
    /// </para>
    /// <para>
    /// It is illegal to enable blob support in a database configured as
    /// an in-memory database or with checksums, encryption, duplicates,
    /// sorted duplicates, compression, multiversion concurrency control,
    /// or transactional read operations with degree 1 isolation.
    /// </para>
    /// </summary>
    public uint BlobThreshold {
        get { return thresholdBytes; }
        set {
            thresholdBytes = value;
            blobThresholdIsSet = true;
        }
    }

    /// <summary>
    /// The Hash key comparison function.
    /// </summary>
    /// <remarks>
    /// <para>
    /// The comparison function is called whenever it is necessary to
    /// compare a key specified by the application with a key currently
    /// stored in the database.
    /// </para>
    /// <para>
    /// If no comparison function is specified, the keys are compared
    /// lexically, with shorter keys collating before longer keys.
    /// </para>
    /// <para>
    /// If the database already exists, the comparison function must be
    /// the same as that historically used to create the database or
    /// corruption can occur.
    /// </para>
    /// </remarks>
    public EntryComparisonDelegate HashComparison;

    /* True once FillFactor has been explicitly assigned. */
    internal bool fillFactorIsSet;
    private uint density;
    /// <summary>
    /// The desired density within the hash table. If no value is
    /// specified, the fill factor is selected dynamically as pages are
    /// filled.
    /// </summary>
    /// <remarks>
    /// <para>
    /// The density is an approximation of the number of keys allowed to
    /// accumulate in any one bucket, determining when the hash table
    /// grows or shrinks. If you know the average sizes of the keys and
    /// data in your data set, setting the fill factor can enhance
    /// performance. A reasonable rule for computing the fill factor is:
    /// </para>
    /// <para>
    /// (pagesize - 32) / (average_key_size + average_data_size + 8)
    /// </para>
    /// <para>
    /// If the database already exists, this setting is ignored.
    /// </para>
    /// </remarks>
    public uint FillFactor {
        get { return density; }
        set {
            density = value;
            fillFactorIsSet = true;
        }
    }

    /// <summary>
    /// A user-defined hash function; if no hash function is specified, a
    /// default hash function is used.
    /// </summary>
    /// <remarks>
    /// <para>
    /// Because no hash function performs equally well on all possible
    /// data, the user may find that the built-in hash function performs
    /// poorly with a particular data set.
    /// </para>
    /// <para>
    /// If the database already exists, HashFunction must be the same as
    /// that historically used to create the database or corruption can
    /// occur.
    /// </para>
    /// </remarks>
    public HashFunctionDelegate HashFunction;

    /// <summary>
    /// The duplicate data item comparison function.
    /// </summary>
    /// <remarks>
    /// <para>
    /// The comparison function is called whenever it is necessary to
    /// compare a data item specified by the application with a data item
    /// currently stored in the database. Setting DuplicateCompare implies
    /// setting <see cref="Duplicates"/> to
    /// <see cref="DuplicatesPolicy.SORTED"/>.
    /// </para>
    /// <para>
    /// If no comparison function is specified, the data items are
    /// compared lexically, with shorter data items collating before
    /// longer data items.
    /// </para>
    /// <para>
    /// If the database already exists when it is opened, the delegate
    /// must be the same as that historically used to create the database
    /// or corruption can occur.
    /// </para>
    /// </remarks>
    public EntryComparisonDelegate DuplicateCompare;

    /* True when a valid partitioning configuration has been recorded. */
    internal bool partitionIsSet;
    private PartitionDelegate partCallback;
    /// <summary>
    /// Return the application-specified partitioning function.
    /// </summary>
    public PartitionDelegate Partition {
        get { return partCallback; }
    }
    private DatabaseEntry[] partKeyRanges;
    /// <summary>
    /// Return an array of type DatabaseEntry where each array entry
    /// contains the range of keys contained in one of the database's
    /// partitions. The array contains the information for the entire
    /// database.
    /// </summary>
    public DatabaseEntry[] PartitionKeys {
        get { return partKeyRanges; }
    }
    private uint numPartitions;
    /// <summary>
    /// Return the number of partitions to create.
    /// </summary>
    public uint NParts {
        get { return numPartitions; }
    }

    /* Record the partitioning configuration. It is considered valid only
     * when at least two partitions are requested and either a key-range
     * array or a partitioning callback is supplied. The raw values are
     * stored unconditionally, matching the set-then-validate contract. */
    private bool ConfigurePartitioning(uint parts,
        DatabaseEntry[] partKeys, PartitionDelegate partFunc) {
        numPartitions = parts;
        partKeyRanges = partKeys;
        partCallback = partFunc;
        partitionIsSet =
            (parts >= 2) && (partKeys != null || partFunc != null);
        return partitionIsSet;
    }

    /// <summary>
    /// Enable database partitioning using the specified partition keys.
    /// Return true if partitioning is successfully enabled; otherwise
    /// return false.
    /// </summary>
    /// <param name="keys">
    /// An array of DatabaseEntry where each array entry defines the range
    /// of key values to be stored in each partition.
    /// </param>
    public bool SetPartitionByKeys(DatabaseEntry[] keys) {
        /* N key boundaries delimit N + 1 partitions. */
        uint parts = (keys == null) ? 0 : (uint)keys.Length + 1;
        return ConfigurePartitioning(parts, keys, null);
    }

    /// <summary>
    /// Enable database partitioning using the specified number of
    /// partitions and partition function.
    /// Return true if the specified number of partitions are successfully
    /// enabled; otherwise return false.
    /// </summary>
    /// <param name="parts">The number of partitions to create.</param>
    /// <param name="partFunc">The partitioning function.</param>
    public bool SetPartitionByCallback(
        uint parts, PartitionDelegate partFunc) {
        return ConfigurePartitioning(parts, null, partFunc);
    }

    /* True once TableSize has been explicitly assigned. */
    internal bool nelemIsSet;
    private uint estTableSize;
    /// <summary>
    /// An estimate of the final size of the hash table.
    /// </summary>
    /// <remarks>
    /// <para>
    /// In order for the estimate to be used when creating the database,
    /// <see cref="FillFactor"/> must also be set. If the estimate or fill
    /// factor is not set or is set too low, hash tables still expand
    /// gracefully as keys are entered, although a slight performance
    /// degradation may be noticed.
    /// </para>
    /// <para>
    /// If the database already exists, this setting is ignored.
    /// </para>
    /// </remarks>
    public uint TableSize {
        get { return estTableSize; }
        set {
            estTableSize = value;
            nelemIsSet = true;
        }
    }

    /// <summary>
    /// Instantiate a new HashDatabaseConfig object.
    /// </summary>
    public HashDatabaseConfig() {
        Duplicates = DuplicatesPolicy.NONE;
        Creation = CreatePolicy.NEVER;
        HashComparison = null;
        blobThresholdIsSet = false;
        fillFactorIsSet = false;
        nelemIsSet = false;
    }
}
}