// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_

#include <cstddef>

#include "leveldb/export.h"
namespace leveldb {

class Cache;
class Comparator;
class Env;
class FilterPolicy;
class Logger;
class Snapshot;

// DB contents are stored in a set of blocks, each of which holds a
// sequence of key,value pairs. Each block may be compressed before
// being stored in a file. The following enum describes which
// compression method (if any) is used to compress a block.
enum CompressionType {
  // NOTE: do not change the values of existing entries, as these are
  // part of the persistent format on disk.
  kNoCompression = 0x0,
  kSnappyCompression = 0x1
};

32// Options to control the behavior of a database (passed to DB::Open)
33struct LEVELDB_EXPORT Options {
34 // Create an Options object with default values for all fields.
35 Options();
36
37 // -------------------
38 // Parameters that affect behavior
39
40 // Comparator used to define the order of keys in the table.
41 // Default: a comparator that uses lexicographic byte-wise ordering
42 //
43 // REQUIRES: The client must ensure that the comparator supplied
44 // here has the same name and orders keys *exactly* the same as the
45 // comparator provided to previous open calls on the same DB.
46 const Comparator* comparator;
47
48 // If true, the database will be created if it is missing.
49 bool create_if_missing = false;
50
51 // If true, an error is raised if the database already exists.
52 bool error_if_exists = false;
53
54 // If true, the implementation will do aggressive checking of the
55 // data it is processing and will stop early if it detects any
56 // errors. This may have unforeseen ramifications: for example, a
57 // corruption of one DB entry may cause a large number of entries to
58 // become unreadable or for the entire DB to become unopenable.
59 bool paranoid_checks = false;
60
61 // Use the specified object to interact with the environment,
62 // e.g. to read/write files, schedule background work, etc.
63 // Default: Env::Default()
64 Env* env;
65
66 // Any internal progress/error information generated by the db will
67 // be written to info_log if it is non-null, or to a file stored
68 // in the same directory as the DB contents if info_log is null.
69 Logger* info_log = nullptr;
70
71 // -------------------
72 // Parameters that affect performance
73
74 // Amount of data to build up in memory (backed by an unsorted log
75 // on disk) before converting to a sorted on-disk file.
76 //
77 // Larger values increase performance, especially during bulk loads.
78 // Up to two write buffers may be held in memory at the same time,
79 // so you may wish to adjust this parameter to control memory usage.
80 // Also, a larger write buffer will result in a longer recovery time
81 // the next time the database is opened.
82 size_t write_buffer_size = 4 * 1024 * 1024;
83
84 // Number of open files that can be used by the DB. You may need to
85 // increase this if your database has a large working set (budget
86 // one open file per 2MB of working set).
87 int max_open_files = 1000;
88
89 // Control over blocks (user data is stored in a set of blocks, and
90 // a block is the unit of reading from disk).
91
92 // If non-null, use the specified cache for blocks.
93 // If null, leveldb will automatically create and use an 8MB internal cache.
94 Cache* block_cache = nullptr;
95
96 // Approximate size of user data packed per block. Note that the
97 // block size specified here corresponds to uncompressed data. The
98 // actual size of the unit read from disk may be smaller if
99 // compression is enabled. This parameter can be changed dynamically.
100 size_t block_size = 4 * 1024;
101
102 // Number of keys between restart points for delta encoding of keys.
103 // This parameter can be changed dynamically. Most clients should
104 // leave this parameter alone.
105 int block_restart_interval = 16;
106
107 // Leveldb will write up to this amount of bytes to a file before
108 // switching to a new one.
109 // Most clients should leave this parameter alone. However if your
110 // filesystem is more efficient with larger files, you could
111 // consider increasing the value. The downside will be longer
112 // compactions and hence longer latency/performance hiccups.
113 // Another reason to increase this parameter might be when you are
114 // initially populating a large database.
115 size_t max_file_size = 2 * 1024 * 1024;
116
117 // Compress blocks using the specified compression algorithm. This
118 // parameter can be changed dynamically.
119 //
120 // Default: kSnappyCompression, which gives lightweight but fast
121 // compression.
122 //
123 // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
124 // ~200-500MB/s compression
125 // ~400-800MB/s decompression
126 // Note that these speeds are significantly faster than most
127 // persistent storage speeds, and therefore it is typically never
128 // worth switching to kNoCompression. Even if the input data is
129 // incompressible, the kSnappyCompression implementation will
130 // efficiently detect that and will switch to uncompressed mode.
131 CompressionType compression = kSnappyCompression;
132
133 // EXPERIMENTAL: If true, append to existing MANIFEST and log files
134 // when a database is opened. This can significantly speed up open.
135 //
136 // Default: currently false, but may become true later.
137 bool reuse_logs = false;
138
139 // If non-null, use the specified filter policy to reduce disk reads.
140 // Many applications will benefit from passing the result of
141 // NewBloomFilterPolicy() here.
142 const FilterPolicy* filter_policy = nullptr;
143};
144
145// Options that control read operations
146struct LEVELDB_EXPORT ReadOptions {
147 // If true, all data read from underlying storage will be
148 // verified against corresponding checksums.
149 bool verify_checksums = false;
150
151 // Should the data read for this iteration be cached in memory?
152 // Callers may wish to set this field to false for bulk scans.
153 bool fill_cache = true;
154
155 // If "snapshot" is non-null, read as of the supplied snapshot
156 // (which must belong to the DB that is being read and which must
157 // not have been released). If "snapshot" is null, use an implicit
158 // snapshot of the state at the beginning of this read operation.
159 const Snapshot* snapshot = nullptr;
160};
161
162// Options that control write operations
163struct LEVELDB_EXPORT WriteOptions {
164 WriteOptions() = default;
165
166 // If true, the write will be flushed from the operating system
167 // buffer cache (by calling WritableFile::Sync()) before the write
168 // is considered complete. If this flag is true, writes will be
169 // slower.
170 //
171 // If this flag is false, and the machine crashes, some recent
172 // writes may be lost. Note that if it is just the process that
173 // crashes (i.e., the machine does not reboot), no writes will be
174 // lost even if sync==false.
175 //
176 // In other words, a DB write with sync==false has similar
177 // crash semantics as the "write()" system call. A DB write
178 // with sync==true has similar crash semantics to a "write()"
179 // system call followed by "fsync()".
180 bool sync = false;
181};

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_INCLUDE_OPTIONS_H_