author     Zeno Albisser <zeno.albisser@digia.com>  2013-08-15 21:46:11 +0200
committer  Zeno Albisser <zeno.albisser@digia.com>  2013-08-15 21:46:11 +0200
commit     679147eead574d186ebf3069647b4c23e8ccace6 (patch)
tree       fc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /chromium/net/disk_cache
Initial import.
Diffstat (limited to 'chromium/net/disk_cache')
-rw-r--r--  chromium/net/disk_cache/addr.cc | 92
-rw-r--r--  chromium/net/disk_cache/addr.h | 184
-rw-r--r--  chromium/net/disk_cache/addr_unittest.cc | 59
-rw-r--r--  chromium/net/disk_cache/backend_impl.cc | 2120
-rw-r--r--  chromium/net/disk_cache/backend_impl.h | 400
-rw-r--r--  chromium/net/disk_cache/backend_unittest.cc | 3415
-rw-r--r--  chromium/net/disk_cache/bitmap.cc | 311
-rw-r--r--  chromium/net/disk_cache/bitmap.h | 136
-rw-r--r--  chromium/net/disk_cache/bitmap_unittest.cc | 293
-rw-r--r--  chromium/net/disk_cache/block_files.cc | 695
-rw-r--r--  chromium/net/disk_cache/block_files.h | 152
-rw-r--r--  chromium/net/disk_cache/block_files_unittest.cc | 350
-rw-r--r--  chromium/net/disk_cache/cache_creator.cc | 163
-rw-r--r--  chromium/net/disk_cache/cache_util.cc | 114
-rw-r--r--  chromium/net/disk_cache/cache_util.h | 41
-rw-r--r--  chromium/net/disk_cache/cache_util_posix.cc | 46
-rw-r--r--  chromium/net/disk_cache/cache_util_unittest.cc | 96
-rw-r--r--  chromium/net/disk_cache/cache_util_win.cc | 46
-rw-r--r--  chromium/net/disk_cache/disk_cache.h | 324
-rw-r--r--  chromium/net/disk_cache/disk_cache_perftest.cc | 250
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_base.cc | 307
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_base.h | 176
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_util.cc | 146
-rw-r--r--  chromium/net/disk_cache/disk_cache_test_util.h | 105
-rw-r--r--  chromium/net/disk_cache/disk_format.cc | 15
-rw-r--r--  chromium/net/disk_cache/disk_format.h | 153
-rw-r--r--  chromium/net/disk_cache/disk_format_base.h | 130
-rw-r--r--  chromium/net/disk_cache/entry_impl.cc | 1550
-rw-r--r--  chromium/net/disk_cache/entry_impl.h | 278
-rw-r--r--  chromium/net/disk_cache/entry_unittest.cc | 3405
-rw-r--r--  chromium/net/disk_cache/errors.h | 33
-rw-r--r--  chromium/net/disk_cache/eviction.cc | 597
-rw-r--r--  chromium/net/disk_cache/eviction.h | 91
-rw-r--r--  chromium/net/disk_cache/experiments.h | 28
-rw-r--r--  chromium/net/disk_cache/file.cc | 16
-rw-r--r--  chromium/net/disk_cache/file.h | 95
-rw-r--r--  chromium/net/disk_cache/file_block.h | 31
-rw-r--r--  chromium/net/disk_cache/file_lock.cc | 47
-rw-r--r--  chromium/net/disk_cache/file_lock.h | 45
-rw-r--r--  chromium/net/disk_cache/file_posix.cc | 309
-rw-r--r--  chromium/net/disk_cache/file_win.cc | 275
-rw-r--r--  chromium/net/disk_cache/flash/flash_cache_test_base.cc | 29
-rw-r--r--  chromium/net/disk_cache/flash/flash_cache_test_base.h | 43
-rw-r--r--  chromium/net/disk_cache/flash/flash_entry_impl.cc | 150
-rw-r--r--  chromium/net/disk_cache/flash/flash_entry_impl.h | 98
-rw-r--r--  chromium/net/disk_cache/flash/format.h | 32
-rw-r--r--  chromium/net/disk_cache/flash/internal_entry.cc | 86
-rw-r--r--  chromium/net/disk_cache/flash/internal_entry.h | 63
-rw-r--r--  chromium/net/disk_cache/flash/log_store.cc | 185
-rw-r--r--  chromium/net/disk_cache/flash/log_store.h | 101
-rw-r--r--  chromium/net/disk_cache/flash/log_store_entry.cc | 171
-rw-r--r--  chromium/net/disk_cache/flash/log_store_entry.h | 65
-rw-r--r--  chromium/net/disk_cache/flash/log_store_entry_unittest.cc | 69
-rw-r--r--  chromium/net/disk_cache/flash/log_store_unittest.cc | 131
-rw-r--r--  chromium/net/disk_cache/flash/segment.cc | 122
-rw-r--r--  chromium/net/disk_cache/flash/segment.h | 118
-rw-r--r--  chromium/net/disk_cache/flash/segment_unittest.cc | 152
-rw-r--r--  chromium/net/disk_cache/flash/storage.cc | 63
-rw-r--r--  chromium/net/disk_cache/flash/storage.h | 35
-rw-r--r--  chromium/net/disk_cache/flash/storage_unittest.cc | 41
-rw-r--r--  chromium/net/disk_cache/histogram_macros.h | 124
-rw-r--r--  chromium/net/disk_cache/in_flight_backend_io.cc | 522
-rw-r--r--  chromium/net/disk_cache/in_flight_backend_io.h | 223
-rw-r--r--  chromium/net/disk_cache/in_flight_io.cc | 110
-rw-r--r--  chromium/net/disk_cache/in_flight_io.h | 136
-rw-r--r--  chromium/net/disk_cache/mapped_file.cc | 65
-rw-r--r--  chromium/net/disk_cache/mapped_file.h | 73
-rw-r--r--  chromium/net/disk_cache/mapped_file_avoid_mmap_posix.cc | 73
-rw-r--r--  chromium/net/disk_cache/mapped_file_posix.cc | 64
-rw-r--r--  chromium/net/disk_cache/mapped_file_unittest.cc | 91
-rw-r--r--  chromium/net/disk_cache/mapped_file_win.cc | 65
-rw-r--r--  chromium/net/disk_cache/mem_backend_impl.cc | 337
-rw-r--r--  chromium/net/disk_cache/mem_backend_impl.h | 120
-rw-r--r--  chromium/net/disk_cache/mem_entry_impl.cc | 631
-rw-r--r--  chromium/net/disk_cache/mem_entry_impl.h | 189
-rw-r--r--  chromium/net/disk_cache/mem_rankings.cc | 67
-rw-r--r--  chromium/net/disk_cache/mem_rankings.h | 44
-rw-r--r--  chromium/net/disk_cache/net_log_parameters.cc | 133
-rw-r--r--  chromium/net/disk_cache/net_log_parameters.h | 62
-rw-r--r--  chromium/net/disk_cache/rankings.cc | 922
-rw-r--r--  chromium/net/disk_cache/rankings.h | 214
-rw-r--r--  chromium/net/disk_cache/simple/OWNERS | 2
-rw-r--r--  chromium/net/disk_cache/simple/simple_backend_impl.cc | 570
-rw-r--r--  chromium/net/disk_cache/simple/simple_backend_impl.h | 182
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_format.cc | 21
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_format.h | 57
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_impl.cc | 1187
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_impl.h | 290
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_operation.cc | 184
-rw-r--r--  chromium/net/disk_cache/simple/simple_entry_operation.h | 125
-rw-r--r--  chromium/net/disk_cache/simple/simple_index.cc | 461
-rw-r--r--  chromium/net/disk_cache/simple/simple_index.h | 203
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file.cc | 423
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file.h | 156
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_file_unittest.cc | 243
-rw-r--r--  chromium/net/disk_cache/simple/simple_index_unittest.cc | 581
-rw-r--r--  chromium/net/disk_cache/simple/simple_net_log_parameters.cc | 55
-rw-r--r--  chromium/net/disk_cache/simple/simple_net_log_parameters.h | 32
-rw-r--r--  chromium/net/disk_cache/simple/simple_synchronous_entry.cc | 635
-rw-r--r--  chromium/net/disk_cache/simple/simple_synchronous_entry.h | 166
-rw-r--r--  chromium/net/disk_cache/simple/simple_test_util.cc | 34
-rw-r--r--  chromium/net/disk_cache/simple/simple_test_util.h | 29
-rw-r--r--  chromium/net/disk_cache/simple/simple_util.cc | 100
-rw-r--r--  chromium/net/disk_cache/simple/simple_util.h | 73
-rw-r--r--  chromium/net/disk_cache/simple/simple_util_unittest.cc | 75
-rw-r--r--  chromium/net/disk_cache/sparse_control.cc | 884
-rw-r--r--  chromium/net/disk_cache/sparse_control.h | 177
-rw-r--r--  chromium/net/disk_cache/stats.cc | 309
-rw-r--r--  chromium/net/disk_cache/stats.h | 105
-rw-r--r--  chromium/net/disk_cache/stats_histogram.cc | 89
-rw-r--r--  chromium/net/disk_cache/stats_histogram.h | 55
-rw-r--r--  chromium/net/disk_cache/storage_block-inl.h | 175
-rw-r--r--  chromium/net/disk_cache/storage_block.h | 95
-rw-r--r--  chromium/net/disk_cache/storage_block_unittest.cc | 72
-rw-r--r--  chromium/net/disk_cache/stress_cache.cc | 294
-rw-r--r--  chromium/net/disk_cache/stress_support.h | 39
-rw-r--r--  chromium/net/disk_cache/trace.cc | 192
-rw-r--r--  chromium/net/disk_cache/trace.h | 41
-rw-r--r--  chromium/net/disk_cache/tracing_cache_backend.cc | 317
-rw-r--r--  chromium/net/disk_cache/tracing_cache_backend.h | 81
-rw-r--r--  chromium/net/disk_cache/v3/backend_impl_v3.cc | 1640
-rw-r--r--  chromium/net/disk_cache/v3/backend_impl_v3.h | 288
-rw-r--r--  chromium/net/disk_cache/v3/backend_worker.cc | 485
-rw-r--r--  chromium/net/disk_cache/v3/backend_worker.h | 60
-rw-r--r--  chromium/net/disk_cache/v3/block_bitmaps.cc | 332
-rw-r--r--  chromium/net/disk_cache/v3/block_bitmaps.h | 75
-rw-r--r--  chromium/net/disk_cache/v3/block_bitmaps_unittest.cc | 350
-rw-r--r--  chromium/net/disk_cache/v3/disk_format_v3.h | 190
-rw-r--r--  chromium/net/disk_cache/v3/entry_impl_v3.cc | 1395
-rw-r--r--  chromium/net/disk_cache/v3/entry_impl_v3.h | 223
-rw-r--r--  chromium/net/disk_cache/v3/eviction_v3.cc | 502
-rw-r--r--  chromium/net/disk_cache/v3/eviction_v3.h | 74
-rw-r--r--  chromium/net/disk_cache/v3/sparse_control_v3.cc | 868
-rw-r--r--  chromium/net/disk_cache/v3/sparse_control_v3.h | 175
134 files changed, 38604 insertions, 0 deletions
diff --git a/chromium/net/disk_cache/addr.cc b/chromium/net/disk_cache/addr.cc
new file mode 100644
index 00000000000..8f41e6fe278
--- /dev/null
+++ b/chromium/net/disk_cache/addr.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/addr.h"
+
+#include "base/logging.h"
+
+namespace disk_cache {
+
+int Addr::start_block() const {
+ DCHECK(is_block_file());
+ return value_ & kStartBlockMask;
+}
+
+int Addr::num_blocks() const {
+ DCHECK(is_block_file() || !value_);
+ return ((value_ & kNumBlocksMask) >> kNumBlocksOffset) + 1;
+}
+
+bool Addr::SetFileNumber(int file_number) {
+ DCHECK(is_separate_file());
+ if (file_number & ~kFileNameMask)
+ return false;
+ value_ = kInitializedMask | file_number;
+ return true;
+}
+
+bool Addr::SanityCheckV2() const {
+ if (!is_initialized())
+ return !value_;
+
+ if (file_type() > BLOCK_4K)
+ return false;
+
+ if (is_separate_file())
+ return true;
+
+ return !reserved_bits();
+}
+
+bool Addr::SanityCheckV3() const {
+ if (!is_initialized())
+ return !value_;
+
+ // For actual entries, SanityCheckForEntryV3 should be used.
+ if (file_type() > BLOCK_FILES)
+ return false;
+
+ if (is_separate_file())
+ return true;
+
+ return !reserved_bits();
+}
+
+bool Addr::SanityCheckForEntryV2() const {
+ if (!SanityCheckV2() || !is_initialized())
+ return false;
+
+ if (is_separate_file() || file_type() != BLOCK_256)
+ return false;
+
+ return true;
+}
+
+bool Addr::SanityCheckForEntryV3() const {
+ if (!is_initialized())
+ return false;
+
+ if (reserved_bits())
+ return false;
+
+ if (file_type() != BLOCK_ENTRIES && file_type() != BLOCK_EVICTED)
+ return false;
+
+ if (num_blocks() != 1)
+ return false;
+
+ return true;
+}
+
+bool Addr::SanityCheckForRankings() const {
+ if (!SanityCheckV2() || !is_initialized())
+ return false;
+
+ if (is_separate_file() || file_type() != RANKINGS || num_blocks() != 1)
+ return false;
+
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/addr.h b/chromium/net/disk_cache/addr.h
new file mode 100644
index 00000000000..f0fb1ca5701
--- /dev/null
+++ b/chromium/net/disk_cache/addr.h
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is an internal class that handles the address of a cache record.
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_ADDR_H_
+#define NET_DISK_CACHE_ADDR_H_
+
+#include "net/base/net_export.h"
+#include "net/disk_cache/disk_format_base.h"
+
+namespace disk_cache {
+
+enum FileType {
+ EXTERNAL = 0,
+ RANKINGS = 1,
+ BLOCK_256 = 2,
+ BLOCK_1K = 3,
+ BLOCK_4K = 4,
+ BLOCK_FILES = 5,
+ BLOCK_ENTRIES = 6,
+ BLOCK_EVICTED = 7
+};
+
+const int kMaxBlockSize = 4096 * 4;
+const int kMaxBlockFile = 255;
+const int kMaxNumBlocks = 4;
+const int kFirstAdditionalBlockFile = 4;
+const int kFirstAdditionalBlockFileV3 = 7;
+
+// Defines a storage address for a cache record
+//
+// Header:
+// 1000 0000 0000 0000 0000 0000 0000 0000 : initialized bit
+// 0111 0000 0000 0000 0000 0000 0000 0000 : file type
+//
+// File type values:
+// 0 = separate file on disk
+// 1 = rankings block file
+// 2 = 256 byte block file
+// 3 = 1k byte block file
+// 4 = 4k byte block file
+// 5 = external files block file
+// 6 = active entries block file
+// 7 = evicted entries block file
+//
+// If separate file:
+// 0000 1111 1111 1111 1111 1111 1111 1111 : file# 0 - 268,435,456 (2^28)
+//
+// If block file:
+// 0000 1100 0000 0000 0000 0000 0000 0000 : reserved bits
+// 0000 0011 0000 0000 0000 0000 0000 0000 : number of contiguous blocks 1-4
+// 0000 0000 1111 1111 0000 0000 0000 0000 : file selector 0 - 255
+// 0000 0000 0000 0000 1111 1111 1111 1111 : block# 0 - 65,535 (2^16)
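+//
+// Worked example (illustrative note, not part of the upstream comment): the
+// value 0xB2050019 is an initialized block-file address with file type 3
+// (BLOCK_1K), a num_blocks field of 2 (i.e. three contiguous blocks), file
+// selector 5 and start block 25 -- the same address built by
+// Addr(BLOCK_1K, 3, 5, 25) in addr_unittest.cc.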
+class NET_EXPORT_PRIVATE Addr {
+ public:
+ Addr() : value_(0) {}
+ explicit Addr(CacheAddr address) : value_(address) {}
+ Addr(FileType file_type, int max_blocks, int block_file, int index) {
+ value_ = ((file_type << kFileTypeOffset) & kFileTypeMask) |
+ (((max_blocks - 1) << kNumBlocksOffset) & kNumBlocksMask) |
+ ((block_file << kFileSelectorOffset) & kFileSelectorMask) |
+ (index & kStartBlockMask) | kInitializedMask;
+ }
+
+ CacheAddr value() const { return value_; }
+ void set_value(CacheAddr address) {
+ value_ = address;
+ }
+
+ bool is_initialized() const {
+ return (value_ & kInitializedMask) != 0;
+ }
+
+ bool is_separate_file() const {
+ return (value_ & kFileTypeMask) == 0;
+ }
+
+ bool is_block_file() const {
+ return !is_separate_file();
+ }
+
+ FileType file_type() const {
+ return static_cast<FileType>((value_ & kFileTypeMask) >> kFileTypeOffset);
+ }
+
+ int FileNumber() const {
+ if (is_separate_file())
+ return value_ & kFileNameMask;
+ else
+ return ((value_ & kFileSelectorMask) >> kFileSelectorOffset);
+ }
+
+ int start_block() const;
+ int num_blocks() const;
+ bool SetFileNumber(int file_number);
+ int BlockSize() const {
+ return BlockSizeForFileType(file_type());
+ }
+
+ bool operator==(Addr other) const {
+ return value_ == other.value_;
+ }
+
+ bool operator!=(Addr other) const {
+ return value_ != other.value_;
+ }
+
+ static Addr FromEntryAddress(uint32 value) {
+ return Addr(kInitializedMask + (BLOCK_ENTRIES << kFileTypeOffset) + value);
+ }
+
+ static Addr FromEvictedAddress(uint32 value) {
+ return Addr(kInitializedMask + (BLOCK_EVICTED << kFileTypeOffset) + value);
+ }
+
+ static int BlockSizeForFileType(FileType file_type) {
+ switch (file_type) {
+ case RANKINGS:
+ return 36;
+ case BLOCK_256:
+ return 256;
+ case BLOCK_1K:
+ return 1024;
+ case BLOCK_4K:
+ return 4096;
+ case BLOCK_FILES:
+ return 8;
+ case BLOCK_ENTRIES:
+ return 104;
+ case BLOCK_EVICTED:
+ return 48;
+ default:
+ return 0;
+ }
+ }
+
+ static FileType RequiredFileType(int size) {
+ if (size < 1024)
+ return BLOCK_256;
+ else if (size < 4096)
+ return BLOCK_1K;
+ else if (size <= 4096 * 4)
+ return BLOCK_4K;
+ else
+ return EXTERNAL;
+ }
+
+ static int RequiredBlocks(int size, FileType file_type) {
+ int block_size = BlockSizeForFileType(file_type);
+ return (size + block_size - 1) / block_size;
+ }
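+
+  // Worked example (illustrative note, not part of the upstream source): a
+  // 5000-byte record gives RequiredFileType(5000) == BLOCK_4K and
+  // RequiredBlocks(5000, BLOCK_4K) == 2, i.e. two contiguous 4 KB blocks.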
+
+ // Returns true if this address looks like a valid one.
+ bool SanityCheckV2() const;
+ bool SanityCheckV3() const;
+ bool SanityCheckForEntryV2() const;
+ bool SanityCheckForEntryV3() const;
+ bool SanityCheckForRankings() const;
+
+ private:
+ uint32 reserved_bits() const {
+ return value_ & kReservedBitsMask;
+ }
+
+ static const uint32 kInitializedMask = 0x80000000;
+ static const uint32 kFileTypeMask = 0x70000000;
+ static const uint32 kFileTypeOffset = 28;
+ static const uint32 kReservedBitsMask = 0x0c000000;
+ static const uint32 kNumBlocksMask = 0x03000000;
+ static const uint32 kNumBlocksOffset = 24;
+ static const uint32 kFileSelectorMask = 0x00ff0000;
+ static const uint32 kFileSelectorOffset = 16;
+ static const uint32 kStartBlockMask = 0x0000FFFF;
+ static const uint32 kFileNameMask = 0x0FFFFFFF;
+
+ CacheAddr value_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ADDR_H_
diff --git a/chromium/net/disk_cache/addr_unittest.cc b/chromium/net/disk_cache/addr_unittest.cc
new file mode 100644
index 00000000000..a6da03cf777
--- /dev/null
+++ b/chromium/net/disk_cache/addr_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
+TEST_F(DiskCacheTest, CacheAddr_Size) {
+ Addr addr1(0);
+ EXPECT_FALSE(addr1.is_initialized());
+
+ // The object should not be more expensive than the actual address.
+ EXPECT_EQ(sizeof(uint32), sizeof(addr1));
+}
+
+TEST_F(DiskCacheTest, CacheAddr_ValidValues) {
+ Addr addr2(BLOCK_1K, 3, 5, 25);
+ EXPECT_EQ(BLOCK_1K, addr2.file_type());
+ EXPECT_EQ(3, addr2.num_blocks());
+ EXPECT_EQ(5, addr2.FileNumber());
+ EXPECT_EQ(25, addr2.start_block());
+ EXPECT_EQ(1024, addr2.BlockSize());
+}
+
+TEST_F(DiskCacheTest, CacheAddr_InvalidValues) {
+ Addr addr3(BLOCK_4K, 0x44, 0x41508, 0x952536);
+ EXPECT_EQ(BLOCK_4K, addr3.file_type());
+ EXPECT_EQ(4, addr3.num_blocks());
+ EXPECT_EQ(8, addr3.FileNumber());
+ EXPECT_EQ(0x2536, addr3.start_block());
+ EXPECT_EQ(4096, addr3.BlockSize());
+}
+
+TEST_F(DiskCacheTest, CacheAddr_SanityCheck) {
+ // First a few valid values.
+ EXPECT_TRUE(Addr(0).SanityCheckV2());
+ EXPECT_TRUE(Addr(0x80001000).SanityCheckV2());
+ EXPECT_TRUE(Addr(0xC3FFFFFF).SanityCheckV2());
+ EXPECT_TRUE(Addr(0xC0FFFFFF).SanityCheckV2());
+ EXPECT_TRUE(Addr(0xD0001000).SanityCheckV3());
+
+ // Not initialized.
+ EXPECT_FALSE(Addr(0x20).SanityCheckV2());
+ EXPECT_FALSE(Addr(0x10001000).SanityCheckV2());
+
+ // Invalid file type.
+ EXPECT_FALSE(Addr(0xD0001000).SanityCheckV2());
+ EXPECT_FALSE(Addr(0xE0001000).SanityCheckV3());
+ EXPECT_FALSE(Addr(0xF0000000).SanityCheckV2());
+
+ // Reserved bits.
+ EXPECT_FALSE(Addr(0x14000000).SanityCheckV2());
+ EXPECT_FALSE(Addr(0x18000000).SanityCheckV2());
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/backend_impl.cc b/chromium/net/disk_cache/backend_impl.cc
new file mode 100644
index 00000000000..8d7fd461102
--- /dev/null
+++ b/chromium/net/disk_cache/backend_impl.cc
@@ -0,0 +1,2120 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/backend_impl.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/hash.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/stats_counters.h"
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/file.h"
+
+// This has to be defined before including histogram_macros.h from this file.
+#define NET_DISK_CACHE_BACKEND_IMPL_CC_
+#include "net/disk_cache/histogram_macros.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+const char* kIndexName = "index";
+
+// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
+// Note that the actual target is to keep the index table load factor under 55%
+// for most users.
+const int k64kEntriesStore = 240 * 1000 * 1000;
+const int kBaseTableLen = 64 * 1024;
+const int kDefaultCacheSize = 80 * 1024 * 1024;
+
+// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
+const int kTrimDelay = 10;
+
+int DesiredIndexTableLen(int32 storage_size) {
+ if (storage_size <= k64kEntriesStore)
+ return kBaseTableLen;
+ if (storage_size <= k64kEntriesStore * 2)
+ return kBaseTableLen * 2;
+ if (storage_size <= k64kEntriesStore * 4)
+ return kBaseTableLen * 4;
+ if (storage_size <= k64kEntriesStore * 8)
+ return kBaseTableLen * 8;
+
+ // The biggest storage_size for int32 requires a 4 MB table.
+ return kBaseTableLen * 16;
+}
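+
+// Worked example (illustrative note, not part of the upstream source): a
+// 500 MB cache falls in the "<= k64kEntriesStore * 4" bucket, so
+// DesiredIndexTableLen returns kBaseTableLen * 4 (256k buckets), which is
+// about 1 MB of index table at 4 bytes per CacheAddr.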
+
+int MaxStorageSizeForTable(int table_len) {
+ return table_len * (k64kEntriesStore / kBaseTableLen);
+}
+
+size_t GetIndexSize(int table_len) {
+ size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
+ return sizeof(disk_cache::IndexHeader) + table_size;
+}
+
+// ------------------------------------------------------------------------
+
+// Sets group for the current experiment. Returns false if the files should be
+// discarded.
+bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
+ if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
+ header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
+ // Discard current cache.
+ return false;
+ }
+
+ if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
+ "ExperimentControl") {
+ if (cache_created) {
+ header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
+ return true;
+ }
+ return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
+ }
+
+ header->experiment = disk_cache::NO_EXPERIMENT;
+ return true;
+}
+
+// A callback to perform final cleanup on the background thread.
+void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
+ backend->CleanupCache();
+}
+
+} // namespace
+
+// ------------------------------------------------------------------------
+
+namespace disk_cache {
+
+// Returns the preferred maximum number of bytes for the cache given the
+// number of available bytes.
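+//
+// Worked example (illustrative note, not part of the upstream comment): with
+// 1 GB of free disk space, available is above 10 * kDefaultCacheSize (800 MiB)
+// but below 25 * kDefaultCacheSize (2000 MiB), so the result is
+// available / 10, about 100 MB.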
+int PreferedCacheSize(int64 available) {
+ // Return 80% of the available space if there is not enough space to use
+ // kDefaultCacheSize.
+ if (available < kDefaultCacheSize * 10 / 8)
+ return static_cast<int32>(available * 8 / 10);
+
+ // Return kDefaultCacheSize if it uses 80% to 10% of the available space.
+ if (available < kDefaultCacheSize * 10)
+ return kDefaultCacheSize;
+
+ // Return 10% of the available space if the target size
+ // (2.5 * kDefaultCacheSize) is more than 10%.
+ if (available < static_cast<int64>(kDefaultCacheSize) * 25)
+ return static_cast<int32>(available / 10);
+
+ // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
+ // of the available space.
+ if (available < static_cast<int64>(kDefaultCacheSize) * 250)
+ return kDefaultCacheSize * 5 / 2;
+
+ // Return 1% of the available space if it does not exceed kint32max.
+ if (available < static_cast<int64>(kint32max) * 100)
+ return static_cast<int32>(available / 100);
+
+ return kint32max;
+}
+
+// ------------------------------------------------------------------------
+
+BackendImpl::BackendImpl(const base::FilePath& path,
+ base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log)
+ : background_queue_(this, cache_thread),
+ path_(path),
+ block_files_(path),
+ mask_(0),
+ max_size_(0),
+ up_ticks_(0),
+ cache_type_(net::DISK_CACHE),
+ uma_report_(0),
+ user_flags_(0),
+ init_(false),
+ restarted_(false),
+ unit_test_(false),
+ read_only_(false),
+ disabled_(false),
+ new_eviction_(false),
+ first_timer_(true),
+ user_load_(false),
+ net_log_(net_log),
+ done_(true, false),
+ ptr_factory_(this) {
+}
+
+BackendImpl::BackendImpl(const base::FilePath& path,
+ uint32 mask,
+ base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log)
+ : background_queue_(this, cache_thread),
+ path_(path),
+ block_files_(path),
+ mask_(mask),
+ max_size_(0),
+ up_ticks_(0),
+ cache_type_(net::DISK_CACHE),
+ uma_report_(0),
+ user_flags_(kMask),
+ init_(false),
+ restarted_(false),
+ unit_test_(false),
+ read_only_(false),
+ disabled_(false),
+ new_eviction_(false),
+ first_timer_(true),
+ user_load_(false),
+ net_log_(net_log),
+ done_(true, false),
+ ptr_factory_(this) {
+}
+
+BackendImpl::~BackendImpl() {
+ if (user_flags_ & kNoRandom) {
+ // This is a unit test, so we want to be strict about not leaking entries
+ // and completing all the work.
+ background_queue_.WaitForPendingIO();
+ } else {
+ // This is most likely not a test, so we want to do as little work as
+ // possible at this time, at the price of leaving dirty entries behind.
+ background_queue_.DropPendingIO();
+ }
+
+ if (background_queue_.BackgroundIsCurrentThread()) {
+ // Unit tests may use the same thread for everything.
+ CleanupCache();
+ } else {
+ background_queue_.background_thread()->PostTask(
+ FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
+ // http://crbug.com/74623
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ done_.Wait();
+ }
+}
+
+int BackendImpl::Init(const CompletionCallback& callback) {
+ background_queue_.Init(callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::SyncInit() {
+#if defined(NET_BUILD_STRESS_CACHE)
+ // Start evictions right away.
+ up_ticks_ = kTrimDelay * 2;
+#endif
+ DCHECK(!init_);
+ if (init_)
+ return net::ERR_FAILED;
+
+ bool create_files = false;
+ if (!InitBackingStore(&create_files)) {
+ ReportError(ERR_STORAGE_ERROR);
+ return net::ERR_FAILED;
+ }
+
+ num_refs_ = num_pending_io_ = max_refs_ = 0;
+ entry_count_ = byte_count_ = 0;
+
+ if (!restarted_) {
+ buffer_bytes_ = 0;
+ trace_object_ = TraceObject::GetTraceObject();
+ // Create a recurrent timer of 30 secs.
+ int timer_delay = unit_test_ ? 1000 : 30000;
+ timer_.reset(new base::RepeatingTimer<BackendImpl>());
+ timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
+ &BackendImpl::OnStatsTimer);
+ }
+
+ init_ = true;
+ Trace("Init");
+
+ if (data_->header.experiment != NO_EXPERIMENT &&
+ cache_type_ != net::DISK_CACHE) {
+ // No experiment for other caches.
+ return net::ERR_FAILED;
+ }
+
+ if (!(user_flags_ & kNoRandom)) {
+ // The unit test controls directly what to test.
+ new_eviction_ = (cache_type_ == net::DISK_CACHE);
+ }
+
+ if (!CheckIndex()) {
+ ReportError(ERR_INIT_FAILED);
+ return net::ERR_FAILED;
+ }
+
+ if (!restarted_ && (create_files || !data_->header.num_entries))
+ ReportError(ERR_CACHE_CREATED);
+
+ if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
+ !InitExperiment(&data_->header, create_files)) {
+ return net::ERR_FAILED;
+ }
+
+ // We don't care if the value overflows. The only thing we care about is that
+ // the id cannot be zero, because that value is used as "not dirty".
+ // Increasing the value once per second gives us many years before we start
+ // having collisions.
+ data_->header.this_id++;
+ if (!data_->header.this_id)
+ data_->header.this_id++;
+
+ bool previous_crash = (data_->header.crash != 0);
+ data_->header.crash = 1;
+
+ if (!block_files_.Init(create_files))
+ return net::ERR_FAILED;
+
+ // We want to minimize the changes to cache for an AppCache.
+ if (cache_type() == net::APP_CACHE) {
+ DCHECK(!new_eviction_);
+ read_only_ = true;
+ } else if (cache_type() == net::SHADER_CACHE) {
+ DCHECK(!new_eviction_);
+ }
+
+ eviction_.Init(this);
+
+ // stats_ and rankings_ may end up calling back to us so we better be enabled.
+ disabled_ = false;
+ if (!InitStats())
+ return net::ERR_FAILED;
+
+ disabled_ = !rankings_.Init(this, new_eviction_);
+
+#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
+ trace_object_->EnableTracing(false);
+ int sc = SelfCheck();
+ if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
+ NOTREACHED();
+ trace_object_->EnableTracing(true);
+#endif
+
+ if (previous_crash) {
+ ReportError(ERR_PREVIOUS_CRASH);
+ } else if (!restarted_) {
+ ReportError(ERR_NO_ERROR);
+ }
+
+ FlushIndex();
+
+ return disabled_ ? net::ERR_FAILED : net::OK;
+}
+
+void BackendImpl::CleanupCache() {
+ Trace("Backend Cleanup");
+ eviction_.Stop();
+ timer_.reset();
+
+ if (init_) {
+ StoreStats();
+ if (data_)
+ data_->header.crash = 0;
+
+ if (user_flags_ & kNoRandom) {
+ // This is a net_unittest, verify that we are not 'leaking' entries.
+ File::WaitForPendingIO(&num_pending_io_);
+ DCHECK(!num_refs_);
+ } else {
+ File::DropPendingIO();
+ }
+ }
+ block_files_.CloseFiles();
+ FlushIndex();
+ index_ = NULL;
+ ptr_factory_.InvalidateWeakPtrs();
+ done_.Signal();
+}
+
+// ------------------------------------------------------------------------
+
+int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.OpenPrevEntry(iter, prev_entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
+ DCHECK(entry);
+ *entry = OpenEntryImpl(key);
+ return (*entry) ? net::OK : net::ERR_FAILED;
+}
+
+int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
+ DCHECK(entry);
+ *entry = CreateEntryImpl(key);
+ return (*entry) ? net::OK : net::ERR_FAILED;
+}
+
+int BackendImpl::SyncDoomEntry(const std::string& key) {
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ EntryImpl* entry = OpenEntryImpl(key);
+ if (!entry)
+ return net::ERR_FAILED;
+
+ entry->DoomImpl();
+ entry->Release();
+ return net::OK;
+}
+
+int BackendImpl::SyncDoomAllEntries() {
+ // This is not really an error, but it is an interesting condition.
+ ReportError(ERR_CACHE_DOOMED);
+ stats_.OnEvent(Stats::DOOM_CACHE);
+ if (!num_refs_) {
+ RestartCache(false);
+ return disabled_ ? net::ERR_FAILED : net::OK;
+ } else {
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ eviction_.TrimCache(true);
+ return net::OK;
+ }
+}
+
+int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time) {
+ DCHECK_NE(net::APP_CACHE, cache_type_);
+ if (end_time.is_null())
+ return SyncDoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ EntryImpl* node;
+ void* iter = NULL;
+ EntryImpl* next = OpenNextEntryImpl(&iter);
+ if (!next)
+ return net::OK;
+
+ while (next) {
+ node = next;
+ next = OpenNextEntryImpl(&iter);
+
+ if (node->GetLastUsed() >= initial_time &&
+ node->GetLastUsed() < end_time) {
+ node->DoomImpl();
+ } else if (node->GetLastUsed() < initial_time) {
+ if (next)
+ next->Release();
+ next = NULL;
+ SyncEndEnumeration(iter);
+ }
+
+ node->Release();
+ }
+
+ return net::OK;
+}
+
+// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
+// entries that are too old.
+int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
+ DCHECK_NE(net::APP_CACHE, cache_type_);
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ stats_.OnEvent(Stats::DOOM_RECENT);
+ for (;;) {
+ void* iter = NULL;
+ EntryImpl* entry = OpenNextEntryImpl(&iter);
+ if (!entry)
+ return net::OK;
+
+ if (initial_time > entry->GetLastUsed()) {
+ entry->Release();
+ SyncEndEnumeration(iter);
+ return net::OK;
+ }
+
+ entry->DoomImpl();
+ entry->Release();
+ SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator.
+ }
+}
+
+int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
+ *next_entry = OpenNextEntryImpl(iter);
+ return (*next_entry) ? net::OK : net::ERR_FAILED;
+}
+
+int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
+ *prev_entry = OpenPrevEntryImpl(iter);
+ return (*prev_entry) ? net::OK : net::ERR_FAILED;
+}
+
+void BackendImpl::SyncEndEnumeration(void* iter) {
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(iter));
+}
+
+void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
+ if (disabled_)
+ return;
+
+ uint32 hash = base::Hash(key);
+ bool error;
+ EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
+ if (cache_entry) {
+ if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
+ UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
+ }
+ cache_entry->Release();
+ }
+}
+
+EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
+ if (disabled_)
+ return NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ uint32 hash = base::Hash(key);
+ Trace("Open hash 0x%x", hash);
+
+ bool error;
+ EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
+ if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
+ // The entry was already evicted.
+ cache_entry->Release();
+ cache_entry = NULL;
+ }
+
+ int current_size = data_->header.num_bytes / (1024 * 1024);
+ int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+ int64 use_hours = total_hours - no_use_hours;
+
+ if (!cache_entry) {
+ CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
+ CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
+ CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
+ CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return NULL;
+ }
+
+ eviction_.OnOpenEntry(cache_entry);
+ entry_count_++;
+
+ Trace("Open hash 0x%x end: 0x%x", hash,
+ cache_entry->entry()->address().value());
+ CACHE_UMA(AGE_MS, "OpenTime", 0, start);
+ CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
+ CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
+ CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
+ stats_.OnEvent(Stats::OPEN_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.hit");
+ return cache_entry;
+}
+
+EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
+ if (disabled_ || key.empty())
+ return NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ uint32 hash = base::Hash(key);
+ Trace("Create hash 0x%x", hash);
+
+ scoped_refptr<EntryImpl> parent;
+ Addr entry_address(data_->table[hash & mask_]);
+ if (entry_address.is_initialized()) {
+ // We have an entry already. It could be the one we are looking for, or just
+ // a hash conflict.
+ bool error;
+ EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
+ if (old_entry)
+ return ResurrectEntry(old_entry);
+
+ EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
+ DCHECK(!error);
+ if (parent_entry) {
+ parent.swap(&parent_entry);
+ } else if (data_->table[hash & mask_]) {
+ // We should have corrected the problem.
+ NOTREACHED();
+ return NULL;
+ }
+ }
+
+ // The general flow is to allocate disk space and initialize the entry data,
+  // followed by saving that to disk, then linking the entry through the index
+ // and finally through the lists. If there is a crash in this process, we may
+ // end up with:
+ // a. Used, unreferenced empty blocks on disk (basically just garbage).
+ // b. Used, unreferenced but meaningful data on disk (more garbage).
+ // c. A fully formed entry, reachable only through the index.
+ // d. A fully formed entry, also reachable through the lists, but still dirty.
+ //
+ // Anything after (b) can be automatically cleaned up. We may consider saving
+ // the current operation (as we do while manipulating the lists) so that we
+ // can detect and cleanup (a) and (b).
+
+ int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
+ if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ Addr node_address(0);
+ if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
+ block_files_.DeleteBlock(entry_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(
+ new EntryImpl(this, entry_address, false));
+ IncreaseNumRefs();
+
+ if (!cache_entry->CreateEntry(node_address, key, hash)) {
+ block_files_.DeleteBlock(entry_address, false);
+ block_files_.DeleteBlock(node_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ cache_entry->BeginLogging(net_log_, true);
+
+ // We are not failing the operation; let's add this to the map.
+ open_entries_[entry_address.value()] = cache_entry.get();
+
+ // Save the entry.
+ cache_entry->entry()->Store();
+ cache_entry->rankings()->Store();
+ IncreaseNumEntries();
+ entry_count_++;
+
+ // Link this entry through the index.
+ if (parent.get()) {
+ parent->SetNextAddress(entry_address);
+ } else {
+ data_->table[hash & mask_] = entry_address.value();
+ }
+
+ // Link this entry through the lists.
+ eviction_.OnCreateEntry(cache_entry.get());
+
+ CACHE_UMA(AGE_MS, "CreateTime", 0, start);
+ stats_.OnEvent(Stats::CREATE_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.miss");
+ Trace("create entry hit ");
+ FlushIndex();
+ cache_entry->AddRef();
+ return cache_entry.get();
+}
+
+EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
+ return OpenFollowingEntry(true, iter);
+}
+
+EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
+ return OpenFollowingEntry(false, iter);
+}
+
+bool BackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ // Avoid a DCHECK later on.
+ if (max_bytes >= kint32max - kint32max / 10)
+ max_bytes = kint32max - kint32max / 10 - 1;
+
+ user_flags_ |= kMaxSize;
+ max_size_ = max_bytes;
+ return true;
+}
+
+void BackendImpl::SetType(net::CacheType type) {
+ DCHECK_NE(net::MEMORY_CACHE, type);
+ cache_type_ = type;
+}
+
+base::FilePath BackendImpl::GetFileName(Addr address) const {
+ if (!address.is_separate_file() || !address.is_initialized()) {
+ NOTREACHED();
+ return base::FilePath();
+ }
+
+ std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
+ return path_.AppendASCII(tmp);
+}
+
+MappedFile* BackendImpl::File(Addr address) {
+ if (disabled_)
+ return NULL;
+ return block_files_.GetFile(address);
+}
+
+base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
+ return background_queue_.GetWeakPtr();
+}
+
+bool BackendImpl::CreateExternalFile(Addr* address) {
+ int file_number = data_->header.last_file + 1;
+ Addr file_address(0);
+ bool success = false;
+ for (int i = 0; i < 0x0fffffff; i++, file_number++) {
+ if (!file_address.SetFileNumber(file_number)) {
+ file_number = 1;
+ continue;
+ }
+ base::FilePath name = GetFileName(file_address);
+ int flags = base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_CREATE |
+ base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+ base::PlatformFileError error;
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(name, flags, NULL, &error)));
+ if (!file->IsValid()) {
+ if (error != base::PLATFORM_FILE_ERROR_EXISTS) {
+ LOG(ERROR) << "Unable to create file: " << error;
+ return false;
+ }
+ continue;
+ }
+
+ success = true;
+ break;
+ }
+
+ DCHECK(success);
+ if (!success)
+ return false;
+
+ data_->header.last_file = file_number;
+ address->set_value(file_address.value());
+ return true;
+}
+
+bool BackendImpl::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ return block_files_.CreateBlock(block_type, block_count, block_address);
+}
+
+void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
+ block_files_.DeleteBlock(block_address, deep);
+}
+
+LruData* BackendImpl::GetLruData() {
+ return &data_->header.lru;
+}
+
+void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
+ if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
+ return;
+ eviction_.UpdateRank(entry, modified);
+}
+
+void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
+ Addr address(rankings->Data()->contents);
+ EntryImpl* cache_entry = NULL;
+ if (NewEntry(address, &cache_entry)) {
+ STRESS_NOTREACHED();
+ return;
+ }
+
+ uint32 hash = cache_entry->GetHash();
+ cache_entry->Release();
+
+ // Anything on the table means that this entry is there.
+ if (data_->table[hash & mask_])
+ return;
+
+ data_->table[hash & mask_] = address.value();
+ FlushIndex();
+}
+
+void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
+ uint32 hash = entry->GetHash();
+ std::string key = entry->GetKey();
+ Addr entry_addr = entry->entry()->address();
+ bool error;
+ EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
+ CacheAddr child(entry->GetNextAddress());
+
+ Trace("Doom entry 0x%p", entry);
+
+ if (!entry->doomed()) {
+ // We may have doomed this entry from within MatchEntry.
+ eviction_.OnDoomEntry(entry);
+ entry->InternalDoom();
+ if (!new_eviction_) {
+ DecreaseNumEntries();
+ }
+ stats_.OnEvent(Stats::DOOM_ENTRY);
+ }
+
+ if (parent_entry) {
+ parent_entry->SetNextAddress(Addr(child));
+ parent_entry->Release();
+ } else if (!error) {
+ data_->table[hash & mask_] = child;
+ }
+
+ FlushIndex();
+}
+
+#if defined(NET_BUILD_STRESS_CACHE)
+
+CacheAddr BackendImpl::GetNextAddr(Addr address) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end()) {
+ EntryImpl* this_entry = it->second;
+ return this_entry->GetNextAddress();
+ }
+ DCHECK(block_files_.IsValid(address));
+ DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);
+
+ CacheEntryBlock entry(File(address), address);
+ CHECK(entry.Load());
+ return entry.Data()->next;
+}
+
+void BackendImpl::NotLinked(EntryImpl* entry) {
+ Addr entry_addr = entry->entry()->address();
+ uint32 i = entry->GetHash() & mask_;
+ Addr address(data_->table[i]);
+ if (!address.is_initialized())
+ return;
+
+ for (;;) {
+ DCHECK(entry_addr.value() != address.value());
+ address.set_value(GetNextAddr(address));
+ if (!address.is_initialized())
+ break;
+ }
+}
+#endif // NET_BUILD_STRESS_CACHE
+
+// An entry may be linked on the DELETED list for a while after being doomed.
+// This function is called when we want to remove it.
+void BackendImpl::RemoveEntry(EntryImpl* entry) {
+#if defined(NET_BUILD_STRESS_CACHE)
+ NotLinked(entry);
+#endif
+ if (!new_eviction_)
+ return;
+
+ DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);
+
+ Trace("Remove entry 0x%p", entry);
+ eviction_.OnDestroyEntry(entry);
+ DecreaseNumEntries();
+}
+
+void BackendImpl::OnEntryDestroyBegin(Addr address) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end())
+ open_entries_.erase(it);
+}
+
+void BackendImpl::OnEntryDestroyEnd() {
+ DecreaseNumRefs();
+ if (data_->header.num_bytes > max_size_ && !read_only_ &&
+ (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
+ eviction_.TrimCache(false);
+}
+
+EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
+ DCHECK(rankings->HasData());
+ EntriesMap::const_iterator it =
+ open_entries_.find(rankings->Data()->contents);
+ if (it != open_entries_.end()) {
+ // We have this entry in memory.
+ return it->second;
+ }
+
+ return NULL;
+}
+
+int32 BackendImpl::GetCurrentEntryId() const {
+ return data_->header.this_id;
+}
+
+int BackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (disabled_ || old_size == new_size)
+ return;
+ if (old_size > new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+
+ FlushIndex();
+
+ // Update the usage statistics.
+ stats_.ModifyStorageStats(old_size, new_size);
+}
+
+void BackendImpl::TooMuchStorageRequested(int32 size) {
+ stats_.ModifyStorageStats(0, size);
+}
+
+bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
+ DCHECK_GT(new_size, current_size);
+ if (user_flags_ & kNoBuffering)
+ return false;
+
+ int to_add = new_size - current_size;
+ if (buffer_bytes_ + to_add > MaxBuffersSize())
+ return false;
+
+ buffer_bytes_ += to_add;
+ CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
+ return true;
+}
+
+void BackendImpl::BufferDeleted(int size) {
+ buffer_bytes_ -= size;
+ DCHECK_GE(size, 0);
+}
+
+bool BackendImpl::IsLoaded() const {
+ CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
+ if (user_flags_ & kNoLoadProtection)
+ return false;
+
+ return (num_pending_io_ > 5 || user_load_);
+}
+
+std::string BackendImpl::HistogramName(const char* name, int experiment) const {
+ if (!experiment)
+ return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
+ return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
+ name, experiment);
+}
+
+base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
+ return ptr_factory_.GetWeakPtr();
+}
+
+// We want to remove biases from some histograms so we only send data once per
+// week.
+bool BackendImpl::ShouldReportAgain() {
+ if (uma_report_)
+ return uma_report_ == 2;
+
+ uma_report_++;
+ int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+ Time last_time = Time::FromInternalValue(last_report);
+ if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
+ stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
+ uma_report_++;
+ return true;
+ }
+ return false;
+}
+
+void BackendImpl::FirstEviction() {
+ DCHECK(data_->header.create_time);
+ if (!GetEntryCount())
+ return; // This is just for unit tests.
+
+ Time create_time = Time::FromInternalValue(data_->header.create_time);
+ CACHE_UMA(AGE, "FillupAge", 0, create_time);
+
+ int64 use_time = stats_.GetCounter(Stats::TIMER);
+ CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
+ CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
+
+ if (!use_time)
+ use_time = 1;
+ CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
+ static_cast<int>(data_->header.num_entries / use_time));
+ CACHE_UMA(COUNTS, "FirstByteIORate", 0,
+ static_cast<int>((data_->header.num_bytes / 1024) / use_time));
+
+ int avg_size = data_->header.num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+ CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
+
+ if (new_eviction_) {
+ CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
+ data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
+ data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
+ data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
+ }
+
+ stats_.ResetRatios();
+}
+
+void BackendImpl::CriticalError(int error) {
+ STRESS_NOTREACHED();
+ LOG(ERROR) << "Critical error found " << error;
+ if (disabled_)
+ return;
+
+ stats_.OnEvent(Stats::FATAL_ERROR);
+ LogStats();
+ ReportError(error);
+
+ // Setting the index table length to an invalid value will force re-creation
+ // of the cache files.
+ data_->header.table_len = 1;
+ disabled_ = true;
+
+ if (!num_refs_)
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
+}
+
+void BackendImpl::ReportError(int error) {
+ STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
+ error == ERR_CACHE_CREATED);
+
+ // We transmit positive numbers, instead of direct error codes.
+ DCHECK_LE(error, 0);
+ CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
+}
+
+void BackendImpl::OnEvent(Stats::Counters an_event) {
+ stats_.OnEvent(an_event);
+}
+
+void BackendImpl::OnRead(int32 bytes) {
+ DCHECK_GE(bytes, 0);
+ byte_count_ += bytes;
+ if (byte_count_ < 0)
+ byte_count_ = kint32max;
+}
+
+void BackendImpl::OnWrite(int32 bytes) {
+ // We use the same implementation as OnRead... just log the number of bytes.
+ OnRead(bytes);
+}
+
+void BackendImpl::OnStatsTimer() {
+ stats_.OnEvent(Stats::TIMER);
+ int64 time = stats_.GetCounter(Stats::TIMER);
+ int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
+
+ // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
+ // the bias towards 0.
+ if (num_refs_ && (current != num_refs_)) {
+ int64 diff = (num_refs_ - current) / 50;
+ if (!diff)
+ diff = num_refs_ > current ? 1 : -1;
+ current = current + diff;
+ stats_.SetCounter(Stats::OPEN_ENTRIES, current);
+ stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
+ }
+
+ CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
+
+ CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
+ CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
+
+ // These values cover about 99.5% of the population (Oct 2011).
+ user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
+ entry_count_ = 0;
+ byte_count_ = 0;
+ up_ticks_++;
+
+ if (!data_)
+ first_timer_ = false;
+ if (first_timer_) {
+ first_timer_ = false;
+ if (ShouldReportAgain())
+ ReportStats();
+ }
+
+ // Save stats to disk at 5 min intervals.
+ if (time % 10 == 0)
+ StoreStats();
+}
+
+void BackendImpl::IncrementIoCount() {
+ num_pending_io_++;
+}
+
+void BackendImpl::DecrementIoCount() {
+ num_pending_io_--;
+}
+
+void BackendImpl::SetUnitTestMode() {
+ user_flags_ |= kUnitTestMode;
+ unit_test_ = true;
+}
+
+void BackendImpl::SetUpgradeMode() {
+ user_flags_ |= kUpgradeMode;
+ read_only_ = true;
+}
+
+void BackendImpl::SetNewEviction() {
+ user_flags_ |= kNewEviction;
+ new_eviction_ = true;
+}
+
+void BackendImpl::SetFlags(uint32 flags) {
+ user_flags_ |= flags;
+}
+
+void BackendImpl::ClearRefCountForTest() {
+ num_refs_ = 0;
+}
+
+int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
+ background_queue_.FlushQueue(callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::RunTaskForTest(const base::Closure& task,
+ const CompletionCallback& callback) {
+ background_queue_.RunTask(task, callback);
+ return net::ERR_IO_PENDING;
+}
+
+void BackendImpl::TrimForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimCache(empty);
+}
+
+void BackendImpl::TrimDeletedListForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimDeletedList(empty);
+}
+
+int BackendImpl::SelfCheck() {
+ if (!init_) {
+ LOG(ERROR) << "Init failed";
+ return ERR_INIT_FAILED;
+ }
+
+ int num_entries = rankings_.SelfCheck();
+ if (num_entries < 0) {
+ LOG(ERROR) << "Invalid rankings list, error " << num_entries;
+#if !defined(NET_BUILD_STRESS_CACHE)
+ return num_entries;
+#endif
+ }
+
+ if (num_entries != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries mismatch";
+#if !defined(NET_BUILD_STRESS_CACHE)
+ return ERR_NUM_ENTRIES_MISMATCH;
+#endif
+ }
+
+ return CheckAllEntries();
+}
+
+void BackendImpl::FlushIndex() {
+ if (index_.get() && !disabled_)
+ index_->Flush();
+}
+
+// ------------------------------------------------------------------------
+
+net::CacheType BackendImpl::GetCacheType() const {
+ return cache_type_;
+}
+
+int32 BackendImpl::GetEntryCount() const {
+ if (!index_.get() || disabled_)
+ return 0;
+ // num_entries includes entries already evicted.
+ int32 not_deleted = data_->header.num_entries -
+ data_->header.lru.sizes[Rankings::DELETED];
+
+ if (not_deleted < 0) {
+ NOTREACHED();
+ not_deleted = 0;
+ }
+
+ return not_deleted;
+}
+
+int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.OpenEntry(key, entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.CreateEntry(key, entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomEntry(const std::string& key,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.DoomEntry(key, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.DoomAllEntries(callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomEntriesSince(const base::Time initial_time,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.DoomEntriesSince(initial_time, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.OpenNextEntry(iter, next_entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+void BackendImpl::EndEnumeration(void** iter) {
+ background_queue_.EndEnumeration(*iter);
+ *iter = NULL;
+}
+
+void BackendImpl::GetStats(StatsItems* stats) {
+ if (disabled_)
+ return;
+
+ std::pair<std::string, std::string> item;
+
+ item.first = "Entries";
+ item.second = base::StringPrintf("%d", data_->header.num_entries);
+ stats->push_back(item);
+
+ item.first = "Pending IO";
+ item.second = base::StringPrintf("%d", num_pending_io_);
+ stats->push_back(item);
+
+ item.first = "Max size";
+ item.second = base::StringPrintf("%d", max_size_);
+ stats->push_back(item);
+
+ item.first = "Current size";
+ item.second = base::StringPrintf("%d", data_->header.num_bytes);
+ stats->push_back(item);
+
+ item.first = "Cache type";
+ item.second = "Blockfile Cache";
+ stats->push_back(item);
+
+ stats_.GetItems(stats);
+}
+
+void BackendImpl::OnExternalCacheHit(const std::string& key) {
+ background_queue_.OnExternalCacheHit(key);
+}
+
+// ------------------------------------------------------------------------
+
+// We just created a new file so we're going to write the header and set the
+// file length to include the hash table (zero filled).
+bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
+ AdjustMaxCacheSize(0);
+
+ IndexHeader header;
+ header.table_len = DesiredIndexTableLen(max_size_);
+
+ // We need file version 2.1 for the new eviction algorithm.
+ if (new_eviction_)
+ header.version = 0x20001;
+
+ header.create_time = Time::Now().ToInternalValue();
+
+ if (!file->Write(&header, sizeof(header), 0))
+ return false;
+
+ return file->SetLength(GetIndexSize(header.table_len));
+}
+
+bool BackendImpl::InitBackingStore(bool* file_created) {
+ if (!file_util::CreateDirectory(path_))
+ return false;
+
+ base::FilePath index_name = path_.AppendASCII(kIndexName);
+
+ int flags = base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_OPEN_ALWAYS |
+ base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(index_name, flags, file_created, NULL)));
+
+ if (!file->IsValid())
+ return false;
+
+ bool ret = true;
+ if (*file_created)
+ ret = CreateBackingStore(file.get());
+
+ file = NULL;
+ if (!ret)
+ return false;
+
+ index_ = new MappedFile();
+ data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
+ if (!data_) {
+ LOG(ERROR) << "Unable to map Index file";
+ return false;
+ }
+
+ if (index_->GetLength() < sizeof(Index)) {
+ // We verify this again on CheckIndex() but it's easier to make sure now
+ // that the header is there.
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ return true;
+}
+
+// The maximum cache size will be either set explicitly by the caller, or
+// calculated by this code.
+void BackendImpl::AdjustMaxCacheSize(int table_len) {
+ if (max_size_)
+ return;
+
+ // If table_len is provided, the index file exists.
+ DCHECK(!table_len || data_->header.magic);
+
+ // The user is not setting the size, let's figure it out.
+ int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
+ if (available < 0) {
+ max_size_ = kDefaultCacheSize;
+ return;
+ }
+
+ if (table_len)
+ available += data_->header.num_bytes;
+
+ max_size_ = PreferedCacheSize(available);
+
+  // Let's not use more than the default size while we tune up the performance
+ // of bigger caches. TODO(rvargas): remove this limit.
+ if (max_size_ > kDefaultCacheSize * 4)
+ max_size_ = kDefaultCacheSize * 4;
+
+ if (!table_len)
+ return;
+
+ // If we already have a table, adjust the size to it.
+ int current_max_size = MaxStorageSizeForTable(table_len);
+ if (max_size_ > current_max_size)
+ max_size_= current_max_size;
+}
+
+bool BackendImpl::InitStats() {
+ Addr address(data_->header.stats);
+ int size = stats_.StorageSize();
+
+ if (!address.is_initialized()) {
+ FileType file_type = Addr::RequiredFileType(size);
+ DCHECK_NE(file_type, EXTERNAL);
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!CreateBlock(file_type, num_blocks, &address))
+ return false;
+
+ data_->header.stats = address.value();
+ return stats_.Init(NULL, 0, address);
+ }
+
+ if (!address.is_block_file()) {
+ NOTREACHED();
+ return false;
+ }
+
+ // Load the required data.
+ size = address.num_blocks() * address.BlockSize();
+ MappedFile* file = File(address);
+ if (!file)
+ return false;
+
+ scoped_ptr<char[]> data(new char[size]);
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (!file->Read(data.get(), size, offset))
+ return false;
+
+ if (!stats_.Init(data.get(), size, address))
+ return false;
+ if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
+ stats_.InitSizeHistogram();
+ return true;
+}
+
+void BackendImpl::StoreStats() {
+ int size = stats_.StorageSize();
+ scoped_ptr<char[]> data(new char[size]);
+ Addr address;
+ size = stats_.SerializeStats(data.get(), size, &address);
+ DCHECK(size);
+ if (!address.is_initialized())
+ return;
+
+ MappedFile* file = File(address);
+ if (!file)
+ return;
+
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ file->Write(data.get(), size, offset); // ignore result.
+}
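
[Editorial note] Both InitStats() and StoreStats() locate the stats record inside a block file by turning the address into a byte offset. A minimal restatement of that arithmetic, with illustrative names rather than the real Addr API:

    #include <cstddef>

    // Offset of a record that starts at |start_block| inside a block file with
    // |block_size|-byte blocks and a |header_size|-byte file header.
    size_t BlockDataOffset(int start_block, int block_size, size_t header_size) {
      return static_cast<size_t>(start_block) * block_size + header_size;
    }
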
+
+void BackendImpl::RestartCache(bool failure) {
+ int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
+ int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
+ int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
+ int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+
+ PrepareForRestart();
+ if (failure) {
+ DCHECK(!num_refs_);
+ DCHECK(!open_entries_.size());
+ DelayedCacheCleanup(path_);
+ } else {
+ DeleteCache(path_, false);
+ }
+
+ // Don't call Init() if directed by the unit test: we are simulating a failure
+ // trying to re-enable the cache.
+ if (unit_test_)
+ init_ = true; // Let the destructor do proper cleanup.
+ else if (SyncInit() == net::OK) {
+ stats_.SetCounter(Stats::FATAL_ERROR, errors);
+ stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
+ stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
+ stats_.SetCounter(Stats::LAST_REPORT, last_report);
+ }
+}
+
+void BackendImpl::PrepareForRestart() {
+ // Reset the mask_ if it was not given by the user.
+ if (!(user_flags_ & kMask))
+ mask_ = 0;
+
+ if (!(user_flags_ & kNewEviction))
+ new_eviction_ = false;
+
+ disabled_ = true;
+ data_->header.crash = 0;
+ index_->Flush();
+ index_ = NULL;
+ data_ = NULL;
+ block_files_.CloseFiles();
+ rankings_.Reset();
+ init_ = false;
+ restarted_ = true;
+}
+
+int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end()) {
+ // Easy job. This entry is already in memory.
+ EntryImpl* this_entry = it->second;
+ this_entry->AddRef();
+ *entry = this_entry;
+ return 0;
+ }
+
+ STRESS_DCHECK(block_files_.IsValid(address));
+
+ if (!address.SanityCheckForEntryV2()) {
+ LOG(WARNING) << "Wrong entry address.";
+ STRESS_NOTREACHED();
+ return ERR_INVALID_ADDRESS;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(
+ new EntryImpl(this, address, read_only_));
+ IncreaseNumRefs();
+ *entry = NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ if (!cache_entry->entry()->Load())
+ return ERR_READ_FAILURE;
+
+ if (IsLoaded()) {
+ CACHE_UMA(AGE_MS, "LoadTime", 0, start);
+ }
+
+ if (!cache_entry->SanityCheck()) {
+ LOG(WARNING) << "Messed up entry found.";
+ STRESS_NOTREACHED();
+ return ERR_INVALID_ENTRY;
+ }
+
+ STRESS_DCHECK(block_files_.IsValid(
+ Addr(cache_entry->entry()->Data()->rankings_node)));
+
+ if (!cache_entry->LoadNodeAddress())
+ return ERR_READ_FAILURE;
+
+ if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
+ STRESS_NOTREACHED();
+ cache_entry->SetDirtyFlag(0);
+ // Don't remove this from the list (it is not linked properly). Instead,
+ // break the link back to the entry because it is going away, and leave the
+ // rankings node to be deleted if we find it through a list.
+ rankings_.SetContents(cache_entry->rankings(), 0);
+ } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
+ STRESS_NOTREACHED();
+ cache_entry->SetDirtyFlag(0);
+ rankings_.SetContents(cache_entry->rankings(), address.value());
+ }
+
+ if (!cache_entry->DataSanityCheck()) {
+ LOG(WARNING) << "Messed up entry found.";
+ cache_entry->SetDirtyFlag(0);
+ cache_entry->FixForDelete();
+ }
+
+ // Prevent overwriting the dirty flag on the destructor.
+ cache_entry->SetDirtyFlag(GetCurrentEntryId());
+
+ if (cache_entry->dirty()) {
+ Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
+ address.value());
+ }
+
+ open_entries_[address.value()] = cache_entry.get();
+
+ cache_entry->BeginLogging(net_log_, false);
+ cache_entry.swap(entry);
+ return 0;
+}
+
+EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
+ bool find_parent, Addr entry_addr,
+ bool* match_error) {
+ Addr address(data_->table[hash & mask_]);
+ scoped_refptr<EntryImpl> cache_entry, parent_entry;
+ EntryImpl* tmp = NULL;
+ bool found = false;
+ std::set<CacheAddr> visited;
+ *match_error = false;
+
+ for (;;) {
+ if (disabled_)
+ break;
+
+ if (visited.find(address.value()) != visited.end()) {
+ // It's possible for a buggy version of the code to write a loop. Just
+ // break it.
+ Trace("Hash collision loop 0x%x", address.value());
+ address.set_value(0);
+ parent_entry->SetNextAddress(address);
+ }
+ visited.insert(address.value());
+
+ if (!address.is_initialized()) {
+ if (find_parent)
+ found = true;
+ break;
+ }
+
+ int error = NewEntry(address, &tmp);
+ cache_entry.swap(&tmp);
+
+ if (error || cache_entry->dirty()) {
+ // This entry is dirty on disk (it was not properly closed): we cannot
+ // trust it.
+ Addr child(0);
+ if (!error)
+ child.set_value(cache_entry->GetNextAddress());
+
+ if (parent_entry.get()) {
+ parent_entry->SetNextAddress(child);
+ parent_entry = NULL;
+ } else {
+ data_->table[hash & mask_] = child.value();
+ }
+
+ Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
+ address.value());
+
+ if (!error) {
+ // It is important to call DestroyInvalidEntry after removing this
+ // entry from the table.
+ DestroyInvalidEntry(cache_entry.get());
+ cache_entry = NULL;
+ } else {
+ Trace("NewEntry failed on MatchEntry 0x%x", address.value());
+ }
+
+ // Restart the search.
+ address.set_value(data_->table[hash & mask_]);
+ visited.clear();
+ continue;
+ }
+
+ DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
+ if (cache_entry->IsSameEntry(key, hash)) {
+ if (!cache_entry->Update())
+ cache_entry = NULL;
+ found = true;
+ if (find_parent && entry_addr.value() != address.value()) {
+ Trace("Entry not on the index 0x%x", address.value());
+ *match_error = true;
+ parent_entry = NULL;
+ }
+ break;
+ }
+ if (!cache_entry->Update())
+ cache_entry = NULL;
+ parent_entry = cache_entry;
+ cache_entry = NULL;
+ if (!parent_entry.get())
+ break;
+
+ address.set_value(parent_entry->GetNextAddress());
+ }
+
+ if (parent_entry.get() && (!find_parent || !found))
+ parent_entry = NULL;
+
+ if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
+ *match_error = true;
+ parent_entry = NULL;
+ }
+
+ if (cache_entry.get() && (find_parent || !found))
+ cache_entry = NULL;
+
+ find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
+ FlushIndex();
+ return tmp;
+}
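
[Editorial note] MatchEntry() walks the collision chain hanging off a single index bucket and uses a visited set to break accidental loops left behind by buggy or crashed runs. A stripped-down sketch of that traversal pattern, using stand-in types rather than the cache's real structures:

    #include <cstdint>
    #include <set>

    // Walks a singly linked chain of cache addresses starting at |head|
    // (0 == end of chain). Returns false if an address repeats, mirroring the
    // loop guard in MatchEntry(); the real code also clears the bad link.
    bool WalkChain(uint32_t head, uint32_t (*next)(uint32_t)) {
      std::set<uint32_t> visited;
      for (uint32_t addr = head; addr != 0; addr = next(addr)) {
        if (!visited.insert(addr).second)
          return false;  // revisited an address: the chain loops on itself
      }
      return true;
    }
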
+
+// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
+EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
+ if (disabled_)
+ return NULL;
+
+ DCHECK(iter);
+
+ const int kListsToSearch = 3;
+ scoped_refptr<EntryImpl> entries[kListsToSearch];
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(*iter));
+ *iter = NULL;
+
+ if (!iterator.get()) {
+ iterator.reset(new Rankings::Iterator(&rankings_));
+ bool ret = false;
+
+ // Get an entry from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
+ &iterator->nodes[i], &temp);
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ if (!ret)
+ return NULL;
+ } else {
+ // Get the next entry from the last list, and the actual entries for the
+ // elements on the other lists.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ if (iterator->list == i) {
+ OpenFollowingEntryFromList(forward, iterator->list,
+ &iterator->nodes[i], &temp);
+ } else {
+ temp = GetEnumeratedEntry(iterator->nodes[i],
+ static_cast<Rankings::List>(i));
+ }
+
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ }
+
+ int newest = -1;
+ int oldest = -1;
+ Time access_times[kListsToSearch];
+ for (int i = 0; i < kListsToSearch; i++) {
+ if (entries[i].get()) {
+ access_times[i] = entries[i]->GetLastUsed();
+ if (newest < 0) {
+ DCHECK_LT(oldest, 0);
+ newest = oldest = i;
+ continue;
+ }
+ if (access_times[i] > access_times[newest])
+ newest = i;
+ if (access_times[i] < access_times[oldest])
+ oldest = i;
+ }
+ }
+
+ if (newest < 0 || oldest < 0)
+ return NULL;
+
+ EntryImpl* next_entry;
+ if (forward) {
+ next_entry = entries[newest].get();
+ iterator->list = static_cast<Rankings::List>(newest);
+ } else {
+ next_entry = entries[oldest].get();
+ iterator->list = static_cast<Rankings::List>(oldest);
+ }
+
+ *iter = iterator.release();
+ next_entry->AddRef();
+ return next_entry;
+}
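
[Editorial note] The enumeration above keeps one candidate entry per rankings list and advances whichever is newest (forward iteration) or oldest (backward iteration). The selection step, reduced to plain timestamps as an illustration:

    #include <ctime>

    // Returns the index of the newest (forward) or oldest (backward) valid
    // candidate, or -1 when every list is exhausted.
    int PickNextList(const time_t last_used[3], const bool valid[3],
                     bool forward) {
      int best = -1;
      for (int i = 0; i < 3; ++i) {
        if (!valid[i])
          continue;
        if (best < 0 || (forward ? last_used[i] > last_used[best]
                                 : last_used[i] < last_used[best]))
          best = i;
      }
      return best;
    }
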
+
+bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
+ CacheRankingsBlock** from_entry,
+ EntryImpl** next_entry) {
+ if (disabled_)
+ return false;
+
+ if (!new_eviction_ && Rankings::NO_USE != list)
+ return false;
+
+ Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
+ CacheRankingsBlock* next_block = forward ?
+ rankings_.GetNext(rankings.get(), list) :
+ rankings_.GetPrev(rankings.get(), list);
+ Rankings::ScopedRankingsBlock next(&rankings_, next_block);
+ *from_entry = NULL;
+
+ *next_entry = GetEnumeratedEntry(next.get(), list);
+ if (!*next_entry)
+ return false;
+
+ *from_entry = next.release();
+ return true;
+}
+
+EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
+ Rankings::List list) {
+ if (!next || disabled_)
+ return NULL;
+
+ EntryImpl* entry;
+ int rv = NewEntry(Addr(next->Data()->contents), &entry);
+ if (rv) {
+ STRESS_NOTREACHED();
+ rankings_.Remove(next, list, false);
+ if (rv == ERR_INVALID_ADDRESS) {
+ // There is nothing linked from the index. Delete the rankings node.
+ DeleteBlock(next->address(), true);
+ }
+ return NULL;
+ }
+
+ if (entry->dirty()) {
+ // We cannot trust this entry.
+ InternalDoomEntry(entry);
+ entry->Release();
+ return NULL;
+ }
+
+ if (!entry->Update()) {
+ STRESS_NOTREACHED();
+ entry->Release();
+ return NULL;
+ }
+
+ // Note that it is unfortunate (but possible) for this entry to be clean, but
+ // not actually the real entry. In other words, we could have lost this entry
+ // from the index, and it could have been replaced with a newer one. It's not
+ // worth checking that this entry is "the real one", so we just return it and
+ // let the enumeration continue; this entry will be evicted at some point, and
+ // the regular path will work with the real entry. With time, this problem
+ // will disappear because this scenario is just a bug.
+
+ // Make sure that we save the key for later.
+ entry->GetKey();
+
+ return entry;
+}
+
+EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
+ if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
+ deleted_entry->Release();
+ stats_.OnEvent(Stats::CREATE_MISS);
+ Trace("create entry miss ");
+ return NULL;
+ }
+
+ // We are attempting to create an entry and found out that the entry was
+ // previously deleted.
+
+ eviction_.OnCreateEntry(deleted_entry);
+ entry_count_++;
+
+ stats_.OnEvent(Stats::RESURRECT_HIT);
+ Trace("Resurrect entry hit ");
+ return deleted_entry;
+}
+
+void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
+ LOG(WARNING) << "Destroying invalid entry.";
+ Trace("Destroying invalid entry 0x%p", entry);
+
+ entry->SetPointerForInvalidEntry(GetCurrentEntryId());
+
+ eviction_.OnDoomEntry(entry);
+ entry->InternalDoom();
+
+ if (!new_eviction_)
+ DecreaseNumEntries();
+ stats_.OnEvent(Stats::INVALID_ENTRY);
+}
+
+void BackendImpl::AddStorageSize(int32 bytes) {
+ data_->header.num_bytes += bytes;
+ DCHECK_GE(data_->header.num_bytes, 0);
+}
+
+void BackendImpl::SubstractStorageSize(int32 bytes) {
+ data_->header.num_bytes -= bytes;
+ DCHECK_GE(data_->header.num_bytes, 0);
+}
+
+void BackendImpl::IncreaseNumRefs() {
+ num_refs_++;
+ if (max_refs_ < num_refs_)
+ max_refs_ = num_refs_;
+}
+
+void BackendImpl::DecreaseNumRefs() {
+ DCHECK(num_refs_);
+ num_refs_--;
+
+ if (!num_refs_ && disabled_)
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
+}
+
+void BackendImpl::IncreaseNumEntries() {
+ data_->header.num_entries++;
+ DCHECK_GT(data_->header.num_entries, 0);
+}
+
+void BackendImpl::DecreaseNumEntries() {
+ data_->header.num_entries--;
+ if (data_->header.num_entries < 0) {
+ NOTREACHED();
+ data_->header.num_entries = 0;
+ }
+}
+
+void BackendImpl::LogStats() {
+ StatsItems stats;
+ GetStats(&stats);
+
+ for (size_t index = 0; index < stats.size(); index++)
+ VLOG(1) << stats[index].first << ": " << stats[index].second;
+}
+
+void BackendImpl::ReportStats() {
+ CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
+
+ int current_size = data_->header.num_bytes / (1024 * 1024);
+ int max_size = max_size_ / (1024 * 1024);
+ int hit_ratio_as_percentage = stats_.GetHitRatio();
+
+ CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
+ // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
+ // ratio of that bin's total count to the count in the same bin in the Size2
+ // histogram.
+ if (base::RandInt(0, 99) < hit_ratio_as_percentage)
+ CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
+ CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
+ if (!max_size)
+ max_size++;
+ CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);
+
+ CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
+ static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
+ CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
+ static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
+ stats_.SetCounter(Stats::MAX_ENTRIES, 0);
+
+ CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
+ static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
+ CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
+ static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
+ CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
+ static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
+ stats_.SetCounter(Stats::FATAL_ERROR, 0);
+ stats_.SetCounter(Stats::DOOM_CACHE, 0);
+ stats_.SetCounter(Stats::DOOM_RECENT, 0);
+
+ int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ if (!data_->header.create_time || !data_->header.lru.filled) {
+ int cause = data_->header.create_time ? 0 : 1;
+ if (!data_->header.lru.filled)
+ cause |= 2;
+ CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
+ CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
+ return;
+ }
+
+ // This is an up to date client that will report FirstEviction() data. After
+ // that event, start reporting this:
+
+ CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
+ // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
+ // time is the ratio of that bin's total count to the count in the same bin in
+ // the TotalTime histogram.
+ if (base::RandInt(0, 99) < hit_ratio_as_percentage)
+ CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));
+
+ int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+ stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
+
+ // We may see users with no use_hours at this point if this is the first time
+ // we are running this code.
+ if (use_hours)
+ use_hours = total_hours - use_hours;
+
+ if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
+ return;
+
+ CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
+ // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
+ // is the ratio of that bin's total count to the count in the same bin in the
+ // UseTime histogram.
+ if (base::RandInt(0, 99) < hit_ratio_as_percentage)
+ CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
+ CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);
+
+ int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
+ CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
+
+ int avg_size = data_->header.num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
+ CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);
+
+ CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
+ data_->header.num_entries * 100 / (mask_ + 1));
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+ CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
+
+ if (new_eviction_) {
+ CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
+ data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
+ data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
+ data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
+ data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
+ }
+
+ stats_.ResetRatios();
+ stats_.SetCounter(Stats::TRIM_ENTRY, 0);
+
+ if (cache_type_ == net::DISK_CACHE)
+ block_files_.ReportStats();
+}
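
[Editorial note] The "HitRatioBy*" histograms rely on the sampling trick spelled out in the comments above: each client reports into the conditional histogram with probability equal to its hit ratio, so dividing the two histograms bin by bin recovers the average hit ratio for that bin. A standalone illustration (a plain hook, not Chromium's UMA macros):

    #include <cstdlib>

    // |record| stands in for a histogram hook; names match the ones used above.
    void ReportSizeHistograms(int hit_ratio_percent, int size_mb,
                              void (*record)(const char* name, int sample)) {
      record("Size2", size_mb);
      if (std::rand() % 100 < hit_ratio_percent)  // report with p = hit ratio
        record("HitRatioBySize2", size_mb);
    }
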
+
+void BackendImpl::UpgradeTo2_1() {
+ // 2.1 is basically the same as 2.0, except that new fields are actually
+ // updated by the new eviction algorithm.
+ DCHECK(0x20000 == data_->header.version);
+ data_->header.version = 0x20001;
+ data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
+}
+
+bool BackendImpl::CheckIndex() {
+ DCHECK(data_);
+
+ size_t current_size = index_->GetLength();
+ if (current_size < sizeof(Index)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ if (new_eviction_) {
+ // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion >> 16 != data_->header.version >> 16) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ if (kCurrentVersion == data_->header.version) {
+ // We need file version 2.1 for the new eviction algorithm.
+ UpgradeTo2_1();
+ }
+ } else {
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion != data_->header.version) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ }
+
+ if (!data_->header.table_len) {
+ LOG(ERROR) << "Invalid table size";
+ return false;
+ }
+
+ if (current_size < GetIndexSize(data_->header.table_len) ||
+ data_->header.table_len & (kBaseTableLen - 1)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ AdjustMaxCacheSize(data_->header.table_len);
+
+#if !defined(NET_BUILD_STRESS_CACHE)
+ if (data_->header.num_bytes < 0 ||
+ (max_size_ < kint32max - kDefaultCacheSize &&
+ data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
+ LOG(ERROR) << "Invalid cache (current) size";
+ return false;
+ }
+#endif
+
+ if (data_->header.num_entries < 0) {
+ LOG(ERROR) << "Invalid number of entries";
+ return false;
+ }
+
+ if (!mask_)
+ mask_ = data_->header.table_len - 1;
+
+ // Load the table into memory with a single read.
+ scoped_ptr<char[]> buf(new char[current_size]);
+ return index_->Read(buf.get(), current_size, 0);
+}
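
[Editorial note] CheckIndex() validates the table length and then derives mask_ = table_len - 1; since the index uses power-of-two table lengths in practice, a bucket is selected by masking the hash, as in data_->table[hash & mask_]. The core of that addressing as a one-line sketch:

    #include <cstdint>

    // Assumes |table_len| is a power of two, as the index uses in practice.
    inline uint32_t BucketForHash(uint32_t hash, uint32_t table_len) {
      return hash & (table_len - 1);  // equivalent to hash % table_len
    }
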
+
+int BackendImpl::CheckAllEntries() {
+ int num_dirty = 0;
+ int num_entries = 0;
+ DCHECK(mask_ < kuint32max);
+ for (unsigned int i = 0; i <= mask_; i++) {
+ Addr address(data_->table[i]);
+ if (!address.is_initialized())
+ continue;
+ for (;;) {
+ EntryImpl* tmp;
+ int ret = NewEntry(address, &tmp);
+ if (ret) {
+ STRESS_NOTREACHED();
+ return ret;
+ }
+ scoped_refptr<EntryImpl> cache_entry;
+ cache_entry.swap(&tmp);
+
+ if (cache_entry->dirty())
+ num_dirty++;
+ else if (CheckEntry(cache_entry.get()))
+ num_entries++;
+ else
+ return ERR_INVALID_ENTRY;
+
+ DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
+ address.set_value(cache_entry->GetNextAddress());
+ if (!address.is_initialized())
+ break;
+ }
+ }
+
+ Trace("CheckAllEntries End");
+ if (num_entries + num_dirty != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
+ " " << data_->header.num_entries;
+ DCHECK_LT(num_entries, data_->header.num_entries);
+ return ERR_NUM_ENTRIES_MISMATCH;
+ }
+
+ return num_dirty;
+}
+
+bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
+ bool ok = block_files_.IsValid(cache_entry->entry()->address());
+ ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
+ EntryStore* data = cache_entry->entry()->Data();
+ for (size_t i = 0; i < arraysize(data->data_addr); i++) {
+ if (data->data_addr[i]) {
+ Addr address(data->data_addr[i]);
+ if (address.is_block_file())
+ ok = ok && block_files_.IsValid(address);
+ }
+ }
+
+ return ok && cache_entry->rankings()->VerifyHash();
+}
+
+int BackendImpl::MaxBuffersSize() {
+ static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
+ static bool done = false;
+
+ if (!done) {
+ const int kMaxBuffersSize = 30 * 1024 * 1024;
+
+ // We want to use up to 2% of the computer's memory.
+ total_memory = total_memory * 2 / 100;
+ if (total_memory > kMaxBuffersSize || total_memory <= 0)
+ total_memory = kMaxBuffersSize;
+
+ done = true;
+ }
+
+ return static_cast<int>(total_memory);
+}
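
[Editorial note] MaxBuffersSize() budgets 2% of physical memory for in-memory entry buffers, capped at 30 MB. The same calculation restated as a pure function, purely to illustrate the logic above:

    #include <cstdint>

    int64_t SketchMaxBuffersSize(int64_t physical_memory_bytes) {
      const int64_t kCap = 30 * 1024 * 1024;              // 30 MB, as above
      int64_t budget = physical_memory_bytes * 2 / 100;   // 2% of RAM
      if (budget <= 0 || budget > kCap)
        budget = kCap;
      return budget;
    }
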
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/backend_impl.h b/chromium/net/disk_cache/backend_impl.h
new file mode 100644
index 00000000000..982bee64818
--- /dev/null
+++ b/chromium/net/disk_cache/backend_impl.h
@@ -0,0 +1,400 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_BACKEND_IMPL_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/timer/timer.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/eviction.h"
+#include "net/disk_cache/in_flight_backend_io.h"
+#include "net/disk_cache/rankings.h"
+#include "net/disk_cache/stats.h"
+#include "net/disk_cache/stress_support.h"
+#include "net/disk_cache/trace.h"
+
+namespace net {
+class NetLog;
+} // namespace net
+
+namespace disk_cache {
+
+struct Index;
+
+enum BackendFlags {
+ kNone = 0,
+ kMask = 1, // A mask (for the index table) was specified.
+ kMaxSize = 1 << 1, // A maximum size was provided.
+ kUnitTestMode = 1 << 2, // We are modifying the behavior for testing.
+ kUpgradeMode = 1 << 3, // This is the upgrade tool (dump).
+ kNewEviction = 1 << 4, // Use of new eviction was specified.
+ kNoRandom = 1 << 5, // Don't add randomness to the behavior.
+ kNoLoadProtection = 1 << 6, // Don't act conservatively under load.
+ kNoBuffering = 1 << 7 // Disable extended IO buffering.
+};
+
+// This class implements the Backend interface. An object of this
+// class handles the operations of the cache for a particular profile.
+class NET_EXPORT_PRIVATE BackendImpl : public Backend {
+ friend class Eviction;
+ public:
+ BackendImpl(const base::FilePath& path, base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log);
+ // mask can be used to limit the usable size of the hash table, for testing.
+ BackendImpl(const base::FilePath& path, uint32 mask,
+ base::MessageLoopProxy* cache_thread, net::NetLog* net_log);
+ virtual ~BackendImpl();
+
+ // Performs general initialization for this current instance of the cache.
+ int Init(const CompletionCallback& callback);
+
+ // Performs the actual initialization and final cleanup on destruction.
+ int SyncInit();
+ void CleanupCache();
+
+ // Same behavior as OpenNextEntry but walks the list from back to front.
+ int OpenPrevEntry(void** iter, Entry** prev_entry,
+ const CompletionCallback& callback);
+
+ // Synchronous implementation of the asynchronous interface.
+ int SyncOpenEntry(const std::string& key, Entry** entry);
+ int SyncCreateEntry(const std::string& key, Entry** entry);
+ int SyncDoomEntry(const std::string& key);
+ int SyncDoomAllEntries();
+ int SyncDoomEntriesBetween(base::Time initial_time,
+ base::Time end_time);
+ int SyncDoomEntriesSince(base::Time initial_time);
+ int SyncOpenNextEntry(void** iter, Entry** next_entry);
+ int SyncOpenPrevEntry(void** iter, Entry** prev_entry);
+ void SyncEndEnumeration(void* iter);
+ void SyncOnExternalCacheHit(const std::string& key);
+
+ // Open or create an entry for the given |key| or |iter|.
+ EntryImpl* OpenEntryImpl(const std::string& key);
+ EntryImpl* CreateEntryImpl(const std::string& key);
+ EntryImpl* OpenNextEntryImpl(void** iter);
+ EntryImpl* OpenPrevEntryImpl(void** iter);
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Sets the cache type for this backend.
+ void SetType(net::CacheType type);
+
+ // Returns the full name for an external storage file.
+ base::FilePath GetFileName(Addr address) const;
+
+ // Returns the actual file used to store a given (non-external) address.
+ MappedFile* File(Addr address);
+
+ // Returns a weak pointer to the background queue.
+ base::WeakPtr<InFlightBackendIO> GetBackgroundQueue();
+
+ // Creates an external storage file.
+ bool CreateExternalFile(Addr* address);
+
+ // Creates a new storage block of size block_count.
+ bool CreateBlock(FileType block_type, int block_count,
+ Addr* block_address);
+
+ // Deletes a given storage block. Setting |deep| to true zero-fills the
+ // related storage in addition to releasing the block.
+ void DeleteBlock(Addr block_address, bool deep);
+
+ // Retrieves a pointer to the LRU-related data.
+ LruData* GetLruData();
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(EntryImpl* entry, bool modified);
+
+ // A node was recovered from a crash; it may not be on the index, so this
+ // method checks it and takes the appropriate action.
+ void RecoveredEntry(CacheRankingsBlock* rankings);
+
+ // Permanently deletes an entry, but still keeps track of it.
+ void InternalDoomEntry(EntryImpl* entry);
+
+#if defined(NET_BUILD_STRESS_CACHE)
+ // Returns the address of the entry linked to the entry at a given |address|.
+ CacheAddr GetNextAddr(Addr address);
+
+ // Verifies that |entry| is not currently reachable through the index.
+ void NotLinked(EntryImpl* entry);
+#endif
+
+ // Removes all references to this entry.
+ void RemoveEntry(EntryImpl* entry);
+
+ // This method must be called when an entry is released for the last time, so
+ // the entry should not be used anymore. |address| is the cache address of the
+ // entry.
+ void OnEntryDestroyBegin(Addr address);
+
+ // This method must be called after all resources for an entry have been
+ // released.
+ void OnEntryDestroyEnd();
+
+ // If the data stored by the provided |rankings| points to an open entry,
+ // returns a pointer to that entry, otherwise returns NULL. Note that this
+ // method does NOT increase the ref counter for the entry.
+ EntryImpl* GetOpenEntry(CacheRankingsBlock* rankings) const;
+
+ // Returns the id being used on this run of the cache.
+ int32 GetCurrentEntryId() const;
+
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
+
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32 old_size, int32 new_size);
+
+ // Logs requests that are denied due to being too big.
+ void TooMuchStorageRequested(int32 size);
+
+ // Returns true if a temporary buffer is allowed to be extended.
+ bool IsAllocAllowed(int current_size, int new_size);
+
+ // Tracks the release of |size| bytes by an entry buffer.
+ void BufferDeleted(int size);
+
+ // Only intended for testing the two previous methods.
+ int GetTotalBuffersSize() const {
+ return buffer_bytes_;
+ }
+
+ // Returns true if this instance seems to be under heavy load.
+ bool IsLoaded() const;
+
+ // Returns the full histogram name, for the given base |name| and experiment,
+ // and the current cache type. The name will be "DiskCache.t.name_e" where t
+ // is the cache type and e the provided |experiment|.
+ std::string HistogramName(const char* name, int experiment) const;
+
+ net::CacheType cache_type() const {
+ return cache_type_;
+ }
+
+ bool read_only() const {
+ return read_only_;
+ }
+
+ // Returns a weak pointer to this object.
+ base::WeakPtr<BackendImpl> GetWeakPtr();
+
+ // Returns true if we should send histograms for this user again. The caller
+ // must call this function only once per run (because it always returns the
+ // same thing on a given run).
+ bool ShouldReportAgain();
+
+ // Reports some data when we filled up the cache.
+ void FirstEviction();
+
+ // Reports a critical error (and disables the cache).
+ void CriticalError(int error);
+
+ // Reports an uncommon, recoverable error.
+ void ReportError(int error);
+
+ // Called when an interesting event should be logged (counted).
+ void OnEvent(Stats::Counters an_event);
+
+ // Keeps track of payload access (doesn't include metadata).
+ void OnRead(int bytes);
+ void OnWrite(int bytes);
+
+ // Timer callback to calculate usage statistics.
+ void OnStatsTimer();
+
+ // Handles the pending asynchronous IO count.
+ void IncrementIoCount();
+ void DecrementIoCount();
+
+ // Sets internal parameters to enable unit testing mode.
+ void SetUnitTestMode();
+
+ // Sets internal parameters to enable upgrade mode (for internal tools).
+ void SetUpgradeMode();
+
+ // Sets the eviction algorithm to version 2.
+ void SetNewEviction();
+
+ // Sets an explicit set of BackendFlags.
+ void SetFlags(uint32 flags);
+
+ // Clears the counter of references to test handling of corruptions.
+ void ClearRefCountForTest();
+
+ // Sends a dummy operation through the operation queue, for unit tests.
+ int FlushQueueForTest(const CompletionCallback& callback);
+
+ // Runs the provided task on the cache thread. The task will be automatically
+ // deleted after it runs.
+ int RunTaskForTest(const base::Closure& task,
+ const CompletionCallback& callback);
+
+ // Trims an entry (all if |empty| is true) from the cache. This method should
+ // be called directly on the cache thread.
+ void TrimForTest(bool empty);
+
+ // Trims an entry (all if |empty| is true) from the list of deleted
+ // entries. This method should be called directly on the cache thread.
+ void TrimDeletedListForTest(bool empty);
+
+ // Performs a simple self-check, and returns the number of dirty items
+ // or an error code (negative value).
+ int SelfCheck();
+
+ // Ensures the index is flushed to disk (a no-op on platforms with mmap).
+ void FlushIndex();
+
+ // Backend implementation.
+ virtual net::CacheType GetCacheType() const OVERRIDE;
+ virtual int32 GetEntryCount() const OVERRIDE;
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual void EndEnumeration(void** iter) OVERRIDE;
+ virtual void GetStats(StatsItems* stats) OVERRIDE;
+ virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
+
+ private:
+ typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap;
+
+ // Creates a new backing file for the cache index.
+ bool CreateBackingStore(disk_cache::File* file);
+ bool InitBackingStore(bool* file_created);
+ void AdjustMaxCacheSize(int table_len);
+
+ bool InitStats();
+ void StoreStats();
+
+ // Deletes the cache and starts again.
+ void RestartCache(bool failure);
+ void PrepareForRestart();
+
+ // Creates a new entry object. Returns zero on success, or a disk_cache error
+ // on failure.
+ int NewEntry(Addr address, EntryImpl** entry);
+
+ // Returns a given entry from the cache. The entry to match is determined by
+ // key and hash, and the returned entry may be the matched one or its parent
+ // on the list of entries with the same hash (or bucket). To look for a parent
+ // of a given entry, |entry_addr| should be grabbed from that entry, so that
+ // if it doesn't match the entry on the index, we know that it was replaced
+ // with a new entry; in this case |*match_error| will be set to true and the
+ // return value will be NULL.
+ EntryImpl* MatchEntry(const std::string& key, uint32 hash, bool find_parent,
+ Addr entry_addr, bool* match_error);
+
+ // Opens the next or previous entry on a cache iteration.
+ EntryImpl* OpenFollowingEntry(bool forward, void** iter);
+
+ // Opens the next or previous entry on a single list. If successful,
+ // |from_entry| will be updated to point to the new entry, otherwise it will
+ // be set to NULL; in other words, it is used as an explicit iterator.
+ bool OpenFollowingEntryFromList(bool forward, Rankings::List list,
+ CacheRankingsBlock** from_entry,
+ EntryImpl** next_entry);
+
+ // Returns the entry that is pointed by |next|, from the given |list|.
+ EntryImpl* GetEnumeratedEntry(CacheRankingsBlock* next, Rankings::List list);
+
+ // Re-opens an entry that was previously deleted.
+ EntryImpl* ResurrectEntry(EntryImpl* deleted_entry);
+
+ void DestroyInvalidEntry(EntryImpl* entry);
+
+ // Handles the used storage count.
+ void AddStorageSize(int32 bytes);
+ void SubstractStorageSize(int32 bytes);
+
+ // Update the number of referenced cache entries.
+ void IncreaseNumRefs();
+ void DecreaseNumRefs();
+ void IncreaseNumEntries();
+ void DecreaseNumEntries();
+
+ // Dumps current cache statistics to the log.
+ void LogStats();
+
+ // Send UMA stats.
+ void ReportStats();
+
+ // Upgrades the index file to version 2.1.
+ void UpgradeTo2_1();
+
+ // Performs basic checks on the index file. Returns false on failure.
+ bool CheckIndex();
+
+ // Part of the self test. Returns the number of dirty entries, or an error.
+ int CheckAllEntries();
+
+ // Part of the self test. Returns false if the entry is corrupt.
+ bool CheckEntry(EntryImpl* cache_entry);
+
+ // Returns the maximum total memory for the memory buffers.
+ int MaxBuffersSize();
+
+ InFlightBackendIO background_queue_; // The controller of pending operations.
+ scoped_refptr<MappedFile> index_; // The main cache index.
+ base::FilePath path_; // Path to the folder used as backing storage.
+ Index* data_; // Pointer to the index data.
+ BlockFiles block_files_; // Set of files used to store all data.
+ Rankings rankings_; // Rankings to be able to trim the cache.
+ uint32 mask_; // Binary mask to map a hash to the hash table.
+ int32 max_size_; // Maximum data size for this instance.
+ Eviction eviction_; // Handler of the eviction algorithm.
+ EntriesMap open_entries_; // Map of open entries.
+ int num_refs_; // Number of referenced cache entries.
+ int max_refs_; // Max number of referenced cache entries.
+ int num_pending_io_; // Number of pending IO operations.
+ int entry_count_; // Number of entries accessed lately.
+ int byte_count_; // Number of bytes read/written lately.
+ int buffer_bytes_; // Total size of the temporary entries' buffers.
+ int up_ticks_; // The number of timer ticks received (OnStatsTimer).
+ net::CacheType cache_type_;
+ int uma_report_; // Controls transmission of UMA data.
+ uint32 user_flags_; // Flags set by the user.
+ bool init_; // controls the initialization of the system.
+ bool restarted_;
+ bool unit_test_;
+ bool read_only_; // Prevents updates of the rankings data (used by tools).
+ bool disabled_;
+ bool new_eviction_; // What eviction algorithm should be used.
+ bool first_timer_; // True if the timer has not been called.
+ bool user_load_; // True if we see a high load coming from the caller.
+
+ net::NetLog* net_log_;
+
+ Stats stats_; // Usage statistics.
+ scoped_ptr<base::RepeatingTimer<BackendImpl> > timer_; // Usage timer.
+ base::WaitableEvent done_; // Signals the end of background work.
+ scoped_refptr<TraceObject> trace_object_; // Initializes internal tracing.
+ base::WeakPtrFactory<BackendImpl> ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackendImpl);
+};
+
+// Returns the preferred max cache size given the available disk space.
+NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BACKEND_IMPL_H_
diff --git a/chromium/net/disk_cache/backend_unittest.cc b/chromium/net/disk_cache/backend_unittest.cc
new file mode 100644
index 00000000000..7eeeee1fd83
--- /dev/null
+++ b/chromium/net/disk_cache/backend_unittest.cc
@@ -0,0 +1,3415 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/file_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/port.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "net/base/cache_type.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/mapped_file.h"
+#include "net/disk_cache/mem_backend_impl.h"
+#include "net/disk_cache/simple/simple_backend_impl.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_test_util.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "net/disk_cache/tracing_cache_backend.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+using base::Time;
+
+namespace {
+
+const char kExistingEntryKey[] = "existing entry key";
+
+scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
+ const base::Thread& cache_thread,
+ base::FilePath& cache_path) {
+ net::TestCompletionCallback cb;
+
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ cache_path, cache_thread.message_loop_proxy(), NULL));
+ int rv = cache->Init(cb.callback());
+ if (cb.GetResult(rv) != net::OK)
+ return scoped_ptr<disk_cache::BackendImpl>();
+
+ disk_cache::Entry* entry = NULL;
+ rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
+ if (cb.GetResult(rv) != net::OK)
+ return scoped_ptr<disk_cache::BackendImpl>();
+ entry->Close();
+
+ return cache.Pass();
+}
+
+} // namespace
+
+// Tests that can run with different types of caches.
+class DiskCacheBackendTest : public DiskCacheTestWithCache {
+ protected:
+ void BackendBasics();
+ void BackendKeying();
+ void BackendShutdownWithPendingFileIO(bool fast);
+ void BackendShutdownWithPendingIO(bool fast);
+ void BackendShutdownWithPendingCreate(bool fast);
+ void BackendSetSize();
+ void BackendLoad();
+ void BackendChain();
+ void BackendValidEntry();
+ void BackendInvalidEntry();
+ void BackendInvalidEntryRead();
+ void BackendInvalidEntryWithLoad();
+ void BackendTrimInvalidEntry();
+ void BackendTrimInvalidEntry2();
+ void BackendEnumerations();
+ void BackendEnumerations2();
+ void BackendInvalidEntryEnumeration();
+ void BackendFixEnumerators();
+ void BackendDoomRecent();
+
+ // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL,
+ // will be filled with times used by DoomEntriesSince and DoomEntriesBetween.
+ // There are 4 entries after doomed_start and 2 after doomed_end.
+ void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
+
+ void BackendDoomBetween();
+ void BackendTransaction(const std::string& name, int num_entries, bool load);
+ void BackendRecoverInsert();
+ void BackendRecoverRemove();
+ void BackendRecoverWithEviction();
+ void BackendInvalidEntry2();
+ void BackendInvalidEntry3();
+ void BackendInvalidEntry7();
+ void BackendInvalidEntry8();
+ void BackendInvalidEntry9(bool eviction);
+ void BackendInvalidEntry10(bool eviction);
+ void BackendInvalidEntry11(bool eviction);
+ void BackendTrimInvalidEntry12();
+ void BackendDoomAll();
+ void BackendDoomAll2();
+ void BackendInvalidRankings();
+ void BackendInvalidRankings2();
+ void BackendDisable();
+ void BackendDisable2();
+ void BackendDisable3();
+ void BackendDisable4();
+ void TracingBackendBasics();
+
+ bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
+ bool EnumerateAndMatchKeys(int max_to_open,
+ void** iter,
+ std::set<std::string>* keys_to_match,
+ size_t* count);
+};
+
+void DiskCacheBackendTest::BackendBasics() {
+ InitCache();
+ disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
+ EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
+ ASSERT_TRUE(NULL != entry1);
+ entry1->Close();
+ entry1 = NULL;
+
+ ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
+ ASSERT_TRUE(NULL != entry1);
+ entry1->Close();
+ entry1 = NULL;
+
+ EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
+ ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
+ EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
+ ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
+ ASSERT_TRUE(NULL != entry1);
+ ASSERT_TRUE(NULL != entry2);
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ disk_cache::Entry* entry3 = NULL;
+ ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
+ ASSERT_TRUE(NULL != entry3);
+ EXPECT_TRUE(entry2 == entry3);
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ EXPECT_EQ(net::OK, DoomEntry("some other key"));
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ entry1->Close();
+ entry2->Close();
+ entry3->Close();
+
+ EXPECT_EQ(net::OK, DoomEntry("the first key"));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
+ ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
+ entry1->Doom();
+ entry1->Close();
+ EXPECT_EQ(net::OK, DoomEntry("some other key"));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ entry2->Close();
+}
+
+TEST_F(DiskCacheBackendTest, Basics) {
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
+ SetNewEviction();
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
+ SetMemoryOnlyMode();
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheBasics) {
+ SetCacheType(net::APP_CACHE);
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendBasics();
+}
+
+void DiskCacheBackendTest::BackendKeying() {
+ InitCache();
+ const char* kName1 = "the first key";
+ const char* kName2 = "the first Key";
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
+
+ ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
+ EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
+ entry2->Close();
+
+ char buffer[30];
+ base::strlcpy(buffer, kName1, arraysize(buffer));
+ ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
+ EXPECT_TRUE(entry1 == entry2);
+ entry2->Close();
+
+ base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
+ ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
+ EXPECT_TRUE(entry1 == entry2);
+ entry2->Close();
+
+ base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
+ ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
+ EXPECT_TRUE(entry1 == entry2);
+ entry2->Close();
+
+ // Now verify long keys.
+ char buffer2[20000];
+ memset(buffer2, 's', sizeof(buffer2));
+ buffer2[1023] = '\0';
+ ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
+ entry2->Close();
+
+ buffer2[1023] = 'g';
+ buffer2[19999] = '\0';
+ ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
+ entry2->Close();
+ entry1->Close();
+}
+
+TEST_F(DiskCacheBackendTest, Keying) {
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
+ SetNewEviction();
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
+ SetMemoryOnlyMode();
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheKeying) {
+ SetCacheType(net::APP_CACHE);
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendKeying();
+}
+
+TEST_F(DiskCacheTest, CreateBackend) {
+ net::TestCompletionCallback cb;
+
+ {
+ ASSERT_TRUE(CleanupCacheDir());
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ // Test the private factory method(s).
+ scoped_ptr<disk_cache::Backend> cache;
+ cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
+ ASSERT_TRUE(cache.get());
+ cache.reset();
+
+ // Now test the public API.
+ int rv =
+ disk_cache::CreateCacheBackend(net::DISK_CACHE,
+ net::CACHE_BACKEND_DEFAULT,
+ cache_path_,
+ 0,
+ false,
+ cache_thread.message_loop_proxy().get(),
+ NULL,
+ &cache,
+ cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ ASSERT_TRUE(cache.get());
+ cache.reset();
+
+ rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
+ net::CACHE_BACKEND_DEFAULT,
+ base::FilePath(), 0,
+ false, NULL, NULL, &cache,
+ cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ ASSERT_TRUE(cache.get());
+ cache.reset();
+ }
+
+ base::MessageLoop::current()->RunUntilIdle();
+}
+
+// Tests that |BackendImpl| fails to initialize with a missing file.
+TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
+ ASSERT_TRUE(CopyTestCache("bad_entry"));
+ base::FilePath filename = cache_path_.AppendASCII("data_1");
+ base::DeleteFile(filename, false);
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ net::TestCompletionCallback cb;
+
+ bool prev = base::ThreadRestrictions::SetIOAllowed(false);
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ cache_path_, cache_thread.message_loop_proxy().get(), NULL));
+ int rv = cache->Init(cb.callback());
+ EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
+ base::ThreadRestrictions::SetIOAllowed(prev);
+
+ cache.reset();
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, ExternalFiles) {
+ InitCache();
+ // First, let's create a file on the folder.
+ base::FilePath filename = cache_path_.AppendASCII("f_000001");
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));
+
+ // Now let's create a file with the cache.
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("key", &entry));
+ ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
+ entry->Close();
+
+ // And verify that the first file is still there.
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ ASSERT_EQ(kSize, file_util::ReadFile(filename, buffer2->data(), kSize));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
+}
+
+// Tests that we deal with file-level pending operations at destruction time.
+void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
+ net::TestCompletionCallback cb;
+ int rv;
+
+ {
+ ASSERT_TRUE(CleanupCacheDir());
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ uint32 flags = disk_cache::kNoBuffering;
+ if (!fast)
+ flags |= disk_cache::kNoRandom;
+
+ UseCurrentThread();
+ CreateBackend(flags, NULL);
+
+ disk_cache::EntryImpl* entry;
+ rv = cache_->CreateEntry(
+ "some key", reinterpret_cast<disk_cache::Entry**>(&entry),
+ cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ const int kSize = 25000;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+
+ for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
+ // We are using the current thread as the cache thread because we want to
+ // be able to call this method directly to make sure that the OS (instead
+ // of us switching threads) is returning IO pending.
+ rv =
+ entry->WriteDataImpl(0, i, buffer.get(), kSize, cb.callback(), false);
+ if (rv == net::ERR_IO_PENDING)
+ break;
+ EXPECT_EQ(kSize, rv);
+ }
+
+ // Don't call Close() to avoid going through the queue or we'll deadlock
+ // waiting for the operation to finish.
+ entry->Release();
+
+ // The cache destructor will see one pending operation here.
+ cache_.reset();
+
+ if (rv == net::ERR_IO_PENDING) {
+ if (fast)
+ EXPECT_FALSE(cb.have_result());
+ else
+ EXPECT_TRUE(cb.have_result());
+ }
+ }
+
+ base::MessageLoop::current()->RunUntilIdle();
+
+#if defined(OS_WIN)
+ // Wait for the actual operation to complete, or we'll keep a file handle that
+ // may cause issues later. Note that on Posix systems even though this test
+ // uses a single thread, the actual IO is posted to a worker thread and the
+ // cache destructor breaks the link to reach cb when the operation completes.
+ rv = cb.GetResult(rv);
+#endif
+}
+
+TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
+ BackendShutdownWithPendingFileIO(false);
+}
+
+// Here and below, tests that simulate crashes are not compiled in LeakSanitizer
+// builds because they contain a lot of intentional memory leaks.
+// The wrapper scripts used to run tests under Valgrind Memcheck and
+// Heapchecker will also disable these tests under those tools. See:
+// tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
+// tools/heapcheck/net_unittests.gtest-heapcheck.txt
+#if !defined(LEAK_SANITIZER)
+// We'll be leaking from this test.
+TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
+ // The integrity test sets kNoRandom so there's a version mismatch if we don't
+ // force new eviction.
+ SetNewEviction();
+ BackendShutdownWithPendingFileIO(true);
+}
+#endif
+
+// Tests that we deal with background-thread pending operations.
+void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
+ net::TestCompletionCallback cb;
+
+ {
+ ASSERT_TRUE(CleanupCacheDir());
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ uint32 flags = disk_cache::kNoBuffering;
+ if (!fast)
+ flags |= disk_cache::kNoRandom;
+
+ CreateBackend(flags, &cache_thread);
+
+ disk_cache::Entry* entry;
+ int rv = cache_->CreateEntry("some key", &entry, cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ entry->Close();
+
+ // The cache destructor will see one pending operation here.
+ cache_.reset();
+ }
+
+ base::MessageLoop::current()->RunUntilIdle();
+}
+
+TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
+ BackendShutdownWithPendingIO(false);
+}
+
+#if !defined(LEAK_SANITIZER)
+// We'll be leaking from this test.
+TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
+ // The integrity test sets kNoRandom so there's a version mismatch if we don't
+ // force new eviction.
+ SetNewEviction();
+ BackendShutdownWithPendingIO(true);
+}
+#endif
+
+// Tests that we deal with create-type pending operations.
+void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
+ net::TestCompletionCallback cb;
+
+ {
+ ASSERT_TRUE(CleanupCacheDir());
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ disk_cache::BackendFlags flags =
+ fast ? disk_cache::kNone : disk_cache::kNoRandom;
+ CreateBackend(flags, &cache_thread);
+
+ disk_cache::Entry* entry;
+ int rv = cache_->CreateEntry("some key", &entry, cb.callback());
+ ASSERT_EQ(net::ERR_IO_PENDING, rv);
+
+ cache_.reset();
+ EXPECT_FALSE(cb.have_result());
+ }
+
+ base::MessageLoop::current()->RunUntilIdle();
+}
+
+TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
+ BackendShutdownWithPendingCreate(false);
+}
+
+#if !defined(LEAK_SANITIZER)
+// We'll be leaking an entry from this test.
+TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
+ // The integrity test sets kNoRandom so there's a version mismatch if we don't
+ // force new eviction.
+ SetNewEviction();
+ BackendShutdownWithPendingCreate(true);
+}
+#endif
+
+TEST_F(DiskCacheTest, TruncatedIndex) {
+ ASSERT_TRUE(CleanupCacheDir());
+ base::FilePath index = cache_path_.AppendASCII("index");
+ ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));
+
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ net::TestCompletionCallback cb;
+
+ scoped_ptr<disk_cache::Backend> backend;
+ int rv =
+ disk_cache::CreateCacheBackend(net::DISK_CACHE,
+ net::CACHE_BACKEND_BLOCKFILE,
+ cache_path_,
+ 0,
+ false,
+ cache_thread.message_loop_proxy().get(),
+ NULL,
+ &backend,
+ cb.callback());
+ ASSERT_NE(net::OK, cb.GetResult(rv));
+
+ ASSERT_FALSE(backend);
+}
+
+void DiskCacheBackendTest::BackendSetSize() {
+ const int cache_size = 0x10000; // 64 kB
+ SetMaxSize(cache_size);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
+ memset(buffer->data(), 0, cache_size);
+ EXPECT_EQ(cache_size / 10,
+ WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
+ << "normal file";
+
+ EXPECT_EQ(net::ERR_FAILED,
+ WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
+ << "file size above the limit";
+
+ // By doubling the total size, we make this file cacheable.
+ SetMaxSize(cache_size * 2);
+ EXPECT_EQ(cache_size / 5,
+ WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
+
+ // Let's fill up the cache!
+ SetMaxSize(cache_size * 10);
+ EXPECT_EQ(cache_size * 3 / 4,
+ WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
+ entry->Close();
+ FlushQueueForTest();
+
+ SetMaxSize(cache_size);
+
+ // The cache is 95% full.
+
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+ EXPECT_EQ(cache_size / 10,
+ WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
+
+ disk_cache::Entry* entry2;
+ ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
+ EXPECT_EQ(cache_size / 10,
+ WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
+ entry2->Close(); // This will trigger the cache trim.
+
+ EXPECT_NE(net::OK, OpenEntry(first, &entry2));
+
+ FlushQueueForTest(); // Make sure that we are done trimming the cache.
+ FlushQueueForTest(); // We may have posted two tasks to evict stuff.
+
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(second, &entry));
+ EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, SetSize) {
+ BackendSetSize();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
+ SetNewEviction();
+ BackendSetSize();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
+ SetMemoryOnlyMode();
+ BackendSetSize();
+}
+
+void DiskCacheBackendTest::BackendLoad() {
+ InitCache();
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ disk_cache::Entry* entries[100];
+ for (int i = 0; i < 100; i++) {
+ std::string key = GenerateKey(true);
+ ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
+ }
+ EXPECT_EQ(100, cache_->GetEntryCount());
+
+ for (int i = 0; i < 100; i++) {
+ int source1 = rand() % 100;
+ int source2 = rand() % 100;
+ disk_cache::Entry* temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ for (int i = 0; i < 100; i++) {
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
+ EXPECT_TRUE(entry == entries[i]);
+ entry->Close();
+ entries[i]->Doom();
+ entries[i]->Close();
+ }
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, Load) {
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
+ SetNewEviction();
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
+ SetMaxSize(0x100000);
+ SetMemoryOnlyMode();
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheLoad) {
+ SetCacheType(net::APP_CACHE);
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
+ SetCacheType(net::SHADER_CACHE);
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+// Tests the chaining of an entry to the current head.
+void DiskCacheBackendTest::BackendChain() {
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ InitCache();
+
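+ // With only two hash buckets, the two keys below are expected to share a
+ // bucket, so the second entry gets chained to the first one.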
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, Chain) {
+ BackendChain();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionChain) {
+ SetNewEviction();
+ BackendChain();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheChain) {
+ SetCacheType(net::APP_CACHE);
+ BackendChain();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendChain();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
+ SetNewEviction();
+ InitCache();
+
+ disk_cache::Entry* entry;
+ for (int i = 0; i < 100; i++) {
+ std::string name(base::StringPrintf("Key %d", i));
+ ASSERT_EQ(net::OK, CreateEntry(name, &entry));
+ entry->Close();
+ if (i < 90) {
+ // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
+ ASSERT_EQ(net::OK, OpenEntry(name, &entry));
+ entry->Close();
+ }
+ }
+
+ // The first eviction must come from list 1 (10% limit), the second must come
+ // from list 0.
+ TrimForTest(false);
+ EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
+ TrimForTest(false);
+ EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
+
+ // Double check that we still have the list tails.
+ ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
+ entry->Close();
+}
+
+// Before looking for invalid entries, let's check a valid entry.
+void DiskCacheBackendTest::BackendValidEntry() {
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
+ entry->Close();
+ SimulateCrash();
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ memset(buffer2->data(), 0, kSize);
+ EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
+ entry->Close();
+ EXPECT_STREQ(buffer1->data(), buffer2->data());
+}
+
+TEST_F(DiskCacheBackendTest, ValidEntry) {
+ BackendValidEntry();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
+ SetNewEviction();
+ BackendValidEntry();
+}
+
+// The same logic as the previous test (ValidEntry), but this time we force the
+// entry to be invalid, simulating a crash in the middle.
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntry() {
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ memset(buffer->data(), 0, kSize);
+ base::strlcpy(buffer->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+ SimulateCrash();
+
+ EXPECT_NE(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+#if !defined(LEAK_SANITIZER)
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntry) {
+ BackendInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
+ SetNewEviction();
+ BackendInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
+ SetCacheType(net::APP_CACHE);
+ BackendInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendInvalidEntry();
+}
+
+// Almost the same test, but this time crash the cache after reading an entry.
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntryRead() {
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ memset(buffer->data(), 0, kSize);
+ base::strlcpy(buffer->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
+
+ SimulateCrash();
+
+ if (type_ == net::APP_CACHE) {
+ // Reading an entry and crashing should not make it dirty.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ entry->Close();
+ } else {
+ EXPECT_NE(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ }
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
+ BackendInvalidEntryRead();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
+ SetNewEviction();
+ BackendInvalidEntryRead();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
+ SetCacheType(net::APP_CACHE);
+ BackendInvalidEntryRead();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendInvalidEntryRead();
+}
+
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ InitCache();
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 100;
+ disk_cache::Entry* entries[kNumEntries];
+ for (int i = 0; i < kNumEntries; i++) {
+ std::string key = GenerateKey(true);
+ ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
+ }
+ EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+
+ for (int i = 0; i < kNumEntries; i++) {
+ int source1 = rand() % kNumEntries;
+ int source2 = rand() % kNumEntries;
+ disk_cache::Entry* temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
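+ // Remember all the keys, but close only the first half of the entries; the
+ // rest stay open and will look dirty after the simulated crash.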
+ std::string keys[kNumEntries];
+ for (int i = 0; i < kNumEntries; i++) {
+ keys[i] = entries[i]->GetKey();
+ if (i < kNumEntries / 2)
+ entries[i]->Close();
+ }
+
+ SimulateCrash();
+
+ for (int i = kNumEntries / 2; i < kNumEntries; i++) {
+ disk_cache::Entry* entry;
+ EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
+ }
+
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
+ entry->Close();
+ }
+
+ EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
+ BackendInvalidEntryWithLoad();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
+ SetNewEviction();
+ BackendInvalidEntryWithLoad();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
+ SetCacheType(net::APP_CACHE);
+ BackendInvalidEntryWithLoad();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendInvalidEntryWithLoad();
+}
+
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendTrimInvalidEntry() {
+ const int kSize = 0x3000; // 12 kB
+ SetMaxSize(kSize * 10);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ memset(buffer->data(), 0, kSize);
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+
+ // Simulate a crash.
+ SimulateCrash();
+
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ SetMaxSize(kSize);
+ entry->Close(); // Trim the cache.
+ FlushQueueForTest();
+
+ // If we evicted the entry in less than 20 ms, we have one entry in the cache;
+ // if it took more than that, we posted a task and we'll delete the second
+ // entry too.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ // This may not be thread-safe in general, but for now it's OK, so add some
+ // ThreadSanitizer annotations to ignore data races on cache_.
+ // See http://crbug.com/55970
+ ANNOTATE_IGNORE_READS_BEGIN();
+ EXPECT_GE(1, cache_->GetEntryCount());
+ ANNOTATE_IGNORE_READS_END();
+
+ EXPECT_NE(net::OK, OpenEntry(first, &entry));
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
+ BackendTrimInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
+ SetNewEviction();
+ BackendTrimInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
+ SetMask(0xf); // 16-entry table.
+
+ const int kSize = 0x3000; // 12 kB
+ SetMaxSize(kSize * 40);
+ InitCache();
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ memset(buffer->data(), 0, kSize);
+ disk_cache::Entry* entry;
+
+ // Writing 32 entries to this cache chains most of them.
+ for (int i = 0; i < 32; i++) {
+ std::string key(base::StringPrintf("some key %d", i));
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ // Note that we are not closing the entries.
+ }
+
+ // Simulate a crash.
+ SimulateCrash();
+
+ ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+
+ FlushQueueForTest();
+ EXPECT_EQ(33, cache_->GetEntryCount());
+ SetMaxSize(kSize);
+
+ // For the new eviction code, all corrupt entries are on the second list so
+ // they are not going away that easy.
+ if (new_eviction_) {
+ EXPECT_EQ(net::OK, DoomAllEntries());
+ }
+
+ entry->Close(); // Trim the cache.
+ FlushQueueForTest();
+
+ // We may abort the eviction before cleaning up everything.
+ base::MessageLoop::current()->RunUntilIdle();
+ FlushQueueForTest();
+ // If it's not clear enough: we may still have eviction tasks running at this
+ // time, so the number of entries is changing while we read it.
+ ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ EXPECT_GE(30, cache_->GetEntryCount());
+ ANNOTATE_IGNORE_READS_AND_WRITES_END();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
+ BackendTrimInvalidEntry2();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
+ SetNewEviction();
+ BackendTrimInvalidEntry2();
+}
+#endif // !defined(LEAK_SANITIZER)
+
+void DiskCacheBackendTest::BackendEnumerations() {
+ InitCache();
+ Time initial = Time::Now();
+
+ const int kNumEntries = 100;
+ for (int i = 0; i < kNumEntries; i++) {
+ std::string key = GenerateKey(true);
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Close();
+ }
+ EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+ Time final = Time::Now();
+
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ int count = 0;
+ Time last_modified[kNumEntries];
+ Time last_used[kNumEntries];
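+ // First enumeration: remember the timestamps and check that they fall
+ // between the creation of the first and the last entry.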
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ ASSERT_TRUE(NULL != entry);
+ if (count < kNumEntries) {
+ last_modified[count] = entry->GetLastModified();
+ last_used[count] = entry->GetLastUsed();
+ EXPECT_TRUE(initial <= last_modified[count]);
+ EXPECT_TRUE(final >= last_modified[count]);
+ }
+
+ entry->Close();
+ count++;
+ }
+ EXPECT_EQ(kNumEntries, count);
+
+ iter = NULL;
+ count = 0;
+ // The previous enumeration should not have changed the timestamps.
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ ASSERT_TRUE(NULL != entry);
+ if (count < kNumEntries) {
+ EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
+ EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
+ }
+ entry->Close();
+ count++;
+ }
+ EXPECT_EQ(kNumEntries, count);
+}
+
+TEST_F(DiskCacheBackendTest, Enumerations) {
+ BackendEnumerations();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
+ SetNewEviction();
+ BackendEnumerations();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
+ SetMemoryOnlyMode();
+ BackendEnumerations();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendEnumerations();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
+ SetCacheType(net::APP_CACHE);
+ BackendEnumerations();
+}
+
+// Verifies enumerations while entries are open.
+void DiskCacheBackendTest::BackendEnumerations2() {
+ InitCache();
+ const std::string first("first");
+ const std::string second("second");
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
+ entry1->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
+ entry2->Close();
+ FlushQueueForTest();
+
+ // Make sure that the timestamp is not the same.
+ AddDelay();
+ ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
+ EXPECT_EQ(entry2->GetKey(), second);
+
+ // Two entries and the iterator pointing at "first".
+ entry1->Close();
+ entry2->Close();
+
+ // The iterator should still be valid, so we should not crash.
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
+ EXPECT_EQ(entry2->GetKey(), first);
+ entry2->Close();
+ cache_->EndEnumeration(&iter);
+
+ // Modify the oldest entry and get the newest element.
+ ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
+ EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
+ if (type_ == net::APP_CACHE) {
+ // The list is not updated.
+ EXPECT_EQ(entry2->GetKey(), second);
+ } else {
+ EXPECT_EQ(entry2->GetKey(), first);
+ }
+
+ entry1->Close();
+ entry2->Close();
+ cache_->EndEnumeration(&iter);
+}
+
+TEST_F(DiskCacheBackendTest, Enumerations2) {
+ BackendEnumerations2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
+ SetNewEviction();
+ BackendEnumerations2();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
+ SetMemoryOnlyMode();
+ BackendEnumerations2();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
+ SetCacheType(net::APP_CACHE);
+ BackendEnumerations2();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendEnumerations2();
+}
+
+// Verify that ReadData calls do not update the LRU cache
+// when using the SHADER_CACHE type.
+TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
+ SetCacheType(net::SHADER_CACHE);
+ InitCache();
+ const std::string first("first");
+ const std::string second("second");
+ disk_cache::Entry *entry1, *entry2;
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
+
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
+ entry2->Close();
+
+ FlushQueueForTest();
+
+ // Make sure that the timestamp is not the same.
+ AddDelay();
+
+ // Read from the last item in the LRU.
+ EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
+ entry1->Close();
+
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
+ EXPECT_EQ(entry2->GetKey(), second);
+ entry2->Close();
+ cache_->EndEnumeration(&iter);
+}
+
+#if !defined(LEAK_SANITIZER)
+// Verify handling of invalid entries while doing enumerations.
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry *entry, *entry1, *entry2;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
+ entry1->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
+ EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
+
+ std::string key2("Another key");
+ ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
+ entry2->Close();
+ ASSERT_EQ(2, cache_->GetEntryCount());
+
+ SimulateCrash();
+
+ void* iter = NULL;
+ int count = 0;
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ ASSERT_TRUE(NULL != entry);
+ EXPECT_EQ(key2, entry->GetKey());
+ entry->Close();
+ count++;
+ }
+ EXPECT_EQ(1, count);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
+ BackendInvalidEntryEnumeration();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
+ SetNewEviction();
+ BackendInvalidEntryEnumeration();
+}
+#endif // !defined(LEAK_SANITIZER)
+
+// Tests that if for some reason entries are modified close to existing cache
+// iterators, we don't generate fatal errors or reset the cache.
+void DiskCacheBackendTest::BackendFixEnumerators() {
+ InitCache();
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 10;
+ for (int i = 0; i < kNumEntries; i++) {
+ std::string key = GenerateKey(true);
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Close();
+ }
+ EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+
+ disk_cache::Entry *entry1, *entry2;
+ void* iter1 = NULL;
+ void* iter2 = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
+ ASSERT_TRUE(NULL != entry1);
+ entry1->Close();
+ entry1 = NULL;
+
+ // Let's go to the middle of the list.
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ if (entry1)
+ entry1->Close();
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
+ ASSERT_TRUE(NULL != entry1);
+
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
+ ASSERT_TRUE(NULL != entry2);
+ entry2->Close();
+ }
+
+ // Messing with entry1 will modify entry2->next.
+ entry1->Doom();
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
+ ASSERT_TRUE(NULL != entry2);
+
+ // The link entry2->entry1 should be broken.
+ EXPECT_NE(entry2->GetKey(), entry1->GetKey());
+ entry1->Close();
+ entry2->Close();
+
+ // And the second iterator should keep working.
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
+ ASSERT_TRUE(NULL != entry2);
+ entry2->Close();
+
+ cache_->EndEnumeration(&iter1);
+ cache_->EndEnumeration(&iter2);
+}
+
+TEST_F(DiskCacheBackendTest, FixEnumerators) {
+ BackendFixEnumerators();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
+ SetNewEviction();
+ BackendFixEnumerators();
+}
+
+void DiskCacheBackendTest::BackendDoomRecent() {
+ InitCache();
+
+ disk_cache::Entry *entry;
+ ASSERT_EQ(net::OK, CreateEntry("first", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("second", &entry));
+ entry->Close();
+ FlushQueueForTest();
+
+ AddDelay();
+ Time middle = Time::Now();
+
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
+ entry->Close();
+ FlushQueueForTest();
+
+ AddDelay();
+ Time final = Time::Now();
+
+ ASSERT_EQ(4, cache_->GetEntryCount());
+ EXPECT_EQ(net::OK, DoomEntriesSince(final));
+ ASSERT_EQ(4, cache_->GetEntryCount());
+
+ EXPECT_EQ(net::OK, DoomEntriesSince(middle));
+ ASSERT_EQ(2, cache_->GetEntryCount());
+
+ ASSERT_EQ(net::OK, OpenEntry("second", &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, DoomRecent) {
+ BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
+ SetNewEviction();
+ BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
+ SetMemoryOnlyMode();
+ BackendDoomRecent();
+}
+
+void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
+ base::Time* doomed_end) {
+ InitCache();
+
+ const int kSize = 50;
+ // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
+ const int kOffset = 10 + 1024 * 1024;
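+ // A write at offset 0 and another at kOffset + kSize should therefore end up
+ // in two different child entries of each sparse entry.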
+
+ disk_cache::Entry* entry0 = NULL;
+ disk_cache::Entry* entry1 = NULL;
+ disk_cache::Entry* entry2 = NULL;
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+
+ ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
+ ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
+ ASSERT_EQ(kSize,
+ WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
+ entry0->Close();
+
+ FlushQueueForTest();
+ AddDelay();
+ if (doomed_start)
+ *doomed_start = base::Time::Now();
+
+ // Order in rankings list:
+ // first_part1, first_part2, second_part1, second_part2
+ ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
+ ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
+ ASSERT_EQ(kSize,
+ WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
+ entry1->Close();
+
+ ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
+ ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
+ ASSERT_EQ(kSize,
+ WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
+ entry2->Close();
+
+ FlushQueueForTest();
+ AddDelay();
+ if (doomed_end)
+ *doomed_end = base::Time::Now();
+
+ // Order in rankings list:
+ // third_part1, fourth_part1, third_part2, fourth_part2
+ disk_cache::Entry* entry3 = NULL;
+ disk_cache::Entry* entry4 = NULL;
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
+ ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
+ ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
+ ASSERT_EQ(kSize,
+ WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
+ ASSERT_EQ(kSize,
+ WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
+ entry3->Close();
+ entry4->Close();
+
+ FlushQueueForTest();
+ AddDelay();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
+ SetMemoryOnlyMode();
+ base::Time start;
+ InitSparseCache(&start, NULL);
+ DoomEntriesSince(start);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
+ base::Time start;
+ InitSparseCache(&start, NULL);
+ DoomEntriesSince(start);
+ // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
+ // MemBackendImpl does not. That's why the expected value differs here from
+ // MemoryOnlyDoomEntriesSinceSparse.
+ EXPECT_EQ(3, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
+ SetMemoryOnlyMode();
+ InitSparseCache(NULL, NULL);
+ EXPECT_EQ(net::OK, DoomAllEntries());
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DoomAllSparse) {
+ InitSparseCache(NULL, NULL);
+ EXPECT_EQ(net::OK, DoomAllEntries());
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+void DiskCacheBackendTest::BackendDoomBetween() {
+ InitCache();
+
+ disk_cache::Entry *entry;
+ ASSERT_EQ(net::OK, CreateEntry("first", &entry));
+ entry->Close();
+ FlushQueueForTest();
+
+ AddDelay();
+ Time middle_start = Time::Now();
+
+ ASSERT_EQ(net::OK, CreateEntry("second", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry));
+ entry->Close();
+ FlushQueueForTest();
+
+ AddDelay();
+ Time middle_end = Time::Now();
+
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
+ entry->Close();
+ FlushQueueForTest();
+
+ AddDelay();
+ Time final = Time::Now();
+
+ ASSERT_EQ(4, cache_->GetEntryCount());
+ EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
+ ASSERT_EQ(2, cache_->GetEntryCount());
+
+ ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
+ entry->Close();
+
+ EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
+ ASSERT_EQ(1, cache_->GetEntryCount());
+
+ ASSERT_EQ(net::OK, OpenEntry("first", &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, DoomBetween) {
+ BackendDoomBetween();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
+ SetNewEviction();
+ BackendDoomBetween();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
+ SetMemoryOnlyMode();
+ BackendDoomBetween();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
+ SetMemoryOnlyMode();
+ base::Time start, end;
+ InitSparseCache(&start, &end);
+ DoomEntriesBetween(start, end);
+ EXPECT_EQ(3, cache_->GetEntryCount());
+
+ start = end;
+ end = base::Time::Now();
+ DoomEntriesBetween(start, end);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
+ base::Time start, end;
+ InitSparseCache(&start, &end);
+ DoomEntriesBetween(start, end);
+ EXPECT_EQ(9, cache_->GetEntryCount());
+
+ start = end;
+ end = base::Time::Now();
+ DoomEntriesBetween(start, end);
+ EXPECT_EQ(3, cache_->GetEntryCount());
+}
+
+void DiskCacheBackendTest::BackendTransaction(const std::string& name,
+ int num_entries, bool load) {
+ success_ = false;
+ ASSERT_TRUE(CopyTestCache(name));
+ DisableFirstCleanup();
+
+ uint32 mask;
+ if (load) {
+ mask = 0xf;
+ SetMaxSize(0x100000);
+ } else {
+ // Clear the settings from the previous run.
+ mask = 0;
+ SetMaxSize(0);
+ }
+ SetMask(mask);
+
+ InitCache();
+ ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
+
+ std::string key("the first key");
+ disk_cache::Entry* entry1;
+ ASSERT_NE(net::OK, OpenEntry(key, &entry1));
+
+ int actual = cache_->GetEntryCount();
+ if (num_entries != actual) {
+ ASSERT_TRUE(load);
+ // If there is a heavy load, inserting an entry will make another entry
+ // dirty (on the hash bucket), so two entries are removed.
+ ASSERT_EQ(num_entries - 1, actual);
+ }
+
+ cache_.reset();
+ cache_impl_ = NULL;
+
+ ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
+ success_ = true;
+}
+
+void DiskCacheBackendTest::BackendRecoverInsert() {
+ // Tests with an empty cache.
+ BackendTransaction("insert_empty1", 0, false);
+ ASSERT_TRUE(success_) << "insert_empty1";
+ BackendTransaction("insert_empty2", 0, false);
+ ASSERT_TRUE(success_) << "insert_empty2";
+ BackendTransaction("insert_empty3", 0, false);
+ ASSERT_TRUE(success_) << "insert_empty3";
+
+ // Tests with one entry on the cache.
+ BackendTransaction("insert_one1", 1, false);
+ ASSERT_TRUE(success_) << "insert_one1";
+ BackendTransaction("insert_one2", 1, false);
+ ASSERT_TRUE(success_) << "insert_one2";
+ BackendTransaction("insert_one3", 1, false);
+ ASSERT_TRUE(success_) << "insert_one3";
+
+ // Tests with one hundred entries on the cache, tiny index.
+ BackendTransaction("insert_load1", 100, true);
+ ASSERT_TRUE(success_) << "insert_load1";
+ BackendTransaction("insert_load2", 100, true);
+ ASSERT_TRUE(success_) << "insert_load2";
+}
+
+TEST_F(DiskCacheBackendTest, RecoverInsert) {
+ BackendRecoverInsert();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
+ SetNewEviction();
+ BackendRecoverInsert();
+}
+
+void DiskCacheBackendTest::BackendRecoverRemove() {
+ // Removing the only element.
+ BackendTransaction("remove_one1", 0, false);
+ ASSERT_TRUE(success_) << "remove_one1";
+ BackendTransaction("remove_one2", 0, false);
+ ASSERT_TRUE(success_) << "remove_one2";
+ BackendTransaction("remove_one3", 0, false);
+ ASSERT_TRUE(success_) << "remove_one3";
+
+ // Removing the head.
+ BackendTransaction("remove_head1", 1, false);
+ ASSERT_TRUE(success_) << "remove_head1";
+ BackendTransaction("remove_head2", 1, false);
+ ASSERT_TRUE(success_) << "remove_head2";
+ BackendTransaction("remove_head3", 1, false);
+ ASSERT_TRUE(success_) << "remove_head3";
+
+ // Removing the tail.
+ BackendTransaction("remove_tail1", 1, false);
+ ASSERT_TRUE(success_) << "remove_tail1";
+ BackendTransaction("remove_tail2", 1, false);
+ ASSERT_TRUE(success_) << "remove_tail2";
+ BackendTransaction("remove_tail3", 1, false);
+ ASSERT_TRUE(success_) << "remove_tail3";
+
+ // Removing with one hundred entries on the cache, tiny index.
+ BackendTransaction("remove_load1", 100, true);
+ ASSERT_TRUE(success_) << "remove_load1";
+ BackendTransaction("remove_load2", 100, true);
+ ASSERT_TRUE(success_) << "remove_load2";
+ BackendTransaction("remove_load3", 100, true);
+ ASSERT_TRUE(success_) << "remove_load3";
+
+ // This case cannot be reverted.
+ BackendTransaction("remove_one4", 0, false);
+ ASSERT_TRUE(success_) << "remove_one4";
+ BackendTransaction("remove_head4", 1, false);
+ ASSERT_TRUE(success_) << "remove_head4";
+}
+
+TEST_F(DiskCacheBackendTest, RecoverRemove) {
+ BackendRecoverRemove();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
+ SetNewEviction();
+ BackendRecoverRemove();
+}
+
+void DiskCacheBackendTest::BackendRecoverWithEviction() {
+ success_ = false;
+ ASSERT_TRUE(CopyTestCache("insert_load1"));
+ DisableFirstCleanup();
+
+ SetMask(0xf);
+ SetMaxSize(0x1000);
+
+ // We should not crash here.
+ InitCache();
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
+ BackendRecoverWithEviction();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
+ SetNewEviction();
+ BackendRecoverWithEviction();
+}
+
+// Tests that the |BackendImpl| fails to start with the wrong cache version.
+TEST_F(DiskCacheTest, WrongVersion) {
+ ASSERT_TRUE(CopyTestCache("wrong_version"));
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ net::TestCompletionCallback cb;
+
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ cache_path_, cache_thread.message_loop_proxy().get(), NULL));
+ int rv = cache->Init(cb.callback());
+ ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
+}
+
+class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
+ public:
+ virtual ~BadEntropyProvider() {}
+
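+ // Always returning the same entropy keeps the field trial group assignment
+ // deterministic for these tests.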
+ virtual double GetEntropyForTrial(const std::string& trial_name,
+ uint32 randomization_seed) const OVERRIDE {
+ return 0.5;
+ }
+};
+
+// Tests that the disk cache successfully joins the control group, dropping the
+// existing cache in favour of a new empty cache.
+TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ scoped_ptr<disk_cache::BackendImpl> cache =
+ CreateExistingEntryCache(cache_thread, cache_path_);
+ ASSERT_TRUE(cache.get());
+ cache.reset();
+
+ // Instantiate the SimpleCacheTrial, forcing this run into the
+ // ExperimentControl group.
+ base::FieldTrialList field_trial_list(new BadEntropyProvider());
+ base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
+ "ExperimentControl");
+ net::TestCompletionCallback cb;
+ scoped_ptr<disk_cache::Backend> base_cache;
+ int rv =
+ disk_cache::CreateCacheBackend(net::DISK_CACHE,
+ net::CACHE_BACKEND_BLOCKFILE,
+ cache_path_,
+ 0,
+ true,
+ cache_thread.message_loop_proxy().get(),
+ NULL,
+ &base_cache,
+ cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ EXPECT_EQ(0, base_cache->GetEntryCount());
+}
+
+// Tests that the disk cache can restart in the control group preserving
+// existing entries.
+TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
+ // Instantiate the SimpleCacheTrial, forcing this run into the
+ // ExperimentControl group.
+ base::FieldTrialList field_trial_list(new BadEntropyProvider());
+ base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
+ "ExperimentControl");
+
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ scoped_ptr<disk_cache::BackendImpl> cache =
+ CreateExistingEntryCache(cache_thread, cache_path_);
+ ASSERT_TRUE(cache.get());
+
+ net::TestCompletionCallback cb;
+
+ const int kRestartCount = 5;
+ for (int i = 0; i < kRestartCount; ++i) {
+ cache.reset(new disk_cache::BackendImpl(
+ cache_path_, cache_thread.message_loop_proxy(), NULL));
+ int rv = cache->Init(cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ EXPECT_EQ(1, cache->GetEntryCount());
+
+ disk_cache::Entry* entry = NULL;
+ rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+ EXPECT_TRUE(entry);
+ entry->Close();
+ }
+}
+
+// Tests that the disk cache can leave the control group preserving existing
+// entries.
+TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ {
+ // Instantiate the SimpleCacheTrial, forcing this run into the
+ // ExperimentControl group.
+ base::FieldTrialList field_trial_list(new BadEntropyProvider());
+ base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
+ "ExperimentControl");
+
+ scoped_ptr<disk_cache::BackendImpl> cache =
+ CreateExistingEntryCache(cache_thread, cache_path_);
+ ASSERT_TRUE(cache.get());
+ }
+
+ // Instantiate the SimpleCacheTrial, forcing this run into the
+ // ExperimentNo group.
+ base::FieldTrialList field_trial_list(new BadEntropyProvider());
+ base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
+ net::TestCompletionCallback cb;
+
+ const int kRestartCount = 5;
+ for (int i = 0; i < kRestartCount; ++i) {
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ cache_path_, cache_thread.message_loop_proxy(), NULL));
+ int rv = cache->Init(cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ EXPECT_EQ(1, cache->GetEntryCount());
+
+ disk_cache::Entry* entry = NULL;
+ rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+ EXPECT_TRUE(entry);
+ entry->Close();
+ }
+}
+
+// Tests that the cache is properly restarted on recovery error.
+TEST_F(DiskCacheBackendTest, DeleteOld) {
+ ASSERT_TRUE(CopyTestCache("wrong_version"));
+ SetNewEviction();
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ net::TestCompletionCallback cb;
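+ // Disallow I/O on this thread; the backend is expected to do all file work
+ // (including deleting the old cache files) on the cache thread.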
+ bool prev = base::ThreadRestrictions::SetIOAllowed(false);
+ base::FilePath path(cache_path_);
+ int rv =
+ disk_cache::CreateCacheBackend(net::DISK_CACHE,
+ net::CACHE_BACKEND_BLOCKFILE,
+ path,
+ 0,
+ true,
+ cache_thread.message_loop_proxy().get(),
+ NULL,
+ &cache_,
+ cb.callback());
+ path.clear(); // Make sure path was captured by the previous call.
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ base::ThreadRestrictions::SetIOAllowed(prev);
+ cache_.reset();
+ EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
+}
+
+// We want to be able to deal with messed up entries on disk.
+void DiskCacheBackendTest::BackendInvalidEntry2() {
+ ASSERT_TRUE(CopyTestCache("bad_entry"));
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
+ EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
+ entry1->Close();
+
+ // CheckCacheIntegrity will fail at this point.
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry2) {
+ BackendInvalidEntry2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
+ SetNewEviction();
+ BackendInvalidEntry2();
+}
+
+// Tests that we don't crash or hang when enumerating this cache.
+void DiskCacheBackendTest::BackendInvalidEntry3() {
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ entry->Close();
+ }
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry3) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry3"));
+ BackendInvalidEntry3();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry4"));
+ SetNewEviction();
+ BackendInvalidEntry3();
+ DisableIntegrityCheck();
+}
+
+// Test that we handle a dirty entry on the LRU list, already replaced with
+// the same key, and with hash collisions.
+TEST_F(DiskCacheBackendTest, InvalidEntry4) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry3"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ TrimForTest(false);
+}
+
+// Test that we handle a dirty entry on the deleted list, already replaced with
+// the same key, and with hash collisions.
+TEST_F(DiskCacheBackendTest, InvalidEntry5) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry4"));
+ SetNewEviction();
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ TrimDeletedListForTest(false);
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry6) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry5"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ // There is a dirty entry (but marked as clean) at the end, pointing to a
+ // deleted entry through the hash collision list. We should not re-insert the
+ // deleted entry into the index table.
+
+ TrimForTest(false);
+ // The cache should be clean (as detected by CheckCacheIntegrity).
+}
+
+// Tests that we don't hang when there is a loop on the hash collision list.
+// The test cache could be a result of bug 69135.
+TEST_F(DiskCacheBackendTest, BadNextEntry1) {
+ ASSERT_TRUE(CopyTestCache("list_loop2"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ // The second entry points at itself, and the first entry is not accessible
+ // through the index, but it is at the head of the LRU.
+
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
+ entry->Close();
+
+ TrimForTest(false);
+ TrimForTest(false);
+ ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
+ entry->Close();
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+// Tests that we don't hang when there is a loop on the hash collision list.
+// The test cache could be a result of bug 69135.
+TEST_F(DiskCacheBackendTest, BadNextEntry2) {
+ ASSERT_TRUE(CopyTestCache("list_loop3"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ // There is a wide loop of 5 entries.
+
+ disk_cache::Entry* entry;
+ ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings3"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+
+ // The second entry is dirty, but removing it should not corrupt the list.
+ disk_cache::Entry* entry;
+ ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
+ ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
+
+ // This should not delete the cache.
+ entry->Doom();
+ FlushQueueForTest();
+ entry->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
+ entry->Close();
+}
+
+// Tests handling of corrupt entries by keeping the rankings node around, with
+// a fatal failure.
+void DiskCacheBackendTest::BackendInvalidEntry7() {
+ const int kSize = 0x3000; // 12 kB.
+ SetMaxSize(kSize * 10);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+
+ // Corrupt this entry.
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+
+ entry_impl->rankings()->Data()->next = 0;
+ entry_impl->rankings()->Store();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ // This should detect the bad entry.
+ EXPECT_NE(net::OK, OpenEntry(second, &entry));
+ EXPECT_EQ(1, cache_->GetEntryCount());
+
+ // We should delete the cache. The list still has a corrupt node.
+ void* iter = NULL;
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry7) {
+ BackendInvalidEntry7();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
+ SetNewEviction();
+ BackendInvalidEntry7();
+}
+
+// Tests handling of corrupt entries by keeping the rankings node around, with
+// a non-fatal failure.
+void DiskCacheBackendTest::BackendInvalidEntry8() {
+ const int kSize = 0x3000; // 12 kB
+ SetMaxSize(kSize * 10);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+
+ // Corrupt this entry.
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+
+ entry_impl->rankings()->Data()->contents = 0;
+ entry_impl->rankings()->Store();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ // This should detect the bad entry.
+ EXPECT_NE(net::OK, OpenEntry(second, &entry));
+ EXPECT_EQ(1, cache_->GetEntryCount());
+
+ // We should not delete the cache.
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry8) {
+ BackendInvalidEntry8();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
+ SetNewEviction();
+ BackendInvalidEntry8();
+}
+
+// Tests handling of corrupt entries detected by enumerations. Note that these
+// tests (xx9 to xx11) are basically just going through slightly different
+// code paths, so they are tightly coupled with the code, but that is better
+// than not testing the error handling code.
+void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
+ const int kSize = 0x3000; // 12 kB.
+ SetMaxSize(kSize * 10);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+
+ // Corrupt this entry.
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+
+ entry_impl->entry()->Data()->state = 0xbad;
+ entry_impl->entry()->Store();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ if (eviction) {
+ TrimForTest(false);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ TrimForTest(false);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ } else {
+ // We should detect the problem through the list, but we should not delete
+ // the entry, just fail the iteration.
+ void* iter = NULL;
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+
+ // Now a full iteration will work, and return one entry.
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+
+ // This should detect what's left of the bad entry.
+ EXPECT_NE(net::OK, OpenEntry(second, &entry));
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ }
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry9) {
+ BackendInvalidEntry9(false);
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
+ SetNewEviction();
+ BackendInvalidEntry9(false);
+}
+
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
+ BackendInvalidEntry9(true);
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
+ SetNewEviction();
+ BackendInvalidEntry9(true);
+}
+
+// Tests handling of corrupt entries detected by enumerations.
+void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
+ const int kSize = 0x3000; // 12 kB.
+ SetMaxSize(kSize * 10);
+ SetNewEviction();
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(first, &entry));
+ EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+
+ // Corrupt this entry.
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+
+ entry_impl->entry()->Data()->state = 0xbad;
+ entry_impl->entry()->Store();
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry));
+ entry->Close();
+ EXPECT_EQ(3, cache_->GetEntryCount());
+
+ // We have:
+ // List 0: third -> second (bad).
+ // List 1: first.
+
+ if (eviction) {
+ // Detection order: second -> first -> third.
+ TrimForTest(false);
+ EXPECT_EQ(3, cache_->GetEntryCount());
+ TrimForTest(false);
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ TrimForTest(false);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ } else {
+ // Detection order: third -> second -> first.
+ // We should detect the problem through the list, but we should not delete
+ // the entry.
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ EXPECT_EQ(first, entry->GetKey());
+ entry->Close();
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+ }
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry10) {
+ BackendInvalidEntry10(false);
+}
+
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
+ BackendInvalidEntry10(true);
+}
+
+// Tests handling of corrupt entries detected by enumerations.
+void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
+ const int kSize = 0x3000; // 12 kB.
+ SetMaxSize(kSize * 10);
+ SetNewEviction();
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(first, &entry));
+ EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(second, &entry));
+ EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
+
+ // Corrupt this entry.
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+
+ entry_impl->entry()->Data()->state = 0xbad;
+ entry_impl->entry()->Store();
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry));
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(3, cache_->GetEntryCount());
+
+ // We have:
+ // List 0: third.
+ // List 1: second (bad) -> first.
+
+ if (eviction) {
+ // Detection order: third -> first -> second.
+ TrimForTest(false);
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ TrimForTest(false);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ TrimForTest(false);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ } else {
+ // Detection order: third -> second.
+ // We should detect the problem through the list, but we should not delete
+ // the entry, just fail the iteration.
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+
+ // Now a full iteration will work, and return two entries.
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+ }
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry11) {
+ BackendInvalidEntry11(false);
+}
+
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
+ BackendInvalidEntry11(true);
+}
+
+// Tests handling of corrupt entries in the middle of a long eviction run.
+void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
+ const int kSize = 0x3000; // 12 kB
+ SetMaxSize(kSize * 10);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(first, &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry(second, &entry));
+
+ // Corrupt this entry.
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+
+ entry_impl->entry()->Data()->state = 0xbad;
+ entry_impl->entry()->Store();
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry));
+ entry->Close();
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
+ TrimForTest(true);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ entry->Close();
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
+ BackendTrimInvalidEntry12();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
+ SetNewEviction();
+ BackendTrimInvalidEntry12();
+}
+
+// We want to be able to deal with messed up entries on disk.
+void DiskCacheBackendTest::BackendInvalidRankings2() {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry *entry1, *entry2;
+ EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
+ ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
+ entry2->Close();
+
+ // CheckCacheIntegrity will fail at this point.
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidRankings2) {
+ BackendInvalidRankings2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
+ SetNewEviction();
+ BackendInvalidRankings2();
+}
+
+// If the LRU is corrupt, we delete the cache.
+void DiskCacheBackendTest::BackendInvalidRankings() {
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
+ entry->Close();
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
+ FlushQueueForTest(); // Allow the restart to finish.
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ InitCache();
+ BackendInvalidRankings();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ BackendInvalidRankings();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendInvalidRankings();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendInvalidRankings();
+}
+
+// If the LRU is corrupt and we have open entries, we disable the cache.
+void DiskCacheBackendTest::BackendDisable() {
+ disk_cache::Entry *entry1, *entry2;
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
+
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
+
+ entry1->Close();
+ FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
+ FlushQueueForTest(); // This one actually allows that task to complete.
+
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ InitCache();
+ BackendDisable();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ BackendDisable();
+}
+
+TEST_F(DiskCacheBackendTest, DisableFailure) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable();
+}
+
+// This is another type of corruption on the LRU; disable the cache.
+void DiskCacheBackendTest::BackendDisable2() {
+ EXPECT_EQ(8, cache_->GetEntryCount());
+
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ int count = 0;
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ ASSERT_TRUE(NULL != entry);
+ entry->Close();
+ count++;
+ ASSERT_LT(count, 9);
+ }
+
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess2) {
+ ASSERT_TRUE(CopyTestCache("list_loop"));
+ DisableFirstCleanup();
+ InitCache();
+ BackendDisable2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
+ ASSERT_TRUE(CopyTestCache("list_loop"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ BackendDisable2();
+}
+
+TEST_F(DiskCacheBackendTest, DisableFailure2) {
+ ASSERT_TRUE(CopyTestCache("list_loop"));
+ DisableFirstCleanup();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
+ ASSERT_TRUE(CopyTestCache("list_loop"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable2();
+}
+
+// If the index size changes when we disable the cache, we should not crash.
+void DiskCacheBackendTest::BackendDisable3() {
+ disk_cache::Entry *entry1, *entry2;
+ void* iter = NULL;
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
+ entry1->Close();
+
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
+ FlushQueueForTest();
+
+ ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
+ entry2->Close();
+
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess3) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ InitCache();
+ BackendDisable3();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ SetNewEviction();
+ InitCache();
+ BackendDisable3();
+}
+
+// If we disable the cache, already open entries should work as far as possible.
+void DiskCacheBackendTest::BackendDisable4() {
+ disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
+ void* iter = NULL;
+ ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
+
+ char key2[2000];
+ char key3[20000];
+ CacheTestFillBuffer(key2, sizeof(key2), true);
+ CacheTestFillBuffer(key3, sizeof(key3), true);
+ key2[sizeof(key2) - 1] = '\0';
+ key3[sizeof(key3) - 1] = '\0';
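+ // The long keys presumably force the key data to be stored outside the
+ // entry record itself, exercising those code paths while the cache is
+ // disabled.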
+ ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
+ ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
+
+ const int kBufSize = 20000;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
+ memset(buf->data(), 0, kBufSize);
+ EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
+ EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
+
+ // This line should disable the cache but not delete it.
+ EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+
+ EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
+
+ EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
+ EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
+ EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
+
+ EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
+ EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
+ EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
+
+ std::string key = entry2->GetKey();
+ EXPECT_EQ(sizeof(key2) - 1, key.size());
+ key = entry3->GetKey();
+ EXPECT_EQ(sizeof(key3) - 1, key.size());
+
+ entry1->Close();
+ entry2->Close();
+ entry3->Close();
+ FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
+ FlushQueueForTest(); // This one actually allows that task to complete.
+
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess4) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ InitCache();
+ BackendDisable4();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ InitCache();
+ BackendDisable4();
+}
+
+TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
+ MessageLoopHelper helper;
+
+ ASSERT_TRUE(CleanupCacheDir());
+ scoped_ptr<disk_cache::BackendImpl> cache;
+ cache.reset(new disk_cache::BackendImpl(
+ cache_path_, base::MessageLoopProxy::current().get(), NULL));
+ ASSERT_TRUE(NULL != cache.get());
+ cache->SetUnitTestMode();
+ ASSERT_EQ(net::OK, cache->SyncInit());
+
+ // Wait for a callback that never comes... about 2 secs :). The message loop
+ // has to run to allow invocation of the usage timer.
+ helper.WaitUntilCacheIoFinished(1);
+}
+
+TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
+ InitCache();
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("key", &entry));
+ entry->Close();
+ FlushQueueForTest();
+
+ disk_cache::StatsItems stats;
+ cache_->GetStats(&stats);
+ EXPECT_FALSE(stats.empty());
+
+ disk_cache::StatsItems::value_type hits("Create hit", "0x1");
+ EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
+
+ cache_.reset();
+
+ // Now open the cache and verify that the stats are still there.
+ DisableFirstCleanup();
+ InitCache();
+ EXPECT_EQ(1, cache_->GetEntryCount());
+
+ stats.clear();
+ cache_->GetStats(&stats);
+ EXPECT_FALSE(stats.empty());
+
+ EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
+}
+
+void DiskCacheBackendTest::BackendDoomAll() {
+ InitCache();
+
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
+ ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
+ entry1->Close();
+ entry2->Close();
+
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
+
+ ASSERT_EQ(4, cache_->GetEntryCount());
+ EXPECT_EQ(net::OK, DoomAllEntries());
+ ASSERT_EQ(0, cache_->GetEntryCount());
+
+ // We should stop posting tasks at some point (if we post any).
+ base::MessageLoop::current()->RunUntilIdle();
+
+ disk_cache::Entry *entry3, *entry4;
+ EXPECT_NE(net::OK, OpenEntry("third", &entry3));
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
+
+ EXPECT_EQ(net::OK, DoomAllEntries());
+ ASSERT_EQ(0, cache_->GetEntryCount());
+
+ entry1->Close();
+ entry2->Close();
+ entry3->Doom(); // The entry should be already doomed, but this must work.
+ entry3->Close();
+ entry4->Close();
+
+ // Now try with all references released.
+ ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
+ ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
+ entry1->Close();
+ entry2->Close();
+
+ ASSERT_EQ(2, cache_->GetEntryCount());
+ EXPECT_EQ(net::OK, DoomAllEntries());
+ ASSERT_EQ(0, cache_->GetEntryCount());
+
+ EXPECT_EQ(net::OK, DoomAllEntries());
+}
+
+TEST_F(DiskCacheBackendTest, DoomAll) {
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
+ SetNewEviction();
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
+ SetMemoryOnlyMode();
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
+ SetCacheType(net::APP_CACHE);
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
+ SetCacheType(net::SHADER_CACHE);
+ BackendDoomAll();
+}
+
+// If the index size changes when we doom the cache, we should not crash.
+void DiskCacheBackendTest::BackendDoomAll2() {
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ EXPECT_EQ(net::OK, DoomAllEntries());
+
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
+ entry->Close();
+
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DoomAll2) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ InitCache();
+ BackendDoomAll2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
+ ASSERT_TRUE(CopyTestCache("bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ SetNewEviction();
+ InitCache();
+ BackendDoomAll2();
+}
+
+// We should be able to create the same entry on multiple simultaneous instances
+// of the cache.
+TEST_F(DiskCacheTest, MultipleInstances) {
+ base::ScopedTempDir store1, store2;
+ ASSERT_TRUE(store1.CreateUniqueTempDir());
+ ASSERT_TRUE(store2.CreateUniqueTempDir());
+
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ net::TestCompletionCallback cb;
+
+ const int kNumberOfCaches = 2;
+ scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
+
+ int rv =
+ disk_cache::CreateCacheBackend(net::DISK_CACHE,
+ net::CACHE_BACKEND_DEFAULT,
+ store1.path(),
+ 0,
+ false,
+ cache_thread.message_loop_proxy().get(),
+ NULL,
+ &cache[0],
+ cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
+ net::CACHE_BACKEND_DEFAULT,
+ store2.path(),
+ 0,
+ false,
+ cache_thread.message_loop_proxy().get(),
+ NULL,
+ &cache[1],
+ cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
+
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ for (int i = 0; i < kNumberOfCaches; i++) {
+ rv = cache[i]->CreateEntry(key, &entry, cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ entry->Close();
+ }
+}
+
+// Test the six regions of the curve that determines the max cache size.
+TEST_F(DiskCacheTest, AutomaticMaxSize) {
+ const int kDefaultSize = 80 * 1024 * 1024;
+ int64 large_size = kDefaultSize;
+ int64 largest_size = kint32max;
+
+ // Region 1: expected = available * 0.8
+ EXPECT_EQ((kDefaultSize - 1) * 8 / 10,
+ disk_cache::PreferedCacheSize(large_size - 1));
+ EXPECT_EQ(kDefaultSize * 8 / 10,
+ disk_cache::PreferedCacheSize(large_size));
+ EXPECT_EQ(kDefaultSize - 1,
+ disk_cache::PreferedCacheSize(large_size * 10 / 8 - 1));
+
+ // Region 2: expected = default_size
+ EXPECT_EQ(kDefaultSize,
+ disk_cache::PreferedCacheSize(large_size * 10 / 8));
+ EXPECT_EQ(kDefaultSize,
+ disk_cache::PreferedCacheSize(large_size * 10 - 1));
+
+ // Region 3: expected = available * 0.1
+ EXPECT_EQ(kDefaultSize,
+ disk_cache::PreferedCacheSize(large_size * 10));
+ EXPECT_EQ((kDefaultSize * 25 - 1) / 10,
+ disk_cache::PreferedCacheSize(large_size * 25 - 1));
+
+ // Region 4: expected = default_size * 2.5
+ EXPECT_EQ(kDefaultSize * 25 / 10,
+ disk_cache::PreferedCacheSize(large_size * 25));
+ EXPECT_EQ(kDefaultSize * 25 / 10,
+ disk_cache::PreferedCacheSize(large_size * 100 - 1));
+ EXPECT_EQ(kDefaultSize * 25 / 10,
+ disk_cache::PreferedCacheSize(large_size * 100));
+ EXPECT_EQ(kDefaultSize * 25 / 10,
+ disk_cache::PreferedCacheSize(large_size * 250 - 1));
+
+ // Region 5: expected = available * 0.1
+ EXPECT_EQ(kDefaultSize * 25 / 10,
+ disk_cache::PreferedCacheSize(large_size * 250));
+ EXPECT_EQ(kint32max - 1,
+ disk_cache::PreferedCacheSize(largest_size * 100 - 1));
+
+ // Region 6: expected = kint32max
+ EXPECT_EQ(kint32max,
+ disk_cache::PreferedCacheSize(largest_size * 100));
+ EXPECT_EQ(kint32max,
+ disk_cache::PreferedCacheSize(largest_size * 10000));
+}
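+
+// As a quick illustration of the Region 1 expectations above: with roughly
+// 60 MB of available space (below the 100 MB knee implied by
+// large_size * 10 / 8), the preferred cache size would be about
+// 0.8 * 60 MB = 48 MB. The region boundaries are inferred here from the test
+// expectations only, not restated from the implementation.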
+
+// Tests that we can "migrate" a running instance from one experiment group to
+// another.
+TEST_F(DiskCacheBackendTest, Histograms) {
+ InitCache();
+ disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
+
+ for (int i = 1; i < 3; i++) {
+ CACHE_UMA(HOURS, "FillupTime", i, 28);
+ }
+}
+
+// Make sure that we keep the total memory used by the internal buffers under
+// control.
+TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 200;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, true);
+
+ for (int i = 0; i < 10; i++) {
+ SCOPED_TRACE(i);
+ // Allocate 2MB for this entry.
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
+ EXPECT_EQ(kSize,
+ WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
+ EXPECT_EQ(kSize,
+ WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
+
+ // Delete one of the buffers and truncate the other.
+ EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
+ EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
+
+ // Delete the second buffer, writing 10 bytes to disk.
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ }
+
+ entry->Close();
+ EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
+}
+
+// This test assumes at least 150MB of system memory.
+TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
+ InitCache();
+
+ const int kOneMB = 1024 * 1024;
+ EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
+ EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
+
+ EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
+ EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
+
+ EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
+ EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
+
+ cache_impl_->BufferDeleted(kOneMB);
+ EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
+
+ // Check the upper limit.
+ EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
+
+ for (int i = 0; i < 30; i++)
+ cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
+
+ EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
+}
+
+// Tests that sharing of external files works and we are able to delete the
+// files when we need to.
+TEST_F(DiskCacheBackendTest, FileSharing) {
+ InitCache();
+
+ disk_cache::Addr address(0x80000001);
+ ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
+ base::FilePath name = cache_impl_->GetFileName(address);
+
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
+ file->Init(name);
+
+#if defined(OS_WIN)
+ DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ DWORD access = GENERIC_READ | GENERIC_WRITE;
+ base::win::ScopedHandle file2(CreateFile(
+ name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
+ EXPECT_FALSE(file2.IsValid());
+
+ sharing |= FILE_SHARE_DELETE;
+ file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
+ OPEN_EXISTING, 0, NULL));
+ EXPECT_TRUE(file2.IsValid());
+#endif
+
+ EXPECT_TRUE(base::DeleteFile(name, false));
+
+ // We should be able to use the file.
+ const int kSize = 200;
+ char buffer1[kSize];
+ char buffer2[kSize];
+ memset(buffer1, 't', kSize);
+ memset(buffer2, 0, kSize);
+ EXPECT_TRUE(file->Write(buffer1, kSize, 0));
+ EXPECT_TRUE(file->Read(buffer2, kSize, 0));
+ EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
+
+ EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
+}
+
+TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
+ InitCache();
+
+ disk_cache::Entry* entry;
+
+ for (int i = 0; i < 2; ++i) {
+ std::string key = base::StringPrintf("key%d", i);
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Close();
+ }
+
+ // Ping the oldest entry.
+ cache_->OnExternalCacheHit("key0");
+
+ TrimForTest(false);
+
+ // Make sure the older key remains.
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
+ SetCacheType(net::SHADER_CACHE);
+ InitCache();
+
+ disk_cache::Entry* entry;
+
+ for (int i = 0; i < 2; ++i) {
+ std::string key = base::StringPrintf("key%d", i);
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Close();
+ }
+
+ // Ping the oldest entry.
+ cache_->OnExternalCacheHit("key0");
+
+ TrimForTest(false);
+
+ // Make sure the older key remains.
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
+ entry->Close();
+}
+
+void DiskCacheBackendTest::TracingBackendBasics() {
+ InitCache();
+ cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
+ cache_impl_ = NULL;
+ EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
+ if (!simple_cache_mode_) {
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ }
+
+ net::TestCompletionCallback cb;
+ disk_cache::Entry* entry = NULL;
+ EXPECT_NE(net::OK, OpenEntry("key", &entry));
+ EXPECT_TRUE(NULL == entry);
+
+ ASSERT_EQ(net::OK, CreateEntry("key", &entry));
+ EXPECT_TRUE(NULL != entry);
+
+ disk_cache::Entry* same_entry = NULL;
+ ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
+ EXPECT_TRUE(NULL != same_entry);
+
+ if (!simple_cache_mode_) {
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ }
+ entry->Close();
+ entry = NULL;
+ same_entry->Close();
+ same_entry = NULL;
+}
+
+TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
+ TracingBackendBasics();
+}
+
+// The simple cache backend isn't intended to work on Windows, which has very
+// different file system guarantees from the POSIX platforms it was designed
+// for.
+#if !defined(OS_WIN)
+
+TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
+ SetSimpleCacheMode();
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
+ SetSimpleCacheMode();
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
+ SetSimpleCacheMode();
+ SetCacheType(net::APP_CACHE);
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
+ SetSimpleCacheMode();
+ BackendSetSize();
+}
+
+// MacOS has a default open file limit of 256 files, which is incompatible with
+// this simple cache test.
+#if defined(OS_MACOSX)
+#define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
+#else
+#define SIMPLE_MAYBE_MACOS(TestName) TestName
+#endif
+
+TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
+ SetMaxSize(0x100000);
+ SetSimpleCacheMode();
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
+ SetSimpleCacheMode();
+ BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
+ SetSimpleCacheMode();
+ BackendDoomBetween();
+}
+
+// See http://crbug.com/237450.
+TEST_F(DiskCacheBackendTest, FLAKY_SimpleCacheDoomAll) {
+ SetSimpleCacheMode();
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, FLAKY_SimpleCacheAppCacheOnlyDoomAll) {
+ SetCacheType(net::APP_CACHE);
+ SetSimpleCacheMode();
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
+ SetSimpleCacheMode();
+ TracingBackendBasics();
+ // TODO(pasko): implement integrity checking on the Simple Backend.
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char* key = "the first key";
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ ASSERT_TRUE(entry != NULL);
+ entry->Close();
+ entry = NULL;
+
+ // To make sure that file creation has completed, we call Open again so that
+ // we block until the files have actually been created.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ASSERT_TRUE(entry != NULL);
+ entry->Close();
+ entry = NULL;
+
+ // Delete one of the files in the entry.
+ base::FilePath to_delete_file = cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
+ EXPECT_TRUE(base::PathExists(to_delete_file));
+ EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
+
+ // Failing to open the entry should delete the rest of these files.
+ ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
+
+ // Confirm the rest of the files are gone.
+ for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
+ base::FilePath
+ should_be_gone_file(cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, i)));
+ EXPECT_FALSE(base::PathExists(should_be_gone_file));
+ }
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char* key = "the first key";
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ disk_cache::Entry* null = NULL;
+ ASSERT_NE(null, entry);
+ entry->Close();
+ entry = NULL;
+
+ // To make sure that file creation has completed, we call Open again so that
+ // we block until the files have actually been created.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ASSERT_NE(null, entry);
+ entry->Close();
+ entry = NULL;
+
+ // Write an invalid header on stream 1.
+ base::FilePath entry_file1_path = cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 1));
+
+ disk_cache::SimpleFileHeader header;
+ header.initial_magic_number = GG_UINT64_C(0xbadf00d);
+ EXPECT_EQ(
+ implicit_cast<int>(sizeof(header)),
+ file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
+ sizeof(header)));
+ ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
+}
+
+// Tests that the Simple Cache Backend fails to initialize with non-matching
+// file structure on disk.
+TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
+ // Create a cache structure with the |BackendImpl|.
+ InitCache();
+ disk_cache::Entry* entry;
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ ASSERT_EQ(net::OK, CreateEntry("key", &entry));
+ ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
+ entry->Close();
+ cache_.reset();
+
+ // Check that the |SimpleBackendImpl| does not accept this structure.
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ disk_cache::SimpleBackendImpl* simple_cache =
+ new disk_cache::SimpleBackendImpl(cache_path_,
+ 0,
+ net::DISK_CACHE,
+ cache_thread.message_loop_proxy().get(),
+ NULL);
+ net::TestCompletionCallback cb;
+ int rv = simple_cache->Init(cb.callback());
+ EXPECT_NE(net::OK, cb.GetResult(rv));
+ delete simple_cache;
+ DisableIntegrityCheck();
+}
+
+// Tests that the |BackendImpl| refuses to initialize on top of the files
+// generated by the Simple Cache Backend.
+TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
+ // Create a cache structure with the |SimpleBackendImpl|.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* entry;
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ ASSERT_EQ(net::OK, CreateEntry("key", &entry));
+ ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
+ entry->Close();
+ cache_.reset();
+
+ // Check that the |BackendImpl| does not accept this structure.
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
+ cache_path_, base::MessageLoopProxy::current().get(), NULL);
+ cache->SetUnitTestMode();
+ net::TestCompletionCallback cb;
+ int rv = cache->Init(cb.callback());
+ EXPECT_NE(net::OK, cb.GetResult(rv));
+ delete cache;
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
+ SetSimpleCacheMode();
+ BackendFixEnumerators();
+}
+
+// Creates entries based on random keys. Stores these keys in |key_pool|.
+bool DiskCacheBackendTest::CreateSetOfRandomEntries(
+ std::set<std::string>* key_pool) {
+ const int kNumEntries = 10;
+
+ for (int i = 0; i < kNumEntries; ++i) {
+ std::string key = GenerateKey(true);
+ disk_cache::Entry* entry;
+ if (CreateEntry(key, &entry) != net::OK)
+ return false;
+ key_pool->insert(key);
+ entry->Close();
+ }
+ return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
+}
+
+// Performs iteration over the backend and checks that the keys of entries
+// opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
+// will be opened, if it is positive. Otherwise, iteration will continue until
+// OpenNextEntry stops returning net::OK.
+bool DiskCacheBackendTest::EnumerateAndMatchKeys(
+ int max_to_open,
+ void** iter,
+ std::set<std::string>* keys_to_match,
+ size_t* count) {
+ disk_cache::Entry* entry;
+
+ while (OpenNextEntry(iter, &entry) == net::OK) {
+ if (!entry)
+ return false;
+ EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
+ entry->Close();
+ ++(*count);
+ if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
+ break;
+ }
+
+ return true;
+}
+
+// Tests basic functionality of the SimpleBackend implementation of the
+// enumeration API.
+TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
+ SetSimpleCacheMode();
+ InitCache();
+ std::set<std::string> key_pool;
+ ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
+
+ // Check that enumeration returns all entries.
+ std::set<std::string> keys_to_match(key_pool);
+ void* iter = NULL;
+ size_t count = 0;
+ ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
+ cache_->EndEnumeration(&iter);
+ EXPECT_EQ(key_pool.size(), count);
+ EXPECT_TRUE(keys_to_match.empty());
+
+ // Check that opening entries does not affect enumeration.
+ keys_to_match = key_pool;
+ iter = NULL;
+ count = 0;
+ disk_cache::Entry* entry_opened_before;
+ ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
+ ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
+ &iter,
+ &keys_to_match,
+ &count));
+
+ disk_cache::Entry* entry_opened_middle;
+ ASSERT_EQ(net::OK,
+ OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
+ ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
+ cache_->EndEnumeration(&iter);
+ entry_opened_before->Close();
+ entry_opened_middle->Close();
+
+ EXPECT_EQ(key_pool.size(), count);
+ EXPECT_TRUE(keys_to_match.empty());
+}
+
+// Tests that the enumerations are not affected by dooming an entry in the
+// middle.
+TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
+ SetSimpleCacheMode();
+ InitCache();
+ std::set<std::string> key_pool;
+ ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
+
+ // Check that enumeration returns all entries but the doomed one.
+ std::set<std::string> keys_to_match(key_pool);
+ void* iter = NULL;
+ size_t count = 0;
+ ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
+ &iter,
+ &keys_to_match,
+ &count));
+
+ std::string key_to_delete = *(keys_to_match.begin());
+ DoomEntry(key_to_delete);
+ keys_to_match.erase(key_to_delete);
+ key_pool.erase(key_to_delete);
+ ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
+ cache_->EndEnumeration(&iter);
+
+ EXPECT_EQ(key_pool.size(), count);
+ EXPECT_TRUE(keys_to_match.empty());
+}
+
+// Tests that enumerations are not affected by corrupt files.
+TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
+ SetSimpleCacheMode();
+ InitCache();
+ std::set<std::string> key_pool;
+ ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
+
+ // Create a corrupt entry. The write/read sequence ensures that the entry will
+ // have been created before corrupting the platform files, in the case of
+ // optimistic operations.
+ const std::string key = "the key";
+ disk_cache::Entry* corrupted_entry;
+
+ ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
+ ASSERT_TRUE(corrupted_entry);
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ ASSERT_EQ(kSize,
+ WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
+ ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
+ corrupted_entry->Close();
+
+ EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
+ key, cache_path_));
+ EXPECT_EQ(key_pool.size() + 1,
+ implicit_cast<size_t>(cache_->GetEntryCount()));
+
+ // Check that enumeration returns all entries but the corrupt one.
+ std::set<std::string> keys_to_match(key_pool);
+ void* iter = NULL;
+ size_t count = 0;
+ ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
+ cache_->EndEnumeration(&iter);
+
+ EXPECT_EQ(key_pool.size(), count);
+ EXPECT_TRUE(keys_to_match.empty());
+}
+
+#endif // !defined(OS_WIN)
diff --git a/chromium/net/disk_cache/bitmap.cc b/chromium/net/disk_cache/bitmap.cc
new file mode 100644
index 00000000000..6d469dfe3dc
--- /dev/null
+++ b/chromium/net/disk_cache/bitmap.cc
@@ -0,0 +1,311 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/bitmap.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+
+namespace {
+
+// Returns the number of trailing zeros.
+int FindLSBSetNonZero(uint32 word) {
+ // Get the LSB, put it on the exponent of a 32-bit float and remove the
+ // mantissa and the bias. This code requires IEEE 32-bit float compliance.
+ float f = static_cast<float>(word & -static_cast<int>(word));
+
+ // We use a union to get around strict-aliasing complaints.
+ union {
+ float ieee_float;
+ uint32 as_uint;
+ } x;
+
+ x.ieee_float = f;
+ return (x.as_uint >> 23) - 0x7f;
+}
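+
+// Worked example of the float trick above (illustrative only): for
+// word == 0xA8 (binary 10101000), word & -word isolates the lowest set bit,
+// 0x08 == 2^3. Stored as an IEEE-754 single, 8.0f has a biased exponent of
+// 130 (0x82), so (x.as_uint >> 23) - 0x7f == 130 - 127 == 3, the index of
+// that bit.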
+
+// Returns the index of the first bit set to |value| from |word|. This code
+// assumes that we'll be able to find that bit.
+int FindLSBNonEmpty(uint32 word, bool value) {
+ // If we are looking for 0, negate |word| and look for 1.
+ if (!value)
+ word = ~word;
+
+ return FindLSBSetNonZero(word);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+Bitmap::Bitmap(int num_bits, bool clear_bits)
+ : num_bits_(num_bits),
+ array_size_(RequiredArraySize(num_bits)),
+ alloc_(true) {
+ map_ = new uint32[array_size_];
+
+ // Initialize all of the bits.
+ if (clear_bits)
+ Clear();
+}
+
+Bitmap::Bitmap(uint32* map, int num_bits, int num_words)
+ : map_(map),
+ num_bits_(num_bits),
+ // If size is larger than necessary, trim because array_size_ is used
+ // as a bound by various methods.
+ array_size_(std::min(RequiredArraySize(num_bits), num_words)),
+ alloc_(false) {
+}
+
+Bitmap::~Bitmap() {
+ if (alloc_)
+ delete [] map_;
+}
+
+void Bitmap::Resize(int num_bits, bool clear_bits) {
+ DCHECK(alloc_ || !map_);
+ const int old_maxsize = num_bits_;
+ const int old_array_size = array_size_;
+ array_size_ = RequiredArraySize(num_bits);
+
+ if (array_size_ != old_array_size) {
+ uint32* new_map = new uint32[array_size_];
+ // Always clear the unused bits in the last word.
+ new_map[array_size_ - 1] = 0;
+ memcpy(new_map, map_,
+ sizeof(*map_) * std::min(array_size_, old_array_size));
+ if (alloc_)
+ delete[] map_; // No need to check for NULL.
+ map_ = new_map;
+ alloc_ = true;
+ }
+
+ num_bits_ = num_bits;
+ if (old_maxsize < num_bits_ && clear_bits) {
+ SetRange(old_maxsize, num_bits_, false);
+ }
+}
+
+void Bitmap::Set(int index, bool value) {
+ DCHECK_LT(index, num_bits_);
+ DCHECK_GE(index, 0);
+ const int i = index & (kIntBits - 1);
+ const int j = index / kIntBits;
+ if (value)
+ map_[j] |= (1 << i);
+ else
+ map_[j] &= ~(1 << i);
+}
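+
+// For illustration, index 48 maps to word j == 48 / 32 == 1 and bit
+// i == 48 & 31 == 16, i.e. bit 16 of map_[1]; this is the bit probed by the
+// Get(48) checks in bitmap_unittest.cc.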
+
+bool Bitmap::Get(int index) const {
+ DCHECK_LT(index, num_bits_);
+ DCHECK_GE(index, 0);
+ const int i = index & (kIntBits-1);
+ const int j = index / kIntBits;
+ return ((map_[j] & (1 << i)) != 0);
+}
+
+void Bitmap::Toggle(int index) {
+ DCHECK_LT(index, num_bits_);
+ DCHECK_GE(index, 0);
+ const int i = index & (kIntBits - 1);
+ const int j = index / kIntBits;
+ map_[j] ^= (1 << i);
+}
+
+void Bitmap::SetMapElement(int array_index, uint32 value) {
+ DCHECK_LT(array_index, array_size_);
+ DCHECK_GE(array_index, 0);
+ map_[array_index] = value;
+}
+
+uint32 Bitmap::GetMapElement(int array_index) const {
+ DCHECK_LT(array_index, array_size_);
+ DCHECK_GE(array_index, 0);
+ return map_[array_index];
+}
+
+void Bitmap::SetMap(const uint32* map, int size) {
+ memcpy(map_, map, std::min(size, array_size_) * sizeof(*map_));
+}
+
+void Bitmap::SetRange(int begin, int end, bool value) {
+ DCHECK_LE(begin, end);
+ int start_offset = begin & (kIntBits - 1);
+ if (start_offset) {
+ // Set the bits in the first word.
+ int len = std::min(end - begin, kIntBits - start_offset);
+ SetWordBits(begin, len, value);
+ begin += len;
+ }
+
+ if (begin == end)
+ return;
+
+ // Now set the bits in the last word.
+ int end_offset = end & (kIntBits - 1);
+ end -= end_offset;
+ SetWordBits(end, end_offset, value);
+
+ // Set all the words in the middle.
+ memset(map_ + (begin / kIntBits), (value ? 0xFF : 0x00),
+ ((end / kIntBits) - (begin / kIntBits)) * sizeof(*map_));
+}
+
+// Returns true if any bit between begin inclusive and end exclusive matches
+// |value|. 0 <= begin <= end <= Size() is required.
+bool Bitmap::TestRange(int begin, int end, bool value) const {
+ DCHECK_LT(begin, num_bits_);
+ DCHECK_LE(end, num_bits_);
+ DCHECK_LE(begin, end);
+ DCHECK_GE(begin, 0);
+ DCHECK_GE(end, 0);
+
+ // Return false immediately if the range is empty.
+ if (begin >= end || end <= 0)
+ return false;
+
+ // Calculate the indices of the words containing the first and last bits,
+ // along with the positions of the bits within those words.
+ int word = begin / kIntBits;
+ int offset = begin & (kIntBits - 1);
+ int last_word = (end - 1) / kIntBits;
+ int last_offset = (end - 1) & (kIntBits - 1);
+
+ // If we are looking for zeros, negate the data from the map.
+ uint32 this_word = map_[word];
+ if (!value)
+ this_word = ~this_word;
+
+ // If the range spans multiple words, discard the extraneous bits of the
+ // first word by shifting to the right, and then test the remaining bits.
+ if (word < last_word) {
+ if (this_word >> offset)
+ return true;
+ offset = 0;
+
+ word++;
+ // Test each of the "middle" words that lie completely within the range.
+ while (word < last_word) {
+ this_word = map_[word++];
+ if (!value)
+ this_word = ~this_word;
+ if (this_word)
+ return true;
+ }
+ }
+
+ // Test the portion of the last word that lies within the range. (This logic
+ // also handles the case where the entire range lies within a single word.)
+ const uint32 mask = ((2 << (last_offset - offset)) - 1) << offset;
+
+ this_word = map_[last_word];
+ if (!value)
+ this_word = ~this_word;
+
+ return (this_word & mask) != 0;
+}
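+
+// Worked example of the final mask (illustrative only): TestRange(5, 12, ...)
+// within a single word gives offset == 5 and last_offset == 11, so
+// mask == ((2 << 6) - 1) << 5, i.e. seven consecutive ones covering bits 5
+// through 11, exactly the half-open range [5, 12).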
+
+bool Bitmap::FindNextBit(int* index, int limit, bool value) const {
+ DCHECK_LT(*index, num_bits_);
+ DCHECK_LE(limit, num_bits_);
+ DCHECK_LE(*index, limit);
+ DCHECK_GE(*index, 0);
+ DCHECK_GE(limit, 0);
+
+ const int bit_index = *index;
+ if (bit_index >= limit || limit <= 0)
+ return false;
+
+ // From now on limit != 0, since if it was we would have returned false.
+ int word_index = bit_index >> kLogIntBits;
+ uint32 one_word = map_[word_index];
+
+ // Simple optimization: we can immediately return true if the first bit
+ // already matches |value|. This helps for cases where many bits match, and
+ // doesn't hurt much if not.
+ if (Get(bit_index) == value)
+ return true;
+
+ const int first_bit_offset = bit_index & (kIntBits - 1);
+
+ // First word is special - we need to mask off leading bits.
+ uint32 mask = 0xFFFFFFFF << first_bit_offset;
+ if (value) {
+ one_word &= mask;
+ } else {
+ one_word |= ~mask;
+ }
+
+ uint32 empty_value = value ? 0 : 0xFFFFFFFF;
+
+ // Loop through all but the last word. Note that 'limit' is one past the
+ // last bit we want to check, and we don't want to read past the end of
+ // |map_|. E.g. if num_bits_ == 32 only map_[0] is valid, so we want to
+ // avoid reading map_[1] when limit == 32.
+ const int last_word_index = (limit - 1) >> kLogIntBits;
+ while (word_index < last_word_index) {
+ if (one_word != empty_value) {
+ *index = (word_index << kLogIntBits) + FindLSBNonEmpty(one_word, value);
+ return true;
+ }
+ one_word = map_[++word_index];
+ }
+
+ // Last word is special - we may need to mask off trailing bits. Note that
+ // 'limit' is one past the last bit we want to check, and if limit is a
+ // multiple of 32 we want to check all bits in this word.
+ const int last_bit_offset = (limit - 1) & (kIntBits - 1);
+ mask = 0xFFFFFFFE << last_bit_offset;
+ if (value) {
+ one_word &= ~mask;
+ } else {
+ one_word |= mask;
+ }
+ if (one_word != empty_value) {
+ *index = (word_index << kLogIntBits) + FindLSBNonEmpty(one_word, value);
+ return true;
+ }
+ return false;
+}
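+
+// Note on the trailing mask (illustrative only): with limit == 20,
+// last_bit_offset == 19 and mask == 0xFFFFFFFE << 19 covers bits 20..31, so
+// those out-of-range bits are forced to the "empty" value before the final
+// test. With limit a multiple of 32, last_bit_offset == 31 and the mask
+// shifts out entirely, so every bit of the last word is checked.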
+
+int Bitmap::FindBits(int* index, int limit, bool value) const {
+ DCHECK_LT(*index, num_bits_);
+ DCHECK_LE(limit, num_bits_);
+ DCHECK_LE(*index, limit);
+ DCHECK_GE(*index, 0);
+ DCHECK_GE(limit, 0);
+
+ if (!FindNextBit(index, limit, value))
+ return 0;
+
+ // Now see how many bits have the same value.
+ int end = *index;
+ if (!FindNextBit(&end, limit, !value))
+ return limit - *index;
+
+ return end - *index;
+}
+
+void Bitmap::SetWordBits(int start, int len, bool value) {
+ DCHECK_LT(len, kIntBits);
+ DCHECK_GE(len, 0);
+ if (!len)
+ return;
+
+ int word = start / kIntBits;
+ int offset = start % kIntBits;
+
+ uint32 to_add = 0xffffffff << len;
+ to_add = (~to_add) << offset;
+ if (value) {
+ map_[word] |= to_add;
+ } else {
+ map_[word] &= ~to_add;
+ }
+}
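+
+// Worked example (illustrative only): SetRange(11, 21, true) on an empty
+// bitmap calls SetWordBits(11, 10, true). Here word == 0 and offset == 11;
+// to_add starts as 0xFFFFFFFF << 10, is inverted to ten low ones (0x3FF) and
+// shifted to 0x001FF800, setting exactly bits 11 through 20.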
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/bitmap.h b/chromium/net/disk_cache/bitmap.h
new file mode 100644
index 00000000000..81c434c39b4
--- /dev/null
+++ b/chromium/net/disk_cache/bitmap.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_BITMAP_H_
+#define NET_DISK_CACHE_BITMAP_H_
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+
+// This class provides support for simple maps of bits.
+class NET_EXPORT_PRIVATE Bitmap {
+ public:
+ Bitmap() : map_(NULL), num_bits_(0), array_size_(0), alloc_(false) {}
+
+ // This constructor will allocate on a uint32 boundary. If |clear_bits| is
+ // false, the bitmap bits will not be initialized.
+ Bitmap(int num_bits, bool clear_bits);
+
+ // Constructs a Bitmap with the actual storage provided by the caller. |map|
+ // has to be valid until this object's destruction. |num_bits| is the number of
+ // bits in the bitmap, and |num_words| is the size of |map| in 32-bit words.
+ Bitmap(uint32* map, int num_bits, int num_words);
+
+ ~Bitmap();
+
+ // Resizes the bitmap.
+ // If |num_bits| < Size(), the extra bits will be discarded.
+ // If |num_bits| > Size(), the extra bits will be filled with zeros if
+ // |clear_bits| is true.
+ // This object cannot be using memory provided during construction.
+ void Resize(int num_bits, bool clear_bits);
+
+ // Returns the number of bits in the bitmap.
+ int Size() const { return num_bits_; }
+
+ // Returns the number of 32-bit words in the bitmap.
+ int ArraySize() const { return array_size_; }
+
+ // Sets all the bits to true or false.
+ void SetAll(bool value) {
+ memset(map_, (value ? 0xFF : 0x00), array_size_ * sizeof(*map_));
+ }
+
+ // Clears all bits in the bitmap
+ void Clear() { SetAll(false); }
+
+ // Sets the value, gets the value or toggles the value of a given bit.
+ void Set(int index, bool value);
+ bool Get(int index) const;
+ void Toggle(int index);
+
+ // Directly sets an element of the internal map. Requires |array_index| <
+ // ArraySize().
+ void SetMapElement(int array_index, uint32 value);
+
+ // Gets an element of the internal map. Requires |array_index| <
+ // ArraySize().
+ uint32 GetMapElement(int array_index) const;
+
+ // Directly sets the whole internal map. |size| is the number of 32-bit words
+ // to set from |map|. If |size| > array_size(), it ignores the end of |map|.
+ void SetMap(const uint32* map, int size);
+
+ // Gets a pointer to the internal map.
+ const uint32* GetMap() const { return map_; }
+
+ // Sets a range of bits to |value|.
+ void SetRange(int begin, int end, bool value);
+
+ // Returns true if any bit between begin inclusive and end exclusive matches
+ // |value|. 0 <= |begin| <= |end| <= Size() is required.
+ bool TestRange(int begin, int end, bool value) const;
+
+ // Scans bits starting at bit *|index|, looking for a bit set to |value|. If
+ // it finds that bit before reaching bit index |limit|, sets *|index| to the
+ // bit index and returns true. Otherwise returns false.
+ // Requires |limit| <= Size().
+ //
+ // Note that to use these methods in a loop you must increment the index
+ // after each use, as in:
+ //
+ // for (int index = 0 ; map.FindNextBit(&index, limit, value) ; ++index) {
+ // DoSomethingWith(index);
+ // }
+ bool FindNextBit(int* index, int limit, bool value) const;
+
+ // Finds the first offset >= *|index| and < |limit| that has its bit set.
+ // See FindNextBit() for more info.
+ bool FindNextSetBitBeforeLimit(int* index, int limit) const {
+ return FindNextBit(index, limit, true);
+ }
+
+ // Finds the first offset >= *|index| that has its bit set.
+ // See FindNextBit() for more info.
+ bool FindNextSetBit(int *index) const {
+ return FindNextSetBitBeforeLimit(index, num_bits_);
+ }
+
+ // Scans bits starting at bit *|index|, looking for a bit set to |value|. If
+ // it finds that bit before reaching bit index |limit|, sets *|index| to the
+ // bit index and then counts the number of consecutive bits set to |value|
+ // (before reaching |limit|), and returns that count. If no bit is found
+ // returns 0. Requires |limit| <= Size().
+ int FindBits(int* index, int limit, bool value) const;
+
+ // Returns number of allocated words required for a bitmap of size |num_bits|.
+ static int RequiredArraySize(int num_bits) {
+ // Force at least one allocated word.
+ if (num_bits <= kIntBits)
+ return 1;
+
+ return (num_bits + kIntBits - 1) >> kLogIntBits;
+ }
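+
+ // For example (illustrative only), RequiredArraySize(80) is
+ // (80 + 31) >> 5 == 3, the three 32-bit words needed to hold 80 bits;
+ // the early return guarantees at least one word even for tiny sizes.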
+
+ private:
+ static const int kIntBits = sizeof(uint32) * 8;
+ static const int kLogIntBits = 5; // 2^5 == 32 bits per word.
+
+ // Sets |len| bits from |start| to |value|. All the bits to be set should be
+ // stored in the same word, and len < kIntBits.
+ void SetWordBits(int start, int len, bool value);
+
+ uint32* map_; // The bitmap.
+ int num_bits_; // The upper bound of the bitmap.
+ int array_size_; // The physical size (in uint32s) of the bitmap.
+ bool alloc_; // Whether or not we allocated the memory.
+
+ DISALLOW_COPY_AND_ASSIGN(Bitmap);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BITMAP_H_
diff --git a/chromium/net/disk_cache/bitmap_unittest.cc b/chromium/net/disk_cache/bitmap_unittest.cc
new file mode 100644
index 00000000000..d80ea742682
--- /dev/null
+++ b/chromium/net/disk_cache/bitmap_unittest.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/bitmap.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(BitmapTest, OverAllocate) {
+ // Test that we don't over allocate on boundaries.
+ disk_cache::Bitmap map32(32, false);
+ EXPECT_EQ(1, map32.ArraySize());
+
+ disk_cache::Bitmap map64(64, false);
+ EXPECT_EQ(2, map64.ArraySize());
+}
+
+TEST(BitmapTest, DefaultConstructor) {
+ // Verify that the default constructor doesn't allocate a bitmap.
+ disk_cache::Bitmap map;
+ EXPECT_EQ(0, map.Size());
+ EXPECT_EQ(0, map.ArraySize());
+ EXPECT_TRUE(NULL == map.GetMap());
+}
+
+TEST(BitmapTest, Basics) {
+ disk_cache::Bitmap bitmap(80, true);
+ const uint32 kValue = 0x74f10060;
+
+ // Test proper allocation size.
+ EXPECT_EQ(80, bitmap.Size());
+ EXPECT_EQ(3, bitmap.ArraySize());
+
+ // Test Set/GetMapElement.
+ EXPECT_EQ(0U, bitmap.GetMapElement(1));
+ bitmap.SetMapElement(1, kValue);
+ EXPECT_EQ(kValue, bitmap.GetMapElement(1));
+
+ // Test Set/Get.
+ EXPECT_TRUE(bitmap.Get(48));
+ EXPECT_FALSE(bitmap.Get(49));
+ EXPECT_FALSE(bitmap.Get(50));
+ bitmap.Set(49, true);
+ EXPECT_TRUE(bitmap.Get(48));
+ EXPECT_TRUE(bitmap.Get(49));
+ EXPECT_FALSE(bitmap.Get(50));
+ bitmap.Set(49, false);
+ EXPECT_TRUE(bitmap.Get(48));
+ EXPECT_FALSE(bitmap.Get(49));
+ EXPECT_FALSE(bitmap.Get(50));
+
+ for (int i = 0; i < 80; i++)
+ bitmap.Set(i, (i % 7) == 0);
+ for (int i = 0; i < 80; i++)
+ EXPECT_EQ(bitmap.Get(i), (i % 7) == 0);
+}
+
+TEST(BitmapTest, Toggle) {
+ static const int kSize = 100;
+ disk_cache::Bitmap map(kSize, true);
+ for (int i = 0; i < 100; i += 3)
+ map.Toggle(i);
+ for (int i = 0; i < 100; i += 9)
+ map.Toggle(i);
+ for (int i = 0; i < 100; ++i)
+ EXPECT_EQ((i % 3 == 0) && (i % 9 != 0), map.Get(i));
+}
+
+TEST(BitmapTest, Resize) {
+ const int kSize1 = 50;
+ const int kSize2 = 100;
+ const int kSize3 = 30;
+ disk_cache::Bitmap map(kSize1, true);
+ map.Resize(kSize1, true);
+ EXPECT_EQ(kSize1, map.Size());
+ EXPECT_FALSE(map.Get(0));
+ EXPECT_FALSE(map.Get(kSize1 - 1));
+
+ map.Resize(kSize2, true);
+ EXPECT_FALSE(map.Get(kSize1 - 1));
+ EXPECT_FALSE(map.Get(kSize1));
+ EXPECT_FALSE(map.Get(kSize2 - 1));
+ EXPECT_EQ(kSize2, map.Size());
+
+ map.Resize(kSize3, true);
+ EXPECT_FALSE(map.Get(kSize3 - 1));
+ EXPECT_EQ(kSize3, map.Size());
+}
+
+TEST(BitmapTest, Map) {
+ // Tests Set/GetMap and the constructor that takes an array.
+ const int kMapSize = 80;
+ char local_map[kMapSize];
+ for (int i = 0; i < kMapSize; i++)
+ local_map[i] = static_cast<char>(i);
+
+ disk_cache::Bitmap bitmap(kMapSize * 8, false);
+ bitmap.SetMap(reinterpret_cast<uint32*>(local_map), kMapSize / 4);
+ for (int i = 0; i < kMapSize; i++) {
+ if (i % 2)
+ EXPECT_TRUE(bitmap.Get(i * 8));
+ else
+ EXPECT_FALSE(bitmap.Get(i * 8));
+ }
+
+ EXPECT_EQ(0, memcmp(local_map, bitmap.GetMap(), kMapSize));
+
+ // Now let's create a bitmap that shares local_map as storage.
+ disk_cache::Bitmap bitmap2(reinterpret_cast<uint32*>(local_map),
+ kMapSize * 8, kMapSize / 4);
+ EXPECT_EQ(0, memcmp(local_map, bitmap2.GetMap(), kMapSize));
+
+ local_map[kMapSize / 2] = 'a';
+ EXPECT_EQ(0, memcmp(local_map, bitmap2.GetMap(), kMapSize));
+ EXPECT_NE(0, memcmp(local_map, bitmap.GetMap(), kMapSize));
+}
+
+TEST(BitmapTest, SetAll) {
+ // Tests SetAll and Clear.
+ const int kMapSize = 80;
+ char ones[kMapSize];
+ char zeros[kMapSize];
+ memset(ones, 0xff, kMapSize);
+ memset(zeros, 0, kMapSize);
+
+ disk_cache::Bitmap map(kMapSize * 8, true);
+ EXPECT_EQ(0, memcmp(zeros, map.GetMap(), kMapSize));
+ map.SetAll(true);
+ EXPECT_EQ(0, memcmp(ones, map.GetMap(), kMapSize));
+ map.SetAll(false);
+ EXPECT_EQ(0, memcmp(zeros, map.GetMap(), kMapSize));
+ map.SetAll(true);
+ map.Clear();
+ EXPECT_EQ(0, memcmp(zeros, map.GetMap(), kMapSize));
+}
+
+TEST(BitmapTest, Range) {
+ // Tests SetRange() and TestRange().
+ disk_cache::Bitmap map(100, true);
+ EXPECT_FALSE(map.TestRange(0, 100, true));
+ map.Set(50, true);
+ EXPECT_TRUE(map.TestRange(0, 100, true));
+
+ map.SetAll(false);
+ EXPECT_FALSE(map.TestRange(0, 1, true));
+ EXPECT_FALSE(map.TestRange(30, 31, true));
+ EXPECT_FALSE(map.TestRange(98, 99, true));
+ EXPECT_FALSE(map.TestRange(99, 100, true));
+ EXPECT_FALSE(map.TestRange(0, 100, true));
+
+ EXPECT_TRUE(map.TestRange(0, 1, false));
+ EXPECT_TRUE(map.TestRange(31, 32, false));
+ EXPECT_TRUE(map.TestRange(32, 33, false));
+ EXPECT_TRUE(map.TestRange(99, 100, false));
+ EXPECT_TRUE(map.TestRange(0, 32, false));
+
+ map.SetRange(11, 21, true);
+ for (int i = 0; i < 100; i++)
+ EXPECT_EQ(map.Get(i), (i >= 11) && (i < 21));
+
+ EXPECT_TRUE(map.TestRange(0, 32, true));
+ EXPECT_TRUE(map.TestRange(0, 100, true));
+ EXPECT_TRUE(map.TestRange(11, 21, true));
+ EXPECT_TRUE(map.TestRange(15, 16, true));
+ EXPECT_TRUE(map.TestRange(5, 12, true));
+ EXPECT_TRUE(map.TestRange(5, 11, false));
+ EXPECT_TRUE(map.TestRange(20, 60, true));
+ EXPECT_TRUE(map.TestRange(21, 60, false));
+
+ map.SetAll(true);
+ EXPECT_FALSE(map.TestRange(0, 100, false));
+
+ map.SetRange(70, 99, false);
+ EXPECT_TRUE(map.TestRange(69, 99, false));
+ EXPECT_TRUE(map.TestRange(70, 100, false));
+ EXPECT_FALSE(map.TestRange(70, 99, true));
+}
+
+TEST(BitmapTest, FindNextSetBitBeforeLimit) {
+ // Test FindNextSetBitBeforeLimit. Only check bits from 111 to 277 (limit
+ // bit == 278). Should find all multiples of 27 in that range.
+ disk_cache::Bitmap map(500, true);
+ for (int i = 0; i < 500; i++)
+ map.Set(i, (i % 27) == 0);
+
+ int find_me = 135; // First one expected.
+ for (int index = 111; map.FindNextSetBitBeforeLimit(&index, 278);
+ ++index) {
+ EXPECT_EQ(index, find_me);
+ find_me += 27;
+ }
+ EXPECT_EQ(find_me, 297); // The next find_me after 278.
+}
+
+TEST(BitmapTest, FindNextSetBitBeforeLimitAligned) {
+ // Test FindNextSetBitBeforeLimit on aligned scans.
+ disk_cache::Bitmap map(256, true);
+ for (int i = 0; i < 256; i++)
+ map.Set(i, (i % 32) == 0);
+ for (int i = 0; i < 256; i += 32) {
+ int index = i + 1;
+ EXPECT_FALSE(map.FindNextSetBitBeforeLimit(&index, i + 32));
+ }
+}
+
+TEST(BitmapTest, FindNextSetBit) {
+ // Test FindNextSetBit. Check all bits in map. Should find multiples
+ // of 7 from 0 to 98.
+ disk_cache::Bitmap map(100, true);
+ for (int i = 0; i < 100; i++)
+ map.Set(i, (i % 7) == 0);
+
+ int find_me = 0; // First one expected.
+ for (int index = 0; map.FindNextSetBit(&index); ++index) {
+ EXPECT_EQ(index, find_me);
+ find_me += 7;
+ }
+ EXPECT_EQ(find_me, 105); // The next find_me after 98.
+}
+
+TEST(BitmapTest, FindNextBit) {
+ // Almost the same test as FindNextSetBit, but find zeros instead of ones.
+ disk_cache::Bitmap map(100, false);
+ map.SetAll(true);
+ for (int i = 0; i < 100; i++)
+ map.Set(i, (i % 7) != 0);
+
+ int find_me = 0; // First one expected.
+ for (int index = 0; map.FindNextBit(&index, 100, false); ++index) {
+ EXPECT_EQ(index, find_me);
+ find_me += 7;
+ }
+ EXPECT_EQ(find_me, 105); // The next find_me after 98.
+}
+
+TEST(BitmapTest, SimpleFindBits) {
+ disk_cache::Bitmap bitmap(64, true);
+ bitmap.SetMapElement(0, 0x7ff10060);
+
+ // Bit at index off.
+ int index = 0;
+ EXPECT_EQ(5, bitmap.FindBits(&index, 63, false));
+ EXPECT_EQ(0, index);
+
+ EXPECT_EQ(2, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(5, index);
+
+ index = 0;
+ EXPECT_EQ(2, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(5, index);
+
+ index = 6;
+ EXPECT_EQ(9, bitmap.FindBits(&index, 63, false));
+ EXPECT_EQ(7, index);
+
+ // Bit at index on.
+ index = 16;
+ EXPECT_EQ(1, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(16, index);
+
+ index = 17;
+ EXPECT_EQ(11, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(20, index);
+
+ index = 31;
+ EXPECT_EQ(0, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(31, index);
+
+ // With a limit.
+ index = 8;
+ EXPECT_EQ(0, bitmap.FindBits(&index, 16, true));
+}
+
+TEST(BitmapTest, MultiWordFindBits) {
+ disk_cache::Bitmap bitmap(500, true);
+ bitmap.SetMapElement(10, 0xff00);
+
+ int index = 0;
+ EXPECT_EQ(0, bitmap.FindBits(&index, 300, true));
+
+ EXPECT_EQ(8, bitmap.FindBits(&index, 500, true));
+ EXPECT_EQ(328, index);
+
+ bitmap.SetMapElement(10, 0xff000000);
+ bitmap.SetMapElement(11, 0xff);
+
+ index = 0;
+ EXPECT_EQ(16, bitmap.FindBits(&index, 500, true));
+ EXPECT_EQ(344, index);
+
+ index = 0;
+ EXPECT_EQ(4, bitmap.FindBits(&index, 348, true));
+ EXPECT_EQ(344, index);
+}
diff --git a/chromium/net/disk_cache/block_files.cc b/chromium/net/disk_cache/block_files.cc
new file mode 100644
index 00000000000..fc378e6c449
--- /dev/null
+++ b/chromium/net/disk_cache/block_files.cc
@@ -0,0 +1,695 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/block_files.h"
+
+#include "base/atomicops.h"
+#include "base/files/file_path.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/file_lock.h"
+#include "net/disk_cache/trace.h"
+
+using base::TimeTicks;
+
+namespace {
+
+const char* kBlockName = "data_";
+
+// This array is used to perform a fast lookup from a nibble of the allocation
+// bitmap to the type of entry that can be stored there (the number of
+// consecutive free blocks).
+const char s_types[16] = {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
+
+// Returns the type of block (number of consecutive blocks that can be stored)
+// for a given nibble of the bitmap.
+inline int GetMapBlockType(uint8 value) {
+ value &= 0xf;
+ return s_types[value];
+}
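+
+// Illustration of the lookup table above: each nibble of the allocation map
+// tracks 4 consecutive blocks, and s_types[nibble] is the number of free
+// (zero) high-order bits, i.e. the largest entry that can still be placed in
+// that group. For example, a nibble of 0011 (the two low blocks in use) maps
+// to 2, and any nibble with its top bit set maps to 0.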
+
+} // namespace
+
+namespace disk_cache {
+
+BlockHeader::BlockHeader() : header_(NULL) {
+}
+
+BlockHeader::BlockHeader(BlockFileHeader* header) : header_(header) {
+}
+
+BlockHeader::BlockHeader(MappedFile* file)
+ : header_(reinterpret_cast<BlockFileHeader*>(file->buffer())) {
+}
+
+BlockHeader::BlockHeader(const BlockHeader& other) : header_(other.header_) {
+}
+
+BlockHeader::~BlockHeader() {
+}
+
+bool BlockHeader::CreateMapBlock(int target, int size, int* index) {
+ if (target <= 0 || target > kMaxNumBlocks ||
+ size <= 0 || size > kMaxNumBlocks) {
+ NOTREACHED();
+ return false;
+ }
+
+ TimeTicks start = TimeTicks::Now();
+ // We are going to process the map on 32-block chunks (32 bits), and on every
+ // chunk, iterate through the 8 nibbles where the new block can be located.
+ int current = header_->hints[target - 1];
+ for (int i = 0; i < header_->max_entries / 32; i++, current++) {
+ if (current == header_->max_entries / 32)
+ current = 0;
+ uint32 map_block = header_->allocation_map[current];
+
+ for (int j = 0; j < 8; j++, map_block >>= 4) {
+ if (GetMapBlockType(map_block) != target)
+ continue;
+
+ disk_cache::FileLock lock(header_);
+ int index_offset = j * 4 + 4 - target;
+ *index = current * 32 + index_offset;
+ DCHECK_EQ(*index / 4, (*index + size - 1) / 4);
+ uint32 to_add = ((1 << size) - 1) << index_offset;
+ header_->num_entries++;
+
+ // Note that there is no race in the normal sense here, but if we enforce
+ // the order of memory accesses between num_entries and allocation_map, we
+ // can assert that even if we crash here, num_entries will never be less
+ // than the actual number of used blocks.
+ base::subtle::MemoryBarrier();
+ header_->allocation_map[current] |= to_add;
+
+ header_->hints[target - 1] = current;
+ header_->empty[target - 1]--;
+ DCHECK_GE(header_->empty[target - 1], 0);
+ if (target != size) {
+ header_->empty[target - size - 1]++;
+ }
+ HISTOGRAM_TIMES("DiskCache.CreateBlock", TimeTicks::Now() - start);
+ return true;
+ }
+ }
+
+ // It is possible to have an undetected corruption (for example when the OS
+ // crashes); fix it here.
+ LOG(ERROR) << "Failing CreateMapBlock";
+ FixAllocationCounters();
+ return false;
+}
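+
+// Worked example of the allocation math above (illustrative only): with
+// target == 3 and size == 2, a matching nibble looks like 0001. Then
+// index_offset == j * 4 + 4 - 3 == j * 4 + 1 and, for j == 0,
+// to_add == 0b11 << 1, so the nibble becomes 0111: the two requested blocks
+// sit next to the used one and a single free block remains, which is why
+// empty[target - size - 1] (the one-block counter) is incremented.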
+
+void BlockHeader::DeleteMapBlock(int index, int size) {
+ if (size < 0 || size > kMaxNumBlocks) {
+ NOTREACHED();
+ return;
+ }
+ TimeTicks start = TimeTicks::Now();
+ int byte_index = index / 8;
+ uint8* byte_map = reinterpret_cast<uint8*>(header_->allocation_map);
+ uint8 map_block = byte_map[byte_index];
+
+ if (index % 8 >= 4)
+ map_block >>= 4;
+
+ // See what type of block will be available after we delete this one.
+ int bits_at_end = 4 - size - index % 4;
+ uint8 end_mask = (0xf << (4 - bits_at_end)) & 0xf;
+ bool update_counters = (map_block & end_mask) == 0;
+ uint8 new_value = map_block & ~(((1 << size) - 1) << (index % 4));
+ int new_type = GetMapBlockType(new_value);
+
+ disk_cache::FileLock lock(header_);
+ DCHECK((((1 << size) - 1) << (index % 8)) < 0x100);
+ uint8 to_clear = ((1 << size) - 1) << (index % 8);
+ DCHECK((byte_map[byte_index] & to_clear) == to_clear);
+ byte_map[byte_index] &= ~to_clear;
+
+ if (update_counters) {
+ if (bits_at_end)
+ header_->empty[bits_at_end - 1]--;
+ header_->empty[new_type - 1]++;
+ DCHECK_GE(header_->empty[bits_at_end - 1], 0);
+ }
+ base::subtle::MemoryBarrier();
+ header_->num_entries--;
+ DCHECK_GE(header_->num_entries, 0);
+ HISTOGRAM_TIMES("DiskCache.DeleteBlock", TimeTicks::Now() - start);
+}
+
+// Note that this is a simplified version of DeleteMapBlock().
+bool BlockHeader::UsedMapBlock(int index, int size) {
+ if (size < 0 || size > kMaxNumBlocks) {
+ NOTREACHED();
+ return false;
+ }
+ int byte_index = index / 8;
+ uint8* byte_map = reinterpret_cast<uint8*>(header_->allocation_map);
+ uint8 map_block = byte_map[byte_index];
+
+ if (index % 8 >= 4)
+ map_block >>= 4;
+
+ DCHECK((((1 << size) - 1) << (index % 8)) < 0x100);
+ uint8 to_clear = ((1 << size) - 1) << (index % 8);
+ return ((byte_map[byte_index] & to_clear) == to_clear);
+}
+
+void BlockHeader::FixAllocationCounters() {
+ for (int i = 0; i < kMaxNumBlocks; i++) {
+ header_->hints[i] = 0;
+ header_->empty[i] = 0;
+ }
+
+ for (int i = 0; i < header_->max_entries / 32; i++) {
+ uint32 map_block = header_->allocation_map[i];
+
+ for (int j = 0; j < 8; j++, map_block >>= 4) {
+ int type = GetMapBlockType(map_block);
+ if (type)
+ header_->empty[type - 1]++;
+ }
+ }
+}
+
+bool BlockHeader::NeedToGrowBlockFile(int block_count) {
+ bool have_space = false;
+ int empty_blocks = 0;
+ for (int i = 0; i < kMaxNumBlocks; i++) {
+ empty_blocks += header_->empty[i] * (i + 1);
+ if (i >= block_count - 1 && header_->empty[i])
+ have_space = true;
+ }
+
+ if (header_->next_file && (empty_blocks < kMaxBlocks / 10)) {
+ // This file is almost full but we already created another one; don't use
+ // this file yet so that it is easier to find empty blocks when we start
+ // using it again.
+ return true;
+ }
+ return !have_space;
+}
+
+int BlockHeader::EmptyBlocks() const {
+ int empty_blocks = 0;
+ for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) {
+ empty_blocks += header_->empty[i] * (i + 1);
+ if (header_->empty[i] < 0)
+ return 0;
+ }
+ return empty_blocks;
+}
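+
+// Illustration of the bookkeeping above: empty[i] counts free runs that can
+// hold an entry of (i + 1) blocks, so multiplying by (i + 1) converts run
+// counts into a total block count. For example (hypothetical values),
+// empty == {2, 1, 0, 1} yields 2*1 + 1*2 + 0*3 + 1*4 == 8 free blocks.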
+
+bool BlockHeader::ValidateCounters() const {
+ if (header_->max_entries < 0 || header_->max_entries > kMaxBlocks ||
+ header_->num_entries < 0)
+ return false;
+
+ int empty_blocks = EmptyBlocks();
+ if (empty_blocks + header_->num_entries > header_->max_entries)
+ return false;
+
+ return true;
+}
+
+int BlockHeader::Size() const {
+ return static_cast<int>(sizeof(*header_));
+}
+
+// ------------------------------------------------------------------------
+
+BlockFiles::BlockFiles(const base::FilePath& path)
+ : init_(false), zero_buffer_(NULL), path_(path) {
+}
+
+BlockFiles::~BlockFiles() {
+ if (zero_buffer_)
+ delete[] zero_buffer_;
+ CloseFiles();
+}
+
+bool BlockFiles::Init(bool create_files) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ thread_checker_.reset(new base::ThreadChecker);
+
+ block_files_.resize(kFirstAdditionalBlockFile);
+ for (int i = 0; i < kFirstAdditionalBlockFile; i++) {
+ if (create_files)
+ if (!CreateBlockFile(i, static_cast<FileType>(i + 1), true))
+ return false;
+
+ if (!OpenBlockFile(i))
+ return false;
+
+ // Walk this chain of files removing empty ones.
+ if (!RemoveEmptyFile(static_cast<FileType>(i + 1)))
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+MappedFile* BlockFiles::GetFile(Addr address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ DCHECK(block_files_.size() >= 4);
+ DCHECK(address.is_block_file() || !address.is_initialized());
+ if (!address.is_initialized())
+ return NULL;
+
+ int file_index = address.FileNumber();
+ if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
+ !block_files_[file_index]) {
+ // We need to open the file.
+ if (!OpenBlockFile(file_index))
+ return NULL;
+ }
+ DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ return block_files_[file_index];
+}
+
+bool BlockFiles::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ if (block_type < RANKINGS || block_type > BLOCK_4K ||
+ block_count < 1 || block_count > 4)
+ return false;
+ if (!init_)
+ return false;
+
+ MappedFile* file = FileForNewBlock(block_type, block_count);
+ if (!file)
+ return false;
+
+ ScopedFlush flush(file);
+ BlockHeader header(file);
+
+ int target_size = 0;
+ for (int i = block_count; i <= 4; i++) {
+ if (header->empty[i - 1]) {
+ target_size = i;
+ break;
+ }
+ }
+
+ DCHECK(target_size);
+ int index;
+ if (!header.CreateMapBlock(target_size, block_count, &index))
+ return false;
+
+ Addr address(block_type, block_count, header->this_file, index);
+ block_address->set_value(address.value());
+ Trace("CreateBlock 0x%x", address.value());
+ return true;
+}
+
+void BlockFiles::DeleteBlock(Addr address, bool deep) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ if (!address.is_initialized() || address.is_separate_file())
+ return;
+
+ if (!zero_buffer_) {
+ zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
+ memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
+ }
+ MappedFile* file = GetFile(address);
+ if (!file)
+ return;
+
+ Trace("DeleteBlock 0x%x", address.value());
+
+ size_t size = address.BlockSize() * address.num_blocks();
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (deep)
+ file->Write(zero_buffer_, size, offset);
+
+ BlockHeader header(file);
+ header.DeleteMapBlock(address.start_block(), address.num_blocks());
+ file->Flush();
+
+ if (!header->num_entries) {
+ // This file is now empty. Let's try to delete it.
+ FileType type = Addr::RequiredFileType(header->entry_size);
+ if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
+ type = RANKINGS;
+ RemoveEmptyFile(type); // Ignore failures.
+ }
+}
+
+void BlockFiles::CloseFiles() {
+ if (init_) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ }
+ init_ = false;
+ for (unsigned int i = 0; i < block_files_.size(); i++) {
+ if (block_files_[i]) {
+ block_files_[i]->Release();
+ block_files_[i] = NULL;
+ }
+ }
+ block_files_.clear();
+}
+
+void BlockFiles::ReportStats() {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ int used_blocks[kFirstAdditionalBlockFile];
+ int load[kFirstAdditionalBlockFile];
+ for (int i = 0; i < kFirstAdditionalBlockFile; i++) {
+ GetFileStats(i, &used_blocks[i], &load[i]);
+ }
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_0", used_blocks[0]);
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_1", used_blocks[1]);
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_2", used_blocks[2]);
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_3", used_blocks[3]);
+
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_0", load[0], 101);
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_1", load[1], 101);
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_2", load[2], 101);
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_3", load[3], 101);
+}
+
+bool BlockFiles::IsValid(Addr address) {
+#ifdef NDEBUG
+ return true;
+#else
+ if (!address.is_initialized() || address.is_separate_file())
+ return false;
+
+ MappedFile* file = GetFile(address);
+ if (!file)
+ return false;
+
+ BlockHeader header(file);
+ bool rv = header.UsedMapBlock(address.start_block(), address.num_blocks());
+ DCHECK(rv);
+
+ static bool read_contents = false;
+ if (read_contents) {
+ scoped_ptr<char[]> buffer;
+ buffer.reset(new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4]);
+ size_t size = address.BlockSize() * address.num_blocks();
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ bool ok = file->Read(buffer.get(), size, offset);
+ DCHECK(ok);
+ }
+
+ return rv;
+#endif
+}
+
+bool BlockFiles::CreateBlockFile(int index, FileType file_type, bool force) {
+ base::FilePath name = Name(index);
+ int flags =
+ force ? base::PLATFORM_FILE_CREATE_ALWAYS : base::PLATFORM_FILE_CREATE;
+ flags |= base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+
+ scoped_refptr<File> file(new File(
+ base::CreatePlatformFile(name, flags, NULL, NULL)));
+ if (!file->IsValid())
+ return false;
+
+ BlockFileHeader header;
+ memset(&header, 0, sizeof(header));
+ header.magic = kBlockMagic;
+ header.version = kBlockVersion2;
+ header.entry_size = Addr::BlockSizeForFileType(file_type);
+ header.this_file = static_cast<int16>(index);
+ DCHECK(index <= kint16max && index >= 0);
+
+ return file->Write(&header, sizeof(header), 0);
+}
+
+bool BlockFiles::OpenBlockFile(int index) {
+ if (block_files_.size() - 1 < static_cast<unsigned int>(index)) {
+ DCHECK(index > 0);
+ int to_add = index - static_cast<int>(block_files_.size()) + 1;
+ block_files_.resize(block_files_.size() + to_add);
+ }
+
+ base::FilePath name = Name(index);
+ scoped_refptr<MappedFile> file(new MappedFile());
+
+ if (!file->Init(name, kBlockHeaderSize)) {
+ LOG(ERROR) << "Failed to open " << name.value();
+ return false;
+ }
+
+ size_t file_len = file->GetLength();
+ if (file_len < static_cast<size_t>(kBlockHeaderSize)) {
+ LOG(ERROR) << "File too small " << name.value();
+ return false;
+ }
+
+ BlockHeader header(file.get());
+ if (kBlockMagic != header->magic || kBlockVersion2 != header->version) {
+ LOG(ERROR) << "Invalid file version or magic " << name.value();
+ return false;
+ }
+
+ if (header->updating || !header.ValidateCounters()) {
+ // Last instance was not properly shutdown, or counters are out of sync.
+ if (!FixBlockFileHeader(file.get())) {
+ LOG(ERROR) << "Unable to fix block file " << name.value();
+ return false;
+ }
+ }
+
+ if (static_cast<int>(file_len) <
+ header->max_entries * header->entry_size + kBlockHeaderSize) {
+ LOG(ERROR) << "File too small " << name.value();
+ return false;
+ }
+
+ if (index == 0) {
+ // Load the links file into memory with a single read.
+ scoped_ptr<char[]> buf(new char[file_len]);
+ if (!file->Read(buf.get(), file_len, 0))
+ return false;
+ }
+
+ ScopedFlush flush(file.get());
+ DCHECK(!block_files_[index]);
+ file.swap(&block_files_[index]);
+ return true;
+}
+
+bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
+ if (kMaxBlocks == header->max_entries)
+ return false;
+
+ ScopedFlush flush(file);
+ DCHECK(!header->empty[3]);
+ int new_size = header->max_entries + 1024;
+ if (new_size > kMaxBlocks)
+ new_size = kMaxBlocks;
+
+ int new_size_bytes = new_size * header->entry_size + sizeof(*header);
+
+ if (!file->SetLength(new_size_bytes)) {
+ // Most likely we are trying to truncate the file, so the header is wrong.
+ if (header->updating < 10 && !FixBlockFileHeader(file)) {
+ // If we can't fix the file, increase the lock guard so we'll pick it up on
+ // the next start and replace it.
+ header->updating = 100;
+ return false;
+ }
+ return (header->max_entries >= new_size);
+ }
+
+ FileLock lock(header);
+ header->empty[3] = (new_size - header->max_entries) / 4; // 4-block groups.
+ header->max_entries = new_size;
+
+ return true;
+}
+
+MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
+ COMPILE_ASSERT(RANKINGS == 1, invalid_file_type);
+ MappedFile* file = block_files_[block_type - 1];
+ BlockHeader header(file);
+
+ TimeTicks start = TimeTicks::Now();
+ while (header.NeedToGrowBlockFile(block_count)) {
+ if (kMaxBlocks == header->max_entries) {
+ file = NextFile(file);
+ if (!file)
+ return NULL;
+ header = BlockHeader(file);
+ continue;
+ }
+
+ if (!GrowBlockFile(file, header.Get()))
+ return NULL;
+ break;
+ }
+ HISTOGRAM_TIMES("DiskCache.GetFileForNewBlock", TimeTicks::Now() - start);
+ return file;
+}
+
+MappedFile* BlockFiles::NextFile(MappedFile* file) {
+ ScopedFlush flush(file);
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ int new_file = header->next_file;
+ if (!new_file) {
+ // RANKINGS is not reported as a type for small entries, but we may be
+ // extending the rankings block file.
+ FileType type = Addr::RequiredFileType(header->entry_size);
+ if (header->entry_size == Addr::BlockSizeForFileType(RANKINGS))
+ type = RANKINGS;
+
+ new_file = CreateNextBlockFile(type);
+ if (!new_file)
+ return NULL;
+
+ FileLock lock(header);
+ header->next_file = new_file;
+ }
+
+ // Only the block_file argument is relevant for what we want.
+ Addr address(BLOCK_256, 1, new_file, 0);
+ return GetFile(address);
+}
+
+int BlockFiles::CreateNextBlockFile(FileType block_type) {
+ for (int i = kFirstAdditionalBlockFile; i <= kMaxBlockFile; i++) {
+ if (CreateBlockFile(i, block_type, false))
+ return i;
+ }
+ return 0;
+}
+
+// We walk the list of files for this particular block type, deleting the ones
+// that are empty.
+bool BlockFiles::RemoveEmptyFile(FileType block_type) {
+ MappedFile* file = block_files_[block_type - 1];
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ while (header->next_file) {
+ // Only the block_file argument is relevant for what we want.
+ Addr address(BLOCK_256, 1, header->next_file, 0);
+ MappedFile* next_file = GetFile(address);
+ if (!next_file)
+ return false;
+
+ BlockFileHeader* next_header =
+ reinterpret_cast<BlockFileHeader*>(next_file->buffer());
+ if (!next_header->num_entries) {
+ DCHECK_EQ(next_header->entry_size, header->entry_size);
+ // Delete next_file and remove it from the chain.
+ int file_index = header->next_file;
+ header->next_file = next_header->next_file;
+ DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ file->Flush();
+
+ // We get a new handle to the file and release the old one so that the
+ // file gets unmapped... so we can delete it.
+ base::FilePath name = Name(file_index);
+ scoped_refptr<File> this_file(new File(false));
+ this_file->Init(name);
+ block_files_[file_index]->Release();
+ block_files_[file_index] = NULL;
+
+ int failure = DeleteCacheFile(name) ? 0 : 1;
+ UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed2", failure);
+ if (failure)
+ LOG(ERROR) << "Failed to delete " << name.value() << " from the cache.";
+ continue;
+ }
+
+ header = next_header;
+ file = next_file;
+ }
+ return true;
+}
+
+// Note that we expect to be called outside of a FileLock... however, we cannot
+// DCHECK on header->updating because we may be fixing a crash.
+bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
+ ScopedFlush flush(file);
+ BlockHeader header(file);
+ int file_size = static_cast<int>(file->GetLength());
+ if (file_size < header.Size())
+ return false; // file_size > 2GB is also an error.
+
+ const int kMinBlockSize = 36;
+ const int kMaxBlockSize = 4096;
+ if (header->entry_size < kMinBlockSize ||
+ header->entry_size > kMaxBlockSize || header->num_entries < 0)
+ return false;
+
+ // Make sure that we survive crashes.
+ header->updating = 1;
+ int expected = header->entry_size * header->max_entries + header.Size();
+ if (file_size != expected) {
+ int max_expected = header->entry_size * kMaxBlocks + header.Size();
+ if (file_size < expected || header->empty[3] || file_size > max_expected) {
+ NOTREACHED();
+ LOG(ERROR) << "Unexpected file size";
+ return false;
+ }
+ // We were in the middle of growing the file.
+ int num_entries = (file_size - header.Size()) / header->entry_size;
+ header->max_entries = num_entries;
+ }
+
+ header.FixAllocationCounters();
+ int empty_blocks = header.EmptyBlocks();
+ if (empty_blocks + header->num_entries > header->max_entries)
+ header->num_entries = header->max_entries - empty_blocks;
+
+ if (!header.ValidateCounters())
+ return false;
+
+ header->updating = 0;
+ return true;
+}
+
+// We are interested in the total number of blocks used by this file type, and
+// the max number of blocks that we can store (reported as the percentage of
+// used blocks). In order to find out the number of used blocks, we have to
+// subtract the empty blocks from the total blocks for each file in the chain.
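+// For instance, a chain consisting of a single file with max_entries == 1024
+// and empty == {4, 0, 0, 5} yields a used count of 1024 - (4*1 + 5*4) == 1000
+// and a load of 1000 * 100 / 1024 == 97.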
+void BlockFiles::GetFileStats(int index, int* used_count, int* load) {
+ int max_blocks = 0;
+ *used_count = 0;
+ *load = 0;
+ for (;;) {
+ if (!block_files_[index] && !OpenBlockFile(index))
+ return;
+
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(block_files_[index]->buffer());
+
+ max_blocks += header->max_entries;
+ int used = header->max_entries;
+ for (int i = 0; i < 4; i++) {
+ used -= header->empty[i] * (i + 1);
+ DCHECK_GE(used, 0);
+ }
+ *used_count += used;
+
+ if (!header->next_file)
+ break;
+ index = header->next_file;
+ }
+ if (max_blocks)
+ *load = *used_count * 100 / max_blocks;
+}
+
+base::FilePath BlockFiles::Name(int index) {
+ // The file format allows for 256 files.
+ DCHECK(index < 256 && index >= 0);
+ std::string tmp = base::StringPrintf("%s%d", kBlockName, index);
+ return path_.AppendASCII(tmp);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/block_files.h b/chromium/net/disk_cache/block_files.h
new file mode 100644
index 00000000000..353c5663df0
--- /dev/null
+++ b/chromium/net/disk_cache/block_files.h
@@ -0,0 +1,152 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_BLOCK_FILES_H_
+#define NET_DISK_CACHE_BLOCK_FILES_H_
+
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/disk_format_base.h"
+#include "net/disk_cache/mapped_file.h"
+
+namespace base {
+class ThreadChecker;
+}
+
+namespace disk_cache {
+
+// An instance of this class represents the header of a block file in memory.
+// Note that this class doesn't perform any file operation.
+class NET_EXPORT_PRIVATE BlockHeader {
+ public:
+ BlockHeader();
+ explicit BlockHeader(BlockFileHeader* header);
+ explicit BlockHeader(MappedFile* file);
+ BlockHeader(const BlockHeader& other);
+ ~BlockHeader();
+
+ // Creates a new entry on the allocation map, updating the appropriate
+ // counters. |target| is the type of group to allocate from (its size, in
+ // blocks), and |size| is the actual number of blocks to use.
+ bool CreateMapBlock(int target, int size, int* index);
+
+ // Deletes the block pointed by |index|.
+ void DeleteMapBlock(int index, int block_size);
+
+ // Returns true if the specified block is used.
+ bool UsedMapBlock(int index, int size);
+
+ // Restores the "empty counters" and allocation hints.
+ void FixAllocationCounters();
+
+ // Returns true if the current block file should not be used as-is to store
+ // more records. |block_count| is the number of blocks to allocate.
+ bool NeedToGrowBlockFile(int block_count);
+
+ // Returns the number of empty blocks for this file.
+ int EmptyBlocks() const;
+
+ // Returns true if the counters look OK.
+ bool ValidateCounters() const;
+
+ // Returns the size of the wrapped structure (BlockFileHeader).
+ int Size() const;
+
+ BlockFileHeader* operator->() { return header_; }
+ void operator=(const BlockHeader& other) { header_ = other.header_; }
+ BlockFileHeader* Get() { return header_; }
+
+ private:
+ BlockFileHeader* header_;
+};
+
+typedef std::vector<BlockHeader> BlockFilesBitmaps;
+
+// This class handles the set of block-files open by the disk cache.
+class NET_EXPORT_PRIVATE BlockFiles {
+ public:
+ explicit BlockFiles(const base::FilePath& path);
+ ~BlockFiles();
+
+ // Performs the object initialization. create_files indicates whether the
+ // backing files should be created or just opened.
+ bool Init(bool create_files);
+
+ // Returns the file that stores a given address.
+ MappedFile* GetFile(Addr address);
+
+ // Creates a new entry on a block file. block_type indicates the size of block
+ // to be used (as defined in addr.h), block_count is the number of blocks to
+ // allocate, and block_address receives the address of the new entry.
+ bool CreateBlock(FileType block_type, int block_count, Addr* block_address);
+
+ // Removes an entry from the block files. If deep is true, the storage is zero
+ // filled; otherwise the entry is removed but the data is not altered (must be
+ // already zeroed).
+ void DeleteBlock(Addr address, bool deep);
+
+ // Closes all the files and resets the internal state so that it can be
+ // initialized again. The cache is being purged.
+ void CloseFiles();
+
+ // Sends UMA stats.
+ void ReportStats();
+
+ // Returns true if the blocks pointed by a given address are currently used.
+ // This method is only intended for debugging.
+ bool IsValid(Addr address);
+
+ private:
+ // Set force to true to overwrite the file if it exists.
+ bool CreateBlockFile(int index, FileType file_type, bool force);
+ bool OpenBlockFile(int index);
+
+ // Attempts to grow this file. Fails if the file cannot be extended anymore.
+ bool GrowBlockFile(MappedFile* file, BlockFileHeader* header);
+
+ // Returns the appropriate file to use for a new block.
+ MappedFile* FileForNewBlock(FileType block_type, int block_count);
+
+ // Returns the next block file on this chain, creating new files if needed.
+ MappedFile* NextFile(MappedFile* file);
+
+ // Creates an empty block file and returns its index.
+ int CreateNextBlockFile(FileType block_type);
+
+ // Removes chained block files that are now empty.
+ bool RemoveEmptyFile(FileType block_type);
+
+ // Restores the header of a potentially inconsistent file.
+ bool FixBlockFileHeader(MappedFile* file);
+
+ // Retrieves stats for the given file index.
+ void GetFileStats(int index, int* used_count, int* load);
+
+ // Returns the filename for a given file index.
+ base::FilePath Name(int index);
+
+ bool init_;
+ char* zero_buffer_; // Buffer to speed-up cleaning deleted entries.
+ base::FilePath path_; // Path to the backing folder.
+ std::vector<MappedFile*> block_files_; // The actual files.
+ scoped_ptr<base::ThreadChecker> thread_checker_;
+
+ FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_ZeroSizeFile);
+ FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_TruncatedFile);
+ FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_InvalidFile);
+ FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_Stats);
+
+ DISALLOW_COPY_AND_ASSIGN(BlockFiles);
+};
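+
+// A minimal usage sketch; the path, block type and sizes are illustrative:
+//
+//   BlockFiles files(base::FilePath(FILE_PATH_LITERAL("/tmp/cache")));
+//   if (files.Init(true)) {  // Create the backing files if they are missing.
+//     Addr address(0);
+//     if (files.CreateBlock(BLOCK_1K, 2, &address)) {
+//       MappedFile* file = files.GetFile(address);  // File backing |address|.
+//       // ... write the record through |file|, keep |address| in an index ...
+//       files.DeleteBlock(address, true);  // Zero-fill and release the blocks.
+//     }
+//     files.CloseFiles();
+//   }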
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BLOCK_FILES_H_
diff --git a/chromium/net/disk_cache/block_files_unittest.cc b/chromium/net/disk_cache/block_files_unittest.cc
new file mode 100644
index 00000000000..fa7c5dbb742
--- /dev/null
+++ b/chromium/net/disk_cache/block_files_unittest.cc
@@ -0,0 +1,350 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "base/files/file_enumerator.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+namespace {
+
+// Returns the number of files in this folder.
+int NumberOfFiles(const base::FilePath& path) {
+ base::FileEnumerator iter(path, false, base::FileEnumerator::FILES);
+ int count = 0;
+ for (base::FilePath file = iter.Next(); !file.value().empty();
+ file = iter.Next()) {
+ count++;
+ }
+ return count;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+TEST_F(DiskCacheTest, BlockFiles_Grow) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kMaxSize = 35000;
+ Addr address[kMaxSize];
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kMaxSize; i++) {
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[i]));
+ }
+ EXPECT_EQ(6, NumberOfFiles(cache_path_));
+
+ // Make sure we don't keep adding files.
+ for (int i = 0; i < kMaxSize * 4; i += 2) {
+ int target = i % kMaxSize;
+ files.DeleteBlock(address[target], false);
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[target]));
+ }
+ EXPECT_EQ(6, NumberOfFiles(cache_path_));
+}
+
+// We should be able to delete empty block files.
+TEST_F(DiskCacheTest, BlockFiles_Shrink) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kMaxSize = 35000;
+ Addr address[kMaxSize];
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kMaxSize; i++) {
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[i]));
+ }
+
+ // Now delete all the blocks, so that we can delete the two extra files.
+ for (int i = 0; i < kMaxSize; i++) {
+ files.DeleteBlock(address[i], false);
+ }
+ EXPECT_EQ(4, NumberOfFiles(cache_path_));
+}
+
+// Handling of block files not properly closed.
+TEST_F(DiskCacheTest, BlockFiles_Recover) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kNumEntries = 2000;
+ CacheAddr entries[kNumEntries];
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ for (int i = 0; i < kNumEntries; i++) {
+ Addr address(0);
+ int size = (rand() % 4) + 1;
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, size, &address));
+ entries[i] = address.value();
+ }
+
+ for (int i = 0; i < kNumEntries; i++) {
+ int source1 = rand() % kNumEntries;
+ int source2 = rand() % kNumEntries;
+ CacheAddr temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ Addr address(entries[i]);
+ files.DeleteBlock(address, false);
+ }
+
+ // At this point, there are kNumEntries / 2 entries on the file, randomly
+ // distributed both on location and size.
+
+ Addr address(entries[kNumEntries / 2]);
+ MappedFile* file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ int max_entries = header->max_entries;
+ int empty_1 = header->empty[0];
+ int empty_2 = header->empty[1];
+ int empty_3 = header->empty[2];
+ int empty_4 = header->empty[3];
+
+ // Corrupt the file.
+ header->max_entries = header->empty[0] = 0;
+ header->empty[1] = header->empty[2] = header->empty[3] = 0;
+ header->updating = -1;
+
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+
+ // The file must have been fixed.
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ EXPECT_EQ(max_entries, header->max_entries);
+ EXPECT_EQ(empty_1, header->empty[0]);
+ EXPECT_EQ(empty_2, header->empty[1]);
+ EXPECT_EQ(empty_3, header->empty[2]);
+ EXPECT_EQ(empty_4, header->empty[3]);
+}
+
+// Handling of truncated files.
+TEST_F(DiskCacheTest, BlockFiles_ZeroSizeFile) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ base::FilePath filename = files.Name(0);
+ files.CloseFiles();
+ // Truncate one of the files.
+ {
+ scoped_refptr<File> file(new File);
+ ASSERT_TRUE(file->Init(filename));
+ EXPECT_TRUE(file->SetLength(0));
+ }
+
+ // Initializing should fail, not crash.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// Handling of truncated files (non empty).
+TEST_F(DiskCacheTest, BlockFiles_TruncatedFile) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+ Addr address;
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 2, &address));
+
+ base::FilePath filename = files.Name(0);
+ files.CloseFiles();
+ // Truncate one of the files.
+ {
+ scoped_refptr<File> file(new File);
+ ASSERT_TRUE(file->Init(filename));
+ EXPECT_TRUE(file->SetLength(15000));
+ }
+
+ // Initializing should fail, not crash.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// Tests detection of out of sync counters.
+TEST_F(DiskCacheTest, BlockFiles_Counters) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ // Create a block of size 2.
+ Addr address(0);
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 2, &address));
+
+ MappedFile* file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+ ASSERT_EQ(0, header->updating);
+
+ // Alter the counters so that the free space doesn't add up.
+ header->empty[2] = 50; // 50 free blocks of size 3.
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ // The file must have been fixed.
+ ASSERT_EQ(0, header->empty[2]);
+
+ // Change the number of entries.
+ header->num_entries = 3;
+ header->updating = 1;
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ // The file must have been "fixed".
+ ASSERT_EQ(2, header->num_entries);
+
+ // Change the number of entries.
+ header->num_entries = -1;
+ header->updating = 1;
+ files.CloseFiles();
+
+ // Detect the error.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// An invalid file can be detected after init.
+TEST_F(DiskCacheTest, BlockFiles_InvalidFile) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ // Let's access block 10 of file 5. (There is no file).
+ Addr addr(BLOCK_256, 1, 5, 10);
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+
+ // Let's create an invalid file.
+ base::FilePath filename(files.Name(5));
+ char header[kBlockHeaderSize];
+ memset(header, 'a', kBlockHeaderSize);
+ EXPECT_EQ(kBlockHeaderSize,
+ file_util::WriteFile(filename, header, kBlockHeaderSize));
+
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+
+ // The file should not have been changed (it is still invalid).
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+}
+
+// Tests that we generate the correct file stats.
+TEST_F(DiskCacheTest, BlockFiles_Stats) {
+ ASSERT_TRUE(CopyTestCache("remove_load1"));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(false));
+ int used, load;
+
+ files.GetFileStats(0, &used, &load);
+ EXPECT_EQ(101, used);
+ EXPECT_EQ(9, load);
+
+ files.GetFileStats(1, &used, &load);
+ EXPECT_EQ(203, used);
+ EXPECT_EQ(19, load);
+
+ files.GetFileStats(2, &used, &load);
+ EXPECT_EQ(0, used);
+ EXPECT_EQ(0, load);
+}
+
+// Tests that we add and remove blocks correctly.
+TEST_F(DiskCacheTest, AllocationMap) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ // Create a bunch of entries.
+ const int kSize = 100;
+ Addr address[kSize];
+ for (int i = 0; i < kSize; i++) {
+ SCOPED_TRACE(i);
+ int block_size = i % 4 + 1;
+ EXPECT_TRUE(files.CreateBlock(BLOCK_1K, block_size, &address[i]));
+ EXPECT_EQ(BLOCK_1K, address[i].file_type());
+ EXPECT_EQ(block_size, address[i].num_blocks());
+ int start = address[i].start_block();
+ EXPECT_EQ(start / 4, (start + block_size - 1) / 4);
+ }
+
+ for (int i = 0; i < kSize; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_TRUE(files.IsValid(address[i]));
+ }
+
+ // The first part of the allocation map should be completely filled. We used
+ // 10 bits for each group of four entries, so 250 bits total.
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(files.GetFile(address[0])->buffer());
+ uint8* buffer = reinterpret_cast<uint8*>(&header->allocation_map);
+ for (int i = 0; i < 29; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(0xff, buffer[i]);
+ }
+
+ for (int i = 0; i < kSize; i++) {
+ SCOPED_TRACE(i);
+ files.DeleteBlock(address[i], false);
+ }
+
+ // The allocation map should be empty.
+ for (int i = 0; i < 50; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(0, buffer[i]);
+ }
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/cache_creator.cc b/chromium/net/disk_cache/cache_creator.cc
new file mode 100644
index 00000000000..07a26c9d858
--- /dev/null
+++ b/chromium/net/disk_cache/cache_creator.cc
@@ -0,0 +1,163 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/stringprintf.h"
+#include "net/base/cache_type.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/mem_backend_impl.h"
+#include "net/disk_cache/simple/simple_backend_impl.h"
+
+#ifdef USE_TRACING_CACHE_BACKEND
+#include "net/disk_cache/tracing_cache_backend.h"
+#endif
+
+namespace {
+
+// Builds an instance of the backend depending on platform, type, experiments
+// etc. Takes care of the retry state. This object will self-destroy when
+// finished.
+class CacheCreator {
+ public:
+ CacheCreator(const base::FilePath& path, bool force, int max_bytes,
+ net::CacheType type, net::BackendType backend_type, uint32 flags,
+ base::MessageLoopProxy* thread, net::NetLog* net_log,
+ scoped_ptr<disk_cache::Backend>* backend,
+ const net::CompletionCallback& callback);
+
+ // Creates the backend.
+ int Run();
+
+ private:
+ ~CacheCreator();
+
+ void DoCallback(int result);
+
+ void OnIOComplete(int result);
+
+ const base::FilePath path_;
+ bool force_;
+ bool retry_;
+ int max_bytes_;
+ net::CacheType type_;
+ net::BackendType backend_type_;
+ uint32 flags_;
+ scoped_refptr<base::MessageLoopProxy> thread_;
+ scoped_ptr<disk_cache::Backend>* backend_;
+ net::CompletionCallback callback_;
+ scoped_ptr<disk_cache::Backend> created_cache_;
+ net::NetLog* net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(CacheCreator);
+};
+
+CacheCreator::CacheCreator(
+ const base::FilePath& path, bool force, int max_bytes,
+ net::CacheType type, net::BackendType backend_type, uint32 flags,
+ base::MessageLoopProxy* thread, net::NetLog* net_log,
+ scoped_ptr<disk_cache::Backend>* backend,
+ const net::CompletionCallback& callback)
+ : path_(path),
+ force_(force),
+ retry_(false),
+ max_bytes_(max_bytes),
+ type_(type),
+ backend_type_(backend_type),
+ flags_(flags),
+ thread_(thread),
+ backend_(backend),
+ callback_(callback),
+ net_log_(net_log) {
+}
+
+CacheCreator::~CacheCreator() {
+}
+
+int CacheCreator::Run() {
+ // TODO(gavinp,pasko): While simple backend development proceeds, we're only
+ // testing it against net::DISK_CACHE. Turn it on for more cache types as
+ // appropriate.
+ if (backend_type_ == net::CACHE_BACKEND_SIMPLE && type_ == net::DISK_CACHE) {
+ disk_cache::SimpleBackendImpl* simple_cache =
+ new disk_cache::SimpleBackendImpl(path_, max_bytes_, type_,
+ thread_.get(), net_log_);
+ created_cache_.reset(simple_cache);
+ return simple_cache->Init(
+ base::Bind(&CacheCreator::OnIOComplete, base::Unretained(this)));
+ }
+ disk_cache::BackendImpl* new_cache =
+ new disk_cache::BackendImpl(path_, thread_.get(), net_log_);
+ created_cache_.reset(new_cache);
+ new_cache->SetMaxSize(max_bytes_);
+ new_cache->SetType(type_);
+ new_cache->SetFlags(flags_);
+ int rv = new_cache->Init(
+ base::Bind(&CacheCreator::OnIOComplete, base::Unretained(this)));
+ DCHECK_EQ(net::ERR_IO_PENDING, rv);
+ return rv;
+}
+
+void CacheCreator::DoCallback(int result) {
+ DCHECK_NE(net::ERR_IO_PENDING, result);
+ if (result == net::OK) {
+#ifndef USE_TRACING_CACHE_BACKEND
+ *backend_ = created_cache_.Pass();
+#else
+ backend_->reset(
+ new disk_cache::TracingCacheBackend(created_cache_.Pass()));
+#endif
+ } else {
+ LOG(ERROR) << "Unable to create cache";
+ }
+ callback_.Run(result);
+ delete this;
+}
+
+// If the initialization of the cache fails, and |force| is true, we will
+// discard the whole cache and create a new one.
+void CacheCreator::OnIOComplete(int result) {
+ if (result == net::OK || !force_ || retry_)
+ return DoCallback(result);
+
+ // This is a failure and we are supposed to try again, so delete the object,
+ // delete all the files, and try again.
+ retry_ = true;
+ created_cache_.reset();
+ if (!disk_cache::DelayedCacheCleanup(path_))
+ return DoCallback(result);
+
+ // The worker thread will start deleting files soon, but the original folder
+ // is not there anymore... let's create a new set of files.
+ int rv = Run();
+ DCHECK_EQ(net::ERR_IO_PENDING, rv);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+int CreateCacheBackend(net::CacheType type,
+ net::BackendType backend_type,
+ const base::FilePath& path,
+ int max_bytes,
+ bool force, base::MessageLoopProxy* thread,
+ net::NetLog* net_log, scoped_ptr<Backend>* backend,
+ const net::CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ if (type == net::MEMORY_CACHE) {
+ *backend = disk_cache::MemBackendImpl::CreateBackend(max_bytes, net_log);
+ return *backend ? net::OK : net::ERR_FAILED;
+ }
+ DCHECK(thread);
+ CacheCreator* creator = new CacheCreator(path, force, max_bytes, type,
+ backend_type, kNone,
+ thread, net_log, backend, callback);
+ return creator->Run();
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/cache_util.cc b/chromium/net/disk_cache/cache_util.cc
new file mode 100644
index 00000000000..7389960a16a
--- /dev/null
+++ b/chromium/net/disk_cache/cache_util.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/cache_util.h"
+
+#include "base/file_util.h"
+#include "base/files/file_enumerator.h"
+#include "base/location.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/worker_pool.h"
+
+namespace {
+
+const int kMaxOldFolders = 100;
+
+// Returns a fully qualified name from path and name, using a given name prefix
+// and index number. For instance, if the arguments are "/foo", "bar" and 5, it
+// will return "/foo/old_bar_005".
+base::FilePath GetPrefixedName(const base::FilePath& path,
+ const std::string& name,
+ int index) {
+ std::string tmp = base::StringPrintf("%s%s_%03d", "old_",
+ name.c_str(), index);
+ return path.AppendASCII(tmp);
+}
+
+// This is a simple callback to cleanup old caches.
+void CleanupCallback(const base::FilePath& path, const std::string& name) {
+ for (int i = 0; i < kMaxOldFolders; i++) {
+ base::FilePath to_delete = GetPrefixedName(path, name, i);
+ disk_cache::DeleteCache(to_delete, true);
+ }
+}
+
+// Returns a full path to rename the current cache, in order to delete it. path
+// is the current folder location, and name is the current folder name.
+base::FilePath GetTempCacheName(const base::FilePath& path,
+ const std::string& name) {
+ // We'll attempt to have up to kMaxOldFolders folders for deletion.
+ for (int i = 0; i < kMaxOldFolders; i++) {
+ base::FilePath to_delete = GetPrefixedName(path, name, i);
+ if (!base::PathExists(to_delete))
+ return to_delete;
+ }
+ return base::FilePath();
+}
+
+} // namespace
+
+namespace disk_cache {
+
+void DeleteCache(const base::FilePath& path, bool remove_folder) {
+ if (remove_folder) {
+ if (!base::DeleteFile(path, /* recursive */ true))
+ LOG(WARNING) << "Unable to delete cache folder.";
+ return;
+ }
+
+ base::FileEnumerator iter(
+ path,
+ /* recursive */ false,
+ base::FileEnumerator::FILES | base::FileEnumerator::DIRECTORIES);
+ for (base::FilePath file = iter.Next(); !file.value().empty();
+ file = iter.Next()) {
+ if (!base::DeleteFile(file, /* recursive */ true)) {
+ LOG(WARNING) << "Unable to delete cache.";
+ return;
+ }
+ }
+}
+
+// In order to process a potentially large number of files, we'll rename the
+// cache directory to old_ + original_name + number, (located on the same parent
+// directory), and use a worker thread to delete all the files on all the stale
+// cache directories. The whole process can still fail if we are not able to
+// rename the cache directory (for instance due to a sharing violation), and in
+// that case a cache for this profile (on the desired path) cannot be created.
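+// For example, with a full_path of "/profile/Cache" (illustrative), the
+// directory is renamed to the first unused "/profile/old_Cache_NNN" slot (NNN
+// below kMaxOldFolders) and a CleanupCallback task posted to the worker pool
+// later deletes every such folder.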
+bool DelayedCacheCleanup(const base::FilePath& full_path) {
+ // GetTempCacheName() and MoveCache() use synchronous file
+ // operations.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ base::FilePath current_path = full_path.StripTrailingSeparators();
+
+ base::FilePath path = current_path.DirName();
+ base::FilePath name = current_path.BaseName();
+#if defined(OS_POSIX)
+ std::string name_str = name.value();
+#elif defined(OS_WIN)
+ // We created this file so it should only contain ASCII.
+ std::string name_str = WideToASCII(name.value());
+#endif
+
+ base::FilePath to_delete = GetTempCacheName(path, name_str);
+ if (to_delete.empty()) {
+ LOG(ERROR) << "Unable to get another cache folder";
+ return false;
+ }
+
+ if (!disk_cache::MoveCache(full_path, to_delete)) {
+ LOG(ERROR) << "Unable to move cache folder " << full_path.value() << " to "
+ << to_delete.value();
+ return false;
+ }
+
+ base::WorkerPool::PostTask(
+ FROM_HERE, base::Bind(&CleanupCallback, path, name_str), true);
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/cache_util.h b/chromium/net/disk_cache/cache_util.h
new file mode 100644
index 00000000000..2005ba5e240
--- /dev/null
+++ b/chromium/net/disk_cache/cache_util.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_CACHE_UTIL_H_
+#define NET_DISK_CACHE_CACHE_UTIL_H_
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace disk_cache {
+
+// Moves the cache files from the given path to another location.
+// Fails if the destination exists already, or if it doesn't have
+// permission for the operation. This is basically a rename operation
+// for the cache directory. Returns true if successful. On ChromeOS,
+// this moves the cache contents, and leaves the empty cache
+// directory.
+NET_EXPORT_PRIVATE bool MoveCache(const base::FilePath& from_path,
+ const base::FilePath& to_path);
+
+// Deletes the cache files stored on |path|, and optionally also attempts to
+// delete the folder itself.
+NET_EXPORT_PRIVATE void DeleteCache(const base::FilePath& path,
+ bool remove_folder);
+
+// Deletes a cache file.
+NET_EXPORT_PRIVATE bool DeleteCacheFile(const base::FilePath& name);
+
+// Renames cache directory synchronously and fires off a background cleanup
+// task. Used by cache creator itself or by backends for self-restart on error.
+bool DelayedCacheCleanup(const base::FilePath& full_path);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_CACHE_UTIL_H_
diff --git a/chromium/net/disk_cache/cache_util_posix.cc b/chromium/net/disk_cache/cache_util_posix.cc
new file mode 100644
index 00000000000..b33c560a000
--- /dev/null
+++ b/chromium/net/disk_cache/cache_util_posix.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/cache_util.h"
+
+#include "base/file_util.h"
+#include "base/files/file_enumerator.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+
+namespace disk_cache {
+
+bool MoveCache(const base::FilePath& from_path, const base::FilePath& to_path) {
+#if defined(OS_CHROMEOS)
+ // For ChromeOS, we don't actually want to rename the cache
+ // directory, because if we do, then it'll get recreated through the
+ // encrypted filesystem (with encrypted names), and we won't be able
+ // to see these directories anymore in an unmounted encrypted
+ // filesystem, so we just move each item in the cache to a new
+ // directory.
+ if (!file_util::CreateDirectory(to_path)) {
+ LOG(ERROR) << "Unable to create destination cache directory.";
+ return false;
+ }
+ base::FileEnumerator iter(from_path, false /* not recursive */,
+ base::FileEnumerator::DIRECTORIES | base::FileEnumerator::FILES);
+ for (base::FilePath name = iter.Next(); !name.value().empty();
+ name = iter.Next()) {
+ base::FilePath destination = to_path.Append(name.BaseName());
+ if (!base::Move(name, destination)) {
+ LOG(ERROR) << "Unable to move cache item.";
+ return false;
+ }
+ }
+ return true;
+#else
+ return base::Move(from_path, to_path);
+#endif
+}
+
+bool DeleteCacheFile(const base::FilePath& name) {
+ return base::DeleteFile(name, false);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/cache_util_unittest.cc b/chromium/net/disk_cache/cache_util_unittest.cc
new file mode 100644
index 00000000000..d2e76054f7f
--- /dev/null
+++ b/chromium/net/disk_cache/cache_util_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "net/disk_cache/cache_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace disk_cache {
+
+class CacheUtilTest : public PlatformTest {
+ public:
+ virtual void SetUp() {
+ PlatformTest::SetUp();
+ ASSERT_TRUE(tmp_dir_.CreateUniqueTempDir());
+ cache_dir_ = tmp_dir_.path().Append(FILE_PATH_LITERAL("Cache"));
+ file1_ = base::FilePath(cache_dir_.Append(FILE_PATH_LITERAL("file01")));
+ file2_ = base::FilePath(cache_dir_.Append(FILE_PATH_LITERAL(".file02")));
+ dir1_ = base::FilePath(cache_dir_.Append(FILE_PATH_LITERAL("dir01")));
+ file3_ = base::FilePath(dir1_.Append(FILE_PATH_LITERAL("file03")));
+ ASSERT_TRUE(file_util::CreateDirectory(cache_dir_));
+ FILE *fp = file_util::OpenFile(file1_, "w");
+ ASSERT_TRUE(fp != NULL);
+ file_util::CloseFile(fp);
+ fp = file_util::OpenFile(file2_, "w");
+ ASSERT_TRUE(fp != NULL);
+ file_util::CloseFile(fp);
+ ASSERT_TRUE(file_util::CreateDirectory(dir1_));
+ fp = file_util::OpenFile(file3_, "w");
+ ASSERT_TRUE(fp != NULL);
+ file_util::CloseFile(fp);
+ dest_dir_ = tmp_dir_.path().Append(FILE_PATH_LITERAL("old_Cache_001"));
+ dest_file1_ = base::FilePath(dest_dir_.Append(FILE_PATH_LITERAL("file01")));
+ dest_file2_ =
+ base::FilePath(dest_dir_.Append(FILE_PATH_LITERAL(".file02")));
+ dest_dir1_ = base::FilePath(dest_dir_.Append(FILE_PATH_LITERAL("dir01")));
+ }
+
+ protected:
+ base::ScopedTempDir tmp_dir_;
+ base::FilePath cache_dir_;
+ base::FilePath file1_;
+ base::FilePath file2_;
+ base::FilePath dir1_;
+ base::FilePath file3_;
+ base::FilePath dest_dir_;
+ base::FilePath dest_file1_;
+ base::FilePath dest_file2_;
+ base::FilePath dest_dir1_;
+};
+
+TEST_F(CacheUtilTest, MoveCache) {
+ EXPECT_TRUE(disk_cache::MoveCache(cache_dir_, dest_dir_));
+ EXPECT_TRUE(base::PathExists(dest_dir_));
+ EXPECT_TRUE(base::PathExists(dest_file1_));
+ EXPECT_TRUE(base::PathExists(dest_file2_));
+ EXPECT_TRUE(base::PathExists(dest_dir1_));
+#if defined(OS_CHROMEOS)
+ EXPECT_TRUE(base::PathExists(cache_dir_)); // old cache dir stays
+#else
+ EXPECT_FALSE(base::PathExists(cache_dir_)); // old cache is gone
+#endif
+ EXPECT_FALSE(base::PathExists(file1_));
+ EXPECT_FALSE(base::PathExists(file2_));
+ EXPECT_FALSE(base::PathExists(dir1_));
+}
+
+TEST_F(CacheUtilTest, DeleteCache) {
+ disk_cache::DeleteCache(cache_dir_, false);
+ EXPECT_TRUE(base::PathExists(cache_dir_)); // cache dir stays
+ EXPECT_FALSE(base::PathExists(dir1_));
+ EXPECT_FALSE(base::PathExists(file1_));
+ EXPECT_FALSE(base::PathExists(file2_));
+ EXPECT_FALSE(base::PathExists(file3_));
+}
+
+TEST_F(CacheUtilTest, DeleteCacheAndDir) {
+ disk_cache::DeleteCache(cache_dir_, true);
+ EXPECT_FALSE(base::PathExists(cache_dir_)); // cache dir is gone
+ EXPECT_FALSE(base::PathExists(dir1_));
+ EXPECT_FALSE(base::PathExists(file1_));
+ EXPECT_FALSE(base::PathExists(file2_));
+ EXPECT_FALSE(base::PathExists(file3_));
+}
+
+TEST_F(CacheUtilTest, DeleteCacheFile) {
+ EXPECT_TRUE(disk_cache::DeleteCacheFile(file1_));
+ EXPECT_FALSE(base::PathExists(file1_));
+ EXPECT_TRUE(base::PathExists(cache_dir_)); // cache dir stays
+ EXPECT_TRUE(base::PathExists(dir1_));
+ EXPECT_TRUE(base::PathExists(file3_));
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/cache_util_win.cc b/chromium/net/disk_cache/cache_util_win.cc
new file mode 100644
index 00000000000..51ed2a032e2
--- /dev/null
+++ b/chromium/net/disk_cache/cache_util_win.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/cache_util.h"
+
+#include <windows.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/win/scoped_handle.h"
+
+namespace disk_cache {
+
+bool MoveCache(const base::FilePath& from_path, const base::FilePath& to_path) {
+ // I don't want to use the shell version of move because if something goes
+ // wrong, that version will attempt to move file by file and fail at the end.
+ if (!MoveFileEx(from_path.value().c_str(), to_path.value().c_str(), 0)) {
+ LOG(ERROR) << "Unable to move the cache: " << GetLastError();
+ return false;
+ }
+ return true;
+}
+
+bool DeleteCacheFile(const base::FilePath& name) {
+ // We do a simple delete, without the fallback to SHFileOperation that the
+ // version from base uses.
+ if (!DeleteFile(name.value().c_str())) {
+ // There is an error, but we share delete access so let's see if there is a
+ // file to open. Note that this code assumes that we have a handle to the
+ // file at all times (even now), so nobody can have a handle that prevents
+ // us from opening the file again (unless it was deleted).
+ DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+ DWORD access = SYNCHRONIZE;
+ base::win::ScopedHandle file(CreateFile(
+ name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
+ if (file.IsValid())
+ return false;
+
+ // Most likely there is no file to open... and that's what we wanted.
+ }
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/disk_cache.h b/chromium/net/disk_cache/disk_cache.h
new file mode 100644
index 00000000000..3ae8bf4369b
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache.h
@@ -0,0 +1,324 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines the public interface of the disk cache. For more details see
+// http://dev.chromium.org/developers/design-documents/network-stack/disk-cache
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_H_
+#define NET_DISK_CACHE_DISK_CACHE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "net/base/cache_type.h"
+#include "net/base/completion_callback.h"
+#include "net/base/net_export.h"
+
+namespace base {
+class FilePath;
+class MessageLoopProxy;
+}
+
+namespace net {
+class IOBuffer;
+class NetLog;
+}
+
+namespace disk_cache {
+
+class Entry;
+class Backend;
+
+// Returns an instance of a Backend of the given |type|. |path| points to a
+// folder where the cached data will be stored (if appropriate). This cache
+// instance must be the only object that will be reading or writing files to
+// that folder. The returned object should be deleted when not needed anymore.
+// If |force| is true, and there is a problem with the cache initialization, the
+// files will be deleted and a new set will be created. |max_bytes| is the
+// maximum size the cache can grow to. If zero is passed in as |max_bytes|, the
+// cache will determine the value to use. |thread| can be used to perform IO
+// operations if a dedicated thread is required; a valid value is expected for
+// any backend that performs operations on a disk. The returned pointer can be
+// NULL if a fatal error is found. The actual return value of the function is a
+// net error code. If this function returns ERR_IO_PENDING, the |callback| will
+// be invoked when a backend is available or a fatal error condition is reached.
+// The pointer to receive the |backend| must remain valid until the operation
+// completes (the callback is notified).
+NET_EXPORT int CreateCacheBackend(net::CacheType type,
+ net::BackendType backend_type,
+ const base::FilePath& path,
+ int max_bytes,
+ bool force,
+ base::MessageLoopProxy* thread,
+ net::NetLog* net_log,
+ scoped_ptr<Backend>* backend,
+ const net::CompletionCallback& callback);
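+
+// A possible call site; the path, size, |cache_thread_proxy| (a
+// base::MessageLoopProxy for a dedicated cache thread) and OnBackendCreated()
+// are illustrative rather than prescribed by this header:
+//
+//   scoped_ptr<disk_cache::Backend> backend;
+//   int rv = disk_cache::CreateCacheBackend(
+//       net::DISK_CACHE, net::CACHE_BACKEND_SIMPLE,
+//       base::FilePath(FILE_PATH_LITERAL("/tmp/cache")), 50 * 1024 * 1024,
+//       true, cache_thread_proxy.get(), NULL, &backend,
+//       base::Bind(&OnBackendCreated));
+//   // rv is net::OK, a net error, or net::ERR_IO_PENDING, in which case
+//   // OnBackendCreated() runs once |backend| is available.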
+
+// The root interface for a disk cache instance.
+class NET_EXPORT Backend {
+ public:
+ typedef net::CompletionCallback CompletionCallback;
+
+ // If the backend is destroyed when there are operations in progress (any
+ // callback that has not been invoked yet), this method cancels said
+ // operations so the callbacks are not invoked, possibly leaving the work
+ // half way (for instance, dooming just a few entries). Note that pending IO
+ // for a given Entry (as opposed to the Backend) will still generate a
+ // callback from within this method.
+ virtual ~Backend() {}
+
+ // Returns the type of this cache.
+ virtual net::CacheType GetCacheType() const = 0;
+
+ // Returns the number of entries in the cache.
+ virtual int32 GetEntryCount() const = 0;
+
+ // Opens an existing entry. Upon success, |entry| holds a pointer to an Entry
+ // object representing the specified disk cache entry. When the entry pointer
+ // is no longer needed, its Close method should be called. The return value is
+ // a net error code. If this method returns ERR_IO_PENDING, the |callback|
+ // will be invoked when the entry is available. The pointer to receive the
+ // |entry| must remain valid until the operation completes.
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) = 0;
+
+ // Creates a new entry. Upon success, the out param holds a pointer to an
+ // Entry object representing the newly created disk cache entry. When the
+ // entry pointer is no longer needed, its Close method should be called. The
+ // return value is a net error code. If this method returns ERR_IO_PENDING,
+ // the |callback| will be invoked when the entry is available. The pointer to
+ // receive the |entry| must remain valid until the operation completes.
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) = 0;
+
+ // Marks the entry, specified by the given key, for deletion. The return value
+ // is a net error code. If this method returns ERR_IO_PENDING, the |callback|
+ // will be invoked after the entry is doomed.
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) = 0;
+
+ // Marks all entries for deletion. The return value is a net error code. If
+ // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
+ // operation completes.
+ virtual int DoomAllEntries(const CompletionCallback& callback) = 0;
+
+ // Marks a range of entries for deletion. This supports unbounded deletes in
+ // either direction by using null Time values for either argument. The return
+ // value is a net error code. If this method returns ERR_IO_PENDING, the
+ // |callback| will be invoked when the operation completes.
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) = 0;
+
+ // Marks all entries accessed since |initial_time| for deletion. The return
+ // value is a net error code. If this method returns ERR_IO_PENDING, the
+ // |callback| will be invoked when the operation completes.
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) = 0;
+
+ // Enumerates the cache. Initialize |iter| to NULL before calling this method
+ // the first time. That will cause the enumeration to start at the head of
+ // the cache. For subsequent calls, pass the same |iter| pointer again without
+ // changing its value. This method returns ERR_FAILED when there are no more
+ // entries to enumerate. When the entry pointer is no longer needed, its
+ // Close method should be called. The return value is a net error code. If
+ // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
+ // |next_entry| is available. The pointer to receive the |next_entry| must
+ // remain valid until the operation completes.
+ //
+ // NOTE: This method does not modify the last_used field of the entry, and
+ // therefore it does not impact the eviction ranking of the entry. However,
+ // an enumeration will go through all entries on the cache only if the cache
+ // is not modified while the enumeration is taking place. Significantly
+ // altering the entry pointed by |iter| (for example, deleting the entry) will
+ // invalidate |iter|. Performing operations on an entry that modify the entry
+ // may result in loops in the iteration, skipped entries or similar.
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) = 0;
+
+ // Releases |iter| without returning the next entry. Whenever OpenNextEntry()
+ // succeeds but the caller is not interested in continuing the
+ // enumeration by calling OpenNextEntry() again, the enumeration must be
+ // ended by calling this method with the |iter| returned by OpenNextEntry().
+ virtual void EndEnumeration(void** iter) = 0;
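+
+ // A possible enumeration loop, assuming synchronous completion for brevity
+ // (|callback| is a caller-supplied CompletionCallback and Inspect() stands in
+ // for whatever the caller does with an entry):
+ //
+ //   void* iter = NULL;
+ //   disk_cache::Entry* entry;
+ //   while (backend->OpenNextEntry(&iter, &entry, callback) == net::OK) {
+ //     bool done = Inspect(entry);
+ //     entry->Close();
+ //     if (done) {
+ //       backend->EndEnumeration(&iter);  // Abandoning the enumeration early.
+ //       break;
+ //     }
+ //   }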
+
+ // Return a list of cache statistics.
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) = 0;
+
+ // Called whenever an external cache in the system reuses the resource
+ // referred to by |key|.
+ virtual void OnExternalCacheHit(const std::string& key) = 0;
+};
+
+// This interface represents an entry in the disk cache.
+class NET_EXPORT Entry {
+ public:
+ typedef net::CompletionCallback CompletionCallback;
+ typedef net::IOBuffer IOBuffer;
+
+ // Marks this cache entry for deletion.
+ virtual void Doom() = 0;
+
+ // Releases this entry. Calling this method does not cancel pending IO
+ // operations on this entry. Even after the last reference to this object has
+ // been released, pending completion callbacks may be invoked.
+ virtual void Close() = 0;
+
+ // Returns the key associated with this cache entry.
+ virtual std::string GetKey() const = 0;
+
+ // Returns the time when this cache entry was last used.
+ virtual base::Time GetLastUsed() const = 0;
+
+ // Returns the time when this cache entry was last modified.
+ virtual base::Time GetLastModified() const = 0;
+
+ // Returns the size of the cache data with the given index.
+ virtual int32 GetDataSize(int index) const = 0;
+
+ // Copies cached data into the given buffer of length |buf_len|. Returns the
+ // number of bytes read or a network error code. If this function returns
+ // ERR_IO_PENDING, the completion callback will be called on the current
+ // thread when the operation completes, and a reference to |buf| will be
+ // retained until the callback is called. Note that as long as the function
+ // does not complete immediately, the callback will always be invoked, even
+ // after Close has been called; in other words, the caller may close this
+ // entry without having to wait for all the callbacks, and still rely on the
+ // cleanup performed from the callback code.
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) = 0;
+
+ // Copies data from the given buffer of length |buf_len| into the cache.
+ // Returns the number of bytes written or a network error code. If this
+ // function returns ERR_IO_PENDING, the completion callback will be called
+ // on the current thread when the operation completes, and a reference to
+ // |buf| will be retained until the callback is called. Note that as long as
+ // the function does not complete immediately, the callback will always be
+ // invoked, even after Close has been called; in other words, the caller may
+ // close this entry without having to wait for all the callbacks, and still
+ // rely on the cleanup performed from the callback code.
+ // If truncate is true, this call will truncate the stored data at the end of
+ // what we are writing here.
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) = 0;
+
+ // Sparse entries support:
+ //
+ // A Backend implementation can support sparse entries, so the cache keeps
+ // track of which parts of the entry have been written before. The backend
+ // will never return data that was not written previously, so reading from
+ // such a region will return 0 bytes read (or actually the number of bytes
+ // read before reaching that region).
+ //
+ // There are only two streams for sparse entries: a regular control stream
+ // (index 0) that must be accessed through the regular API (ReadData and
+ // WriteData), and one sparse stream that must be accessed through the sparse-
+ // aware API that follows. Calling a non-sparse aware method with an index
+ // argument other than 0 is a mistake that results in implementation specific
+ // behavior. Using a sparse-aware method with an entry that was not stored
+ // using the same API, or with a backend that doesn't support sparse entries
+ // will return ERR_CACHE_OPERATION_NOT_SUPPORTED.
+ //
+ // The storage granularity of the implementation should be at least 1 KB. In
+ // other words, storing less than 1 KB may result in an implementation
+ // dropping the data completely, and writing at offsets not aligned with 1 KB,
+ // or with lengths not a multiple of 1 KB may result in the first or last part
+ // of the data being discarded. However, two consecutive writes should not
+ // result in a hole in between the two parts as long as they are sequential
+ // (the second one starts where the first one ended), and there is no other
+ // write between them.
+ //
+ // The Backend implementation is free to evict any range from the cache at any
+ // moment, so in practice, the previously stated granularity of 1 KB is not
+ // as bad as it sounds.
+ //
+ // The sparse methods don't support multiple simultaneous IO operations to the
+ // same physical entry, so in practice a single object should be instantiated
+ // for a given key at any given time. Once an operation has been issued, the
+ // caller should wait until it completes before starting another one. This
+ // requirement includes the case when an entry is closed while some operation
+ // is in progress and another object is instantiated; any IO operation will
+ // fail while the previous operation is still in-flight. In order to deal with
+ // this requirement, the caller could either wait until the operation
+ // completes before closing the entry, or call CancelSparseIO() before closing
+ // the entry, and call ReadyForSparseIO() on the new entry and wait for the
+ // callback before issuing new operations.
+
+ // Behaves like ReadData() except that this method is used to access sparse
+ // entries.
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) = 0;
+
+ // Behaves like WriteData() except that this method is used to access sparse
+ // entries. |truncate| is not part of this interface because a sparse entry
+ // is not expected to be reused with new data. To delete the old data and
+ // start again, or to reduce the total size of the stream data (which implies
+ // that the content has changed), the whole entry should be doomed and
+ // re-created.
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) = 0;
+
+ // Returns information about the currently stored portion of a sparse entry.
+ // |offset| and |len| describe a particular range that should be scanned to
+ // find out if it is stored or not. |start| will contain the offset of the
+ // first byte that is stored within this range, and the return value is the
+ // minimum number of consecutive stored bytes. Note that it is possible that
+ // this entry has stored more than the returned value. This method returns a
+ // net error code whenever the request cannot be completed successfully. If
+ // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
+ // operation completes, and |start| must remain valid until that point.
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) = 0;
+
+ // Returns true if this entry could be a sparse entry or false otherwise. This
+ // is a quick test that may return true even if the entry is not really
+ // sparse. This method doesn't modify the state of this entry (it will not
+ // create sparse tracking data). GetAvailableRange or ReadSparseData can be
+ // used to perform a definitive test of whether an existing entry is sparse or
+// not, but those methods may modify the current state of the entry (making it
+ // sparse, for instance). The purpose of this method is to test an existing
+ // entry, but without generating actual IO to perform a thorough check.
+ virtual bool CouldBeSparse() const = 0;
+
+ // Cancels any pending sparse IO operation (if any). The completion callback
+ // of the operation in question will still be called when the operation
+ // finishes, but the operation will finish sooner when this method is used.
+ virtual void CancelSparseIO() = 0;
+
+ // Returns OK if this entry can be used immediately. If that is not the
+ // case, returns ERR_IO_PENDING and invokes the provided callback when this
+ // entry is ready to use. This method always returns OK for non-sparse
+ // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
+ // (by calling CancelSparseIO), but the cache is still busy with it. If there
+ // is a pending operation that has not been cancelled, this method will return
+ // OK although another IO operation cannot be issued at this time; in this
+ // case the caller should just wait for the regular callback to be invoked
+ // instead of using this method to provide another callback.
+ //
+ // Note that CancelSparseIO may have been called on another instance of this
+ // object that refers to the same physical disk entry.
+ // Note: This method is deprecated.
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) = 0;
+
+ protected:
+ virtual ~Entry() {}
+};
+
+struct EntryDeleter {
+ void operator()(Entry* entry) {
+ // Note that |entry| is ref-counted.
+ entry->Close();
+ }
+};
+
+// Automatically closes an entry when it goes out of scope.
+typedef scoped_ptr<Entry, EntryDeleter> ScopedEntryPtr;
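+
+// An illustrative usage sketch (not a normative part of this interface): a
+// hypothetical helper that writes 1 KB of sparse data at offset 4096 and then
+// asks how much of that range is stored. The ERR_IO_PENDING path and error
+// handling are omitted for brevity.
+//
+//   void SparseExample(disk_cache::Entry* entry,
+//                      const net::CompletionCallback& callback) {
+//     scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(1024));
+//     memset(buf->data(), 'x', 1024);
+//     int rv = entry->WriteSparseData(4096, buf.get(), 1024, callback);
+//     // rv is the number of bytes written, a net error, or ERR_IO_PENDING.
+//     int64 start = 0;
+//     rv = entry->GetAvailableRange(4096, 1024, &start, callback);
+//     // On success, |start| == 4096 and rv is the count of stored bytes.
+//   }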
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_CACHE_H_
diff --git a/chromium/net/disk_cache/disk_cache_perftest.cc b/chromium/net/disk_cache/disk_cache_perftest.cc
new file mode 100644
index 00000000000..f7a1b5969c6
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache_perftest.cc
@@ -0,0 +1,250 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/hash.h"
+#include "base/perftimer.h"
+#include "base/strings/string_util.h"
+#include "base/test/test_file_util.h"
+#include "base/threading/thread.h"
+#include "base/timer/timer.h"
+#include "net/base/cache_type.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using base::Time;
+
+namespace {
+
+struct TestEntry {
+ std::string key;
+ int data_len;
+};
+typedef std::vector<TestEntry> TestEntries;
+
+const int kMaxSize = 16 * 1024 - 1;
+
+// Creates |num_entries| entries in the cache, and writes 200 bytes of metadata
+// and up to kMaxSize bytes of data to each entry.
+bool TimeWrite(int num_entries, disk_cache::Backend* cache,
+ TestEntries* entries) {
+ const int kSize1 = 200;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kMaxSize));
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kMaxSize, false);
+
+ int expected = 0;
+
+ MessageLoopHelper helper;
+ CallbackTest callback(&helper, true);
+
+ PerfTimeLogger timer("Write disk cache entries");
+
+ for (int i = 0; i < num_entries; i++) {
+ TestEntry entry;
+ entry.key = GenerateKey(true);
+ entry.data_len = rand() % kMaxSize;
+ entries->push_back(entry);
+
+ disk_cache::Entry* cache_entry;
+ net::TestCompletionCallback cb;
+ int rv = cache->CreateEntry(entry.key, &cache_entry, cb.callback());
+ if (net::OK != cb.GetResult(rv))
+ break;
+ int ret = cache_entry->WriteData(
+ 0, 0, buffer1.get(), kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (kSize1 != ret)
+ break;
+
+ ret = cache_entry->WriteData(
+ 1, 0, buffer2.get(), entry.data_len,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entry.data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return (expected == helper.callbacks_called());
+}
+
+// Reads the data and metadata from each entry listed in |entries|.
+bool TimeRead(int num_entries, disk_cache::Backend* cache,
+ const TestEntries& entries, bool cold) {
+ const int kSize1 = 200;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kMaxSize));
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kMaxSize, false);
+
+ int expected = 0;
+
+ MessageLoopHelper helper;
+ CallbackTest callback(&helper, true);
+
+ const char* message = cold ? "Read disk cache entries (cold)" :
+ "Read disk cache entries (warm)";
+ PerfTimeLogger timer(message);
+
+ for (int i = 0; i < num_entries; i++) {
+ disk_cache::Entry* cache_entry;
+ net::TestCompletionCallback cb;
+ int rv = cache->OpenEntry(entries[i].key, &cache_entry, cb.callback());
+ if (net::OK != cb.GetResult(rv))
+ break;
+ int ret = cache_entry->ReadData(
+ 0, 0, buffer1.get(), kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (kSize1 != ret)
+ break;
+
+ ret = cache_entry->ReadData(
+ 1, 0, buffer2.get(), entries[i].data_len,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback)));
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entries[i].data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return (expected == helper.callbacks_called());
+}
+
+int BlockSize() {
+  // We can use from 1 to 4 blocks.
+ return (rand() & 0x3) + 1;
+}
+
+} // namespace
+
+TEST_F(DiskCacheTest, Hash) {
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ PerfTimeLogger timer("Hash disk cache keys");
+ for (int i = 0; i < 300000; i++) {
+ std::string key = GenerateKey(true);
+ base::Hash(key);
+ }
+ timer.Done();
+}
+
+TEST_F(DiskCacheTest, CacheBackendPerformance) {
+ base::Thread cache_thread("CacheThread");
+ ASSERT_TRUE(cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+
+ ASSERT_TRUE(CleanupCacheDir());
+ net::TestCompletionCallback cb;
+ scoped_ptr<disk_cache::Backend> cache;
+ int rv = disk_cache::CreateCacheBackend(
+ net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE, cache_path_, 0, false,
+ cache_thread.message_loop_proxy().get(), NULL, &cache, cb.callback());
+
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ TestEntries entries;
+ int num_entries = 1000;
+
+ EXPECT_TRUE(TimeWrite(num_entries, cache.get(), &entries));
+
+ base::MessageLoop::current()->RunUntilIdle();
+ cache.reset();
+
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ cache_path_.AppendASCII("index")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ cache_path_.AppendASCII("data_0")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ cache_path_.AppendASCII("data_1")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ cache_path_.AppendASCII("data_2")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ cache_path_.AppendASCII("data_3")));
+
+ rv = disk_cache::CreateCacheBackend(
+ net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE, cache_path_, 0, false,
+ cache_thread.message_loop_proxy().get(), NULL, &cache, cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ EXPECT_TRUE(TimeRead(num_entries, cache.get(), entries, true));
+
+ EXPECT_TRUE(TimeRead(num_entries, cache.get(), entries, false));
+
+ base::MessageLoop::current()->RunUntilIdle();
+}
+
+// Creating and deleting "entries" on a block-file happens quite frequently
+// (after all, almost everything is stored on block files). The operation is
+// almost free when the file is empty, but can be expensive if the file gets
+// fragmented, or if we have multiple files. This test measures that scenario
+// by using multiple, highly fragmented files.
+TEST_F(DiskCacheTest, BlockFilesPerformance) {
+ ASSERT_TRUE(CleanupCacheDir());
+
+ disk_cache::BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 60000;
+ disk_cache::Addr* address = new disk_cache::Addr[kNumEntries];
+
+ PerfTimeLogger timer1("Fill three block-files");
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kNumEntries; i++) {
+ EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, BlockSize(),
+ &address[i]));
+ }
+
+ timer1.Done();
+ PerfTimeLogger timer2("Create and delete blocks");
+
+ for (int i = 0; i < 200000; i++) {
+ int entry = rand() * (kNumEntries / RAND_MAX + 1);
+ if (entry >= kNumEntries)
+ entry = 0;
+
+ files.DeleteBlock(address[entry], false);
+ EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, BlockSize(),
+ &address[entry]));
+ }
+
+ timer2.Done();
+ base::MessageLoop::current()->RunUntilIdle();
+ delete[] address;
+}
diff --git a/chromium/net/disk_cache/disk_cache_test_base.cc b/chromium/net/disk_cache/disk_cache_test_base.cc
new file mode 100644
index 00000000000..dc7bb6c5fb2
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache_test_base.cc
@@ -0,0 +1,307 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/disk_cache_test_base.h"
+
+#include "base/file_util.h"
+#include "base/path_service.h"
+#include "base/run_loop.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mem_backend_impl.h"
+#include "net/disk_cache/simple/simple_backend_impl.h"
+#include "net/disk_cache/simple/simple_index.h"
+
+DiskCacheTest::DiskCacheTest() {
+ CHECK(temp_dir_.CreateUniqueTempDir());
+ cache_path_ = temp_dir_.path();
+ if (!base::MessageLoop::current())
+ message_loop_.reset(new base::MessageLoopForIO());
+}
+
+DiskCacheTest::~DiskCacheTest() {
+}
+
+bool DiskCacheTest::CopyTestCache(const std::string& name) {
+ base::FilePath path;
+ PathService::Get(base::DIR_SOURCE_ROOT, &path);
+ path = path.AppendASCII("net");
+ path = path.AppendASCII("data");
+ path = path.AppendASCII("cache_tests");
+ path = path.AppendASCII(name);
+
+ if (!CleanupCacheDir())
+ return false;
+ return base::CopyDirectory(path, cache_path_, false);
+}
+
+bool DiskCacheTest::CleanupCacheDir() {
+ return DeleteCache(cache_path_);
+}
+
+void DiskCacheTest::TearDown() {
+ base::RunLoop().RunUntilIdle();
+}
+
+DiskCacheTestWithCache::DiskCacheTestWithCache()
+ : cache_impl_(NULL),
+ simple_cache_impl_(NULL),
+ mem_cache_(NULL),
+ mask_(0),
+ size_(0),
+ type_(net::DISK_CACHE),
+ memory_only_(false),
+ simple_cache_mode_(false),
+ simple_cache_wait_for_index_(true),
+ force_creation_(false),
+ new_eviction_(false),
+ first_cleanup_(true),
+ integrity_(true),
+ use_current_thread_(false),
+ cache_thread_("CacheThread") {
+}
+
+DiskCacheTestWithCache::~DiskCacheTestWithCache() {}
+
+void DiskCacheTestWithCache::InitCache() {
+ if (memory_only_)
+ InitMemoryCache();
+ else
+ InitDiskCache();
+
+ ASSERT_TRUE(NULL != cache_);
+ if (first_cleanup_)
+ ASSERT_EQ(0, cache_->GetEntryCount());
+}
+
+// We are expected to leak memory when simulating crashes.
+void DiskCacheTestWithCache::SimulateCrash() {
+ ASSERT_TRUE(!memory_only_);
+ net::TestCompletionCallback cb;
+ int rv = cache_impl_->FlushQueueForTest(cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ cache_impl_->ClearRefCountForTest();
+
+ cache_.reset();
+ EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
+
+ CreateBackend(disk_cache::kNoRandom, &cache_thread_);
+}
+
+void DiskCacheTestWithCache::SetTestMode() {
+ ASSERT_TRUE(!memory_only_);
+ cache_impl_->SetUnitTestMode();
+}
+
+void DiskCacheTestWithCache::SetMaxSize(int size) {
+ size_ = size;
+ if (simple_cache_impl_)
+ EXPECT_TRUE(simple_cache_impl_->SetMaxSize(size));
+
+ if (cache_impl_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size));
+
+ if (mem_cache_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+}
+
+int DiskCacheTestWithCache::OpenEntry(const std::string& key,
+ disk_cache::Entry** entry) {
+ net::TestCompletionCallback cb;
+ int rv = cache_->OpenEntry(key, entry, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::CreateEntry(const std::string& key,
+ disk_cache::Entry** entry) {
+ net::TestCompletionCallback cb;
+ int rv = cache_->CreateEntry(key, entry, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::DoomEntry(const std::string& key) {
+ net::TestCompletionCallback cb;
+ int rv = cache_->DoomEntry(key, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::DoomAllEntries() {
+ net::TestCompletionCallback cb;
+ int rv = cache_->DoomAllEntries(cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time) {
+ net::TestCompletionCallback cb;
+ int rv = cache_->DoomEntriesBetween(initial_time, end_time, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::DoomEntriesSince(const base::Time initial_time) {
+ net::TestCompletionCallback cb;
+ int rv = cache_->DoomEntriesSince(initial_time, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::OpenNextEntry(void** iter,
+ disk_cache::Entry** next_entry) {
+ net::TestCompletionCallback cb;
+ int rv = cache_->OpenNextEntry(iter, next_entry, cb.callback());
+ return cb.GetResult(rv);
+}
+
+void DiskCacheTestWithCache::FlushQueueForTest() {
+ if (memory_only_ || !cache_impl_)
+ return;
+
+ net::TestCompletionCallback cb;
+ int rv = cache_impl_->FlushQueueForTest(cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+}
+
+void DiskCacheTestWithCache::RunTaskForTest(const base::Closure& closure) {
+ if (memory_only_ || !cache_impl_) {
+ closure.Run();
+ return;
+ }
+
+ net::TestCompletionCallback cb;
+ int rv = cache_impl_->RunTaskForTest(closure, cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+}
+
+int DiskCacheTestWithCache::ReadData(disk_cache::Entry* entry, int index,
+ int offset, net::IOBuffer* buf, int len) {
+ net::TestCompletionCallback cb;
+ int rv = entry->ReadData(index, offset, buf, len, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::WriteData(disk_cache::Entry* entry, int index,
+ int offset, net::IOBuffer* buf, int len,
+ bool truncate) {
+ net::TestCompletionCallback cb;
+ int rv = entry->WriteData(index, offset, buf, len, cb.callback(), truncate);
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::ReadSparseData(disk_cache::Entry* entry,
+ int64 offset, net::IOBuffer* buf,
+ int len) {
+ net::TestCompletionCallback cb;
+ int rv = entry->ReadSparseData(offset, buf, len, cb.callback());
+ return cb.GetResult(rv);
+}
+
+int DiskCacheTestWithCache::WriteSparseData(disk_cache::Entry* entry,
+ int64 offset,
+ net::IOBuffer* buf, int len) {
+ net::TestCompletionCallback cb;
+ int rv = entry->WriteSparseData(offset, buf, len, cb.callback());
+ return cb.GetResult(rv);
+}
+
+void DiskCacheTestWithCache::TrimForTest(bool empty) {
+ RunTaskForTest(base::Bind(&disk_cache::BackendImpl::TrimForTest,
+ base::Unretained(cache_impl_),
+ empty));
+}
+
+void DiskCacheTestWithCache::TrimDeletedListForTest(bool empty) {
+ RunTaskForTest(base::Bind(&disk_cache::BackendImpl::TrimDeletedListForTest,
+ base::Unretained(cache_impl_),
+ empty));
+}
+
+void DiskCacheTestWithCache::AddDelay() {
+ base::Time initial = base::Time::Now();
+ while (base::Time::Now() <= initial) {
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+  }
+}
+
+void DiskCacheTestWithCache::TearDown() {
+ base::RunLoop().RunUntilIdle();
+ cache_.reset();
+ if (cache_thread_.IsRunning())
+ cache_thread_.Stop();
+
+ if (!memory_only_ && !simple_cache_mode_ && integrity_) {
+ EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
+ }
+
+ PlatformTest::TearDown();
+}
+
+void DiskCacheTestWithCache::InitMemoryCache() {
+ mem_cache_ = new disk_cache::MemBackendImpl(NULL);
+ cache_.reset(mem_cache_);
+ ASSERT_TRUE(cache_);
+
+ if (size_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
+
+ ASSERT_TRUE(mem_cache_->Init());
+}
+
+void DiskCacheTestWithCache::InitDiskCache() {
+ if (first_cleanup_)
+ ASSERT_TRUE(CleanupCacheDir());
+
+ if (!cache_thread_.IsRunning()) {
+ ASSERT_TRUE(cache_thread_.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+ }
+ ASSERT_TRUE(cache_thread_.message_loop() != NULL);
+
+ CreateBackend(disk_cache::kNoRandom, &cache_thread_);
+}
+
+void DiskCacheTestWithCache::CreateBackend(uint32 flags, base::Thread* thread) {
+ base::MessageLoopProxy* runner;
+ if (use_current_thread_)
+ runner = base::MessageLoopProxy::current().get();
+ else
+ runner = thread->message_loop_proxy().get();
+
+ if (simple_cache_mode_) {
+ net::TestCompletionCallback cb;
+ scoped_ptr<disk_cache::SimpleBackendImpl> simple_backend(
+ new disk_cache::SimpleBackendImpl(
+ cache_path_, size_, type_, make_scoped_refptr(runner).get(), NULL));
+ int rv = simple_backend->Init(cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ simple_cache_impl_ = simple_backend.get();
+ cache_ = simple_backend.PassAs<disk_cache::Backend>();
+ if (simple_cache_wait_for_index_) {
+ net::TestCompletionCallback wait_for_index_cb;
+ rv = simple_cache_impl_->index()->ExecuteWhenReady(
+ wait_for_index_cb.callback());
+ ASSERT_EQ(net::OK, wait_for_index_cb.GetResult(rv));
+ }
+ return;
+ }
+
+ if (mask_)
+ cache_impl_ = new disk_cache::BackendImpl(cache_path_, mask_, runner, NULL);
+ else
+ cache_impl_ = new disk_cache::BackendImpl(cache_path_, runner, NULL);
+ cache_.reset(cache_impl_);
+ ASSERT_TRUE(cache_);
+ if (size_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
+ if (new_eviction_)
+ cache_impl_->SetNewEviction();
+ cache_impl_->SetType(type_);
+ cache_impl_->SetFlags(flags);
+ net::TestCompletionCallback cb;
+ int rv = cache_impl_->Init(cb.callback());
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+}
diff --git a/chromium/net/disk_cache/disk_cache_test_base.h b/chromium/net/disk_cache/disk_cache_test_base.h
new file mode 100644
index 00000000000..b7249062895
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache_test_base.h
@@ -0,0 +1,176 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H_
+#define NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H_
+
+#include "base/basictypes.h"
+#include "base/files/file_path.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread.h"
+#include "net/base/cache_type.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace net {
+
+class IOBuffer;
+
+} // namespace net
+
+namespace disk_cache {
+
+class Backend;
+class BackendImpl;
+class Entry;
+class MemBackendImpl;
+class SimpleBackendImpl;
+
+} // namespace disk_cache
+
+// These tests can use the path service, which uses autoreleased objects on the
+// Mac, so this needs to be a PlatformTest. Even tests that do not require a
+// cache (and that do not need to be a DiskCacheTestWithCache) are susceptible
+// to this problem; all such tests should use TEST_F(DiskCacheTest, ...).
+class DiskCacheTest : public PlatformTest {
+ protected:
+ DiskCacheTest();
+ virtual ~DiskCacheTest();
+
+ // Copies a set of cache files from the data folder to the test folder.
+ bool CopyTestCache(const std::string& name);
+
+ // Deletes the contents of |cache_path_|.
+ bool CleanupCacheDir();
+
+ virtual void TearDown() OVERRIDE;
+
+ base::FilePath cache_path_;
+
+ private:
+ base::ScopedTempDir temp_dir_;
+ scoped_ptr<base::MessageLoop> message_loop_;
+};
+
+// Provides basic support for cache related tests.
+class DiskCacheTestWithCache : public DiskCacheTest {
+ protected:
+ DiskCacheTestWithCache();
+ virtual ~DiskCacheTestWithCache();
+
+ void CreateBackend(uint32 flags, base::Thread* thread);
+
+ void InitCache();
+ void SimulateCrash();
+ void SetTestMode();
+
+ void SetMemoryOnlyMode() {
+ memory_only_ = true;
+ }
+
+ void SetSimpleCacheMode() {
+ simple_cache_mode_ = true;
+ }
+
+ void SetMask(uint32 mask) {
+ mask_ = mask;
+ }
+
+ void SetMaxSize(int size);
+
+ // Deletes and re-creates the files on initialization errors.
+ void SetForceCreation() {
+ force_creation_ = true;
+ }
+
+ void SetNewEviction() {
+ new_eviction_ = true;
+ }
+
+ void DisableSimpleCacheWaitForIndex() {
+ simple_cache_wait_for_index_ = false;
+ }
+
+ void DisableFirstCleanup() {
+ first_cleanup_ = false;
+ }
+
+ void DisableIntegrityCheck() {
+ integrity_ = false;
+ }
+
+ void UseCurrentThread() {
+ use_current_thread_ = true;
+ }
+
+ void SetCacheType(net::CacheType type) {
+ type_ = type;
+ }
+
+ // Utility methods to access the cache and wait for each operation to finish.
+ int OpenEntry(const std::string& key, disk_cache::Entry** entry);
+ int CreateEntry(const std::string& key, disk_cache::Entry** entry);
+ int DoomEntry(const std::string& key);
+ int DoomAllEntries();
+ int DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ int DoomEntriesSince(const base::Time initial_time);
+ int OpenNextEntry(void** iter, disk_cache::Entry** next_entry);
+ void FlushQueueForTest();
+ void RunTaskForTest(const base::Closure& closure);
+ int ReadData(disk_cache::Entry* entry, int index, int offset,
+ net::IOBuffer* buf, int len);
+ int WriteData(disk_cache::Entry* entry, int index, int offset,
+ net::IOBuffer* buf, int len, bool truncate);
+ int ReadSparseData(disk_cache::Entry* entry, int64 offset, net::IOBuffer* buf,
+ int len);
+ int WriteSparseData(disk_cache::Entry* entry, int64 offset,
+ net::IOBuffer* buf, int len);
+
+ // Asks the cache to trim an entry. If |empty| is true, the whole cache is
+ // deleted.
+ void TrimForTest(bool empty);
+
+ // Asks the cache to trim an entry from the deleted list. If |empty| is
+ // true, the whole list is deleted.
+ void TrimDeletedListForTest(bool empty);
+
+ // Makes sure that some time passes before continuing the test. Time::Now()
+ // before and after this method will not be the same.
+ void AddDelay();
+
+ // DiskCacheTest:
+ virtual void TearDown() OVERRIDE;
+
+ // cache_ will always have a valid object, regardless of how the cache was
+ // initialized. The implementation pointers can be NULL.
+ scoped_ptr<disk_cache::Backend> cache_;
+ disk_cache::BackendImpl* cache_impl_;
+ disk_cache::SimpleBackendImpl* simple_cache_impl_;
+ disk_cache::MemBackendImpl* mem_cache_;
+
+ uint32 mask_;
+ int size_;
+ net::CacheType type_;
+ bool memory_only_;
+ bool simple_cache_mode_;
+ bool simple_cache_wait_for_index_;
+ bool force_creation_;
+ bool new_eviction_;
+ bool first_cleanup_;
+ bool integrity_;
+ bool use_current_thread_;
+ // This is intentionally left uninitialized, to be used by any test.
+ bool success_;
+
+ private:
+ void InitMemoryCache();
+ void InitDiskCache();
+
+ base::Thread cache_thread_;
+ DISALLOW_COPY_AND_ASSIGN(DiskCacheTestWithCache);
+};
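+
+// An illustrative sketch of how a test typically drives this fixture (the real
+// tests live in the various *_unittest.cc files; the key and buffer size below
+// are arbitrary):
+//
+//   TEST_F(DiskCacheTestWithCache, ExampleUsage) {
+//     InitCache();
+//     disk_cache::Entry* entry;
+//     ASSERT_EQ(net::OK, CreateEntry("some key", &entry));
+//     scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(100));
+//     CacheTestFillBuffer(buffer->data(), 100, false);
+//     EXPECT_EQ(100, WriteData(entry, 0, 0, buffer.get(), 100, false));
+//     EXPECT_EQ(100, ReadData(entry, 0, 0, buffer.get(), 100));
+//     entry->Close();
+//   }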
+
+#endif // NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H_
diff --git a/chromium/net/disk_cache/disk_cache_test_util.cc b/chromium/net/disk_cache/disk_cache_test_util.cc
new file mode 100644
index 00000000000..8f334f05370
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache_test_util.cc
@@ -0,0 +1,146 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/disk_cache_test_util.h"
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/path_service.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/file.h"
+
+using base::Time;
+using base::TimeDelta;
+
+std::string GenerateKey(bool same_length) {
+ char key[200];
+ CacheTestFillBuffer(key, sizeof(key), same_length);
+
+ key[199] = '\0';
+ return std::string(key);
+}
+
+void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls) {
+ static bool called = false;
+ if (!called) {
+ called = true;
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ }
+
+ for (size_t i = 0; i < len; i++) {
+ buffer[i] = static_cast<char>(rand());
+ if (!buffer[i] && no_nulls)
+ buffer[i] = 'g';
+ }
+ if (len && !buffer[0])
+ buffer[0] = 'g';
+}
+
+bool CreateCacheTestFile(const base::FilePath& name) {
+ int flags = base::PLATFORM_FILE_CREATE_ALWAYS |
+ base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE;
+
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(name, flags, NULL, NULL)));
+ if (!file->IsValid())
+ return false;
+
+ file->SetLength(4 * 1024 * 1024);
+ return true;
+}
+
+bool DeleteCache(const base::FilePath& path) {
+ disk_cache::DeleteCache(path, false);
+ return true;
+}
+
+bool CheckCacheIntegrity(const base::FilePath& path, bool new_eviction,
+ uint32 mask) {
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
+ path, mask, base::MessageLoopProxy::current().get(), NULL));
+ if (!cache.get())
+ return false;
+ if (new_eviction)
+ cache->SetNewEviction();
+ cache->SetFlags(disk_cache::kNoRandom);
+ if (cache->SyncInit() != net::OK)
+ return false;
+ return cache->SelfCheck() >= 0;
+}
+
+// -----------------------------------------------------------------------
+
+MessageLoopHelper::MessageLoopHelper()
+ : num_callbacks_(0),
+ num_iterations_(0),
+ last_(0),
+ completed_(false),
+ callback_reused_error_(false),
+ callbacks_called_(0) {
+}
+
+MessageLoopHelper::~MessageLoopHelper() {
+}
+
+bool MessageLoopHelper::WaitUntilCacheIoFinished(int num_callbacks) {
+ if (num_callbacks == callbacks_called_)
+ return true;
+
+ ExpectCallbacks(num_callbacks);
+  // Create a recurring timer of 50 ms.
+ if (!timer_.IsRunning())
+ timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(50), this,
+ &MessageLoopHelper::TimerExpired);
+ base::MessageLoop::current()->Run();
+ return completed_;
+}
+
+// Quits the message loop when all callbacks are called or we've been waiting
+// too long for them (2 secs without a callback).
+void MessageLoopHelper::TimerExpired() {
+ CHECK_LE(callbacks_called_, num_callbacks_);
+ if (callbacks_called_ == num_callbacks_) {
+ completed_ = true;
+ base::MessageLoop::current()->Quit();
+ } else {
+ // Not finished yet. See if we have to abort.
+ if (last_ == callbacks_called_)
+ num_iterations_++;
+ else
+ last_ = callbacks_called_;
+ if (40 == num_iterations_)
+ base::MessageLoop::current()->Quit();
+ }
+}
+
+// -----------------------------------------------------------------------
+
+CallbackTest::CallbackTest(MessageLoopHelper* helper,
+ bool reuse)
+ : helper_(helper),
+ reuse_(reuse ? 0 : 1) {
+}
+
+CallbackTest::~CallbackTest() {
+}
+
+// On the actual callback, record the result, check for reuse errors (a
+// callback invoked more times than expected) and report back to the helper.
+void CallbackTest::Run(int result) {
+ last_result_ = result;
+
+ if (reuse_) {
+ DCHECK_EQ(1, reuse_);
+ if (2 == reuse_)
+ helper_->set_callback_reused_error(true);
+ reuse_++;
+ }
+
+ helper_->CallbackWasCalled();
+}
diff --git a/chromium/net/disk_cache/disk_cache_test_util.h b/chromium/net/disk_cache/disk_cache_test_util.h
new file mode 100644
index 00000000000..ad6b6ac9414
--- /dev/null
+++ b/chromium/net/disk_cache/disk_cache_test_util.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H_
+#define NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H_
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/message_loop/message_loop.h"
+#include "base/timer/timer.h"
+#include "base/tuple.h"
+#include "build/build_config.h"
+
+// Re-creates a given test file inside the cache test folder.
+bool CreateCacheTestFile(const base::FilePath& name);
+
+// Deletes all files in the cache.
+bool DeleteCache(const base::FilePath& path);
+
+// Fills buffer with random values (may contain nulls unless no_nulls is true).
+void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls);
+
+// Generates a random key of up to 200 bytes.
+std::string GenerateKey(bool same_length);
+
+// Returns true if the cache is not corrupt.
+bool CheckCacheIntegrity(const base::FilePath& path, bool new_eviction,
+ uint32 mask);
+
+// -----------------------------------------------------------------------
+
+// Simple helper to deal with the message loop on a test.
+class MessageLoopHelper {
+ public:
+ MessageLoopHelper();
+ ~MessageLoopHelper();
+
+  // Runs the message loop and waits for |num_callbacks| callbacks before
+  // returning. Returns false if we waited too long. Each callback that will be
+  // waited on is required to call CallbackWasCalled() to indicate when it was
+  // called.
+ bool WaitUntilCacheIoFinished(int num_callbacks);
+
+ // True if a given callback was called more times than it expected.
+ bool callback_reused_error() const { return callback_reused_error_; }
+ void set_callback_reused_error(bool error) {
+ callback_reused_error_ = error;
+ }
+
+ int callbacks_called() const { return callbacks_called_; }
+ // Report that a callback was called. Each callback that will be waited on
+ // via WaitUntilCacheIoFinished() is expected to call this method to
+ // indicate when it has been executed.
+ void CallbackWasCalled() { ++callbacks_called_; }
+
+ private:
+  // Sets the number of callbacks that we expect to receive.
+ void ExpectCallbacks(int num_callbacks) {
+ num_callbacks_ = num_callbacks;
+ num_iterations_ = last_ = 0;
+ completed_ = false;
+ }
+
+ // Called periodically to test if WaitUntilCacheIoFinished should return.
+ void TimerExpired();
+
+ base::RepeatingTimer<MessageLoopHelper> timer_;
+ int num_callbacks_;
+ int num_iterations_;
+ int last_;
+ bool completed_;
+
+ // True if a callback was called/reused more than expected.
+ bool callback_reused_error_;
+ int callbacks_called_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageLoopHelper);
+};
+
+// -----------------------------------------------------------------------
+
+// Simple callback to process IO completions from the cache. It allows tests
+// with multiple simultaneous IO operations.
+class CallbackTest {
+ public:
+ // Creates a new CallbackTest object. When the callback is called, it will
+ // update |helper|. If |reuse| is false and a callback is called more than
+ // once, or if |reuse| is true and a callback is called more than twice, an
+ // error will be reported to |helper|.
+ CallbackTest(MessageLoopHelper* helper, bool reuse);
+ ~CallbackTest();
+
+ void Run(int result);
+
+ int last_result() const { return last_result_; }
+
+ private:
+ MessageLoopHelper* helper_;
+ int reuse_;
+ int last_result_;
+ DISALLOW_COPY_AND_ASSIGN(CallbackTest);
+};
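+
+// An illustrative sketch (mirroring how the perf test uses these helpers): a
+// CallbackTest is bound as the completion callback of an asynchronous cache
+// operation, and MessageLoopHelper waits for the expected number of
+// completions. |entry|, |buffer| and |len| stand for an open entry and a
+// prepared buffer.
+//
+//   MessageLoopHelper helper;
+//   CallbackTest callback(&helper, true);
+//   int expected = 0;
+//   int rv = entry->WriteData(
+//       0, 0, buffer.get(), len,
+//       base::Bind(&CallbackTest::Run, base::Unretained(&callback)), false);
+//   if (rv == net::ERR_IO_PENDING)
+//     expected++;
+//   helper.WaitUntilCacheIoFinished(expected);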
+
+#endif // NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H_
diff --git a/chromium/net/disk_cache/disk_format.cc b/chromium/net/disk_cache/disk_format.cc
new file mode 100644
index 00000000000..5b08954e088
--- /dev/null
+++ b/chromium/net/disk_cache/disk_format.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/disk_format.h"
+
+namespace disk_cache {
+
+IndexHeader::IndexHeader() {
+ memset(this, 0, sizeof(*this));
+ magic = kIndexMagic;
+ version = kCurrentVersion;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/disk_format.h b/chromium/net/disk_cache/disk_format.h
new file mode 100644
index 00000000000..5d7597ab17d
--- /dev/null
+++ b/chromium/net/disk_cache/disk_format.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The cache is stored on disk as a collection of block-files, plus an index
+// file plus a collection of external files.
+//
+// Any data blob bigger than kMaxBlockSize (disk_cache/addr.h) will be stored in
+// a separate file named f_xxx where x is a hexadecimal number. Shorter data
+// will be stored as a series of blocks on a block-file. In any case, CacheAddr
+// represents the address of the data inside the cache.
+//
+// The index file is just a simple hash table that maps a particular entry to
+// a CacheAddr value. Linking for a given hash bucket is handled internally
+// by the cache entry.
+//
+// The last element of the cache is the block-file. A block file is a file
+// designed to store blocks of data of a given size. For more details see
+// disk_cache/disk_format_base.h
+//
+// A new cache is initialized with four block files (named data_0 through
+// data_3), each one dedicated to store blocks of a given size. The number at
+// the end of the file name is the block file number (in decimal).
+//
+// There are two "special" types of blocks: an entry and a rankings node. An
+// entry keeps track of all the information related to the same cache entry,
+// such as the key, hash value, data pointers etc. A rankings node keeps track
+// of the information that is updated frequently for a given entry, such as its
+// location on the LRU lists, last access time etc.
+//
+// The files that store internal information for the cache (blocks and index)
+// are at least partially memory mapped. They have a location that is signaled
+// every time the internal structures are modified, so it is possible to detect
+// (most of the time) when the process dies in the middle of an update.
+//
+// To prevent dirty data from being used as valid (after a crash), every
+// cache entry has a dirty identifier. Each running instance of the cache keeps
+// a separate identifier (maintained on the "this_id" header field) that is used
+// to mark every entry that is created or modified. When the entry is closed,
+// and all the data can be trusted, the dirty flag is cleared from the entry.
+// When the cache encounters an entry whose identifier is different than the one
+// being currently used, it means that the entry was not properly closed on a
+// previous run, so it is discarded.
+
+#ifndef NET_DISK_CACHE_DISK_FORMAT_H_
+#define NET_DISK_CACHE_DISK_FORMAT_H_
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/disk_format_base.h"
+
+namespace disk_cache {
+
+const int kIndexTablesize = 0x10000;
+const uint32 kIndexMagic = 0xC103CAC3;
+const uint32 kCurrentVersion = 0x20000; // Version 2.0.
+
+struct LruData {
+ int32 pad1[2];
+ int32 filled; // Flag to tell when we filled the cache.
+ int32 sizes[5];
+ CacheAddr heads[5];
+ CacheAddr tails[5];
+ CacheAddr transaction; // In-flight operation target.
+ int32 operation; // Actual in-flight operation.
+ int32 operation_list; // In-flight operation list.
+ int32 pad2[7];
+};
+
+// Header for the master index file.
+struct NET_EXPORT_PRIVATE IndexHeader {
+ IndexHeader();
+
+ uint32 magic;
+ uint32 version;
+ int32 num_entries; // Number of entries currently stored.
+ int32 num_bytes; // Total size of the stored data.
+ int32 last_file; // Last external file created.
+ int32 this_id; // Id for all entries being changed (dirty flag).
+ CacheAddr stats; // Storage for usage data.
+ int32 table_len; // Actual size of the table (0 == kIndexTablesize).
+ int32 crash; // Signals a previous crash.
+ int32 experiment; // Id of an ongoing test.
+ uint64 create_time; // Creation time for this set of files.
+ int32 pad[52];
+ LruData lru; // Eviction control data.
+};
+
+// The structure of the whole index file.
+struct Index {
+ IndexHeader header;
+ CacheAddr table[kIndexTablesize]; // Default size. Actual size controlled
+ // by header.table_len.
+};
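+
+// Illustrative sketch only (the authoritative lookup lives in backend_impl.cc):
+// assuming the usual power-of-two table size, the bucket for an entry whose
+// full key hash is |hash| is
+//
+//   CacheAddr bucket = index.table[hash & (index.header.table_len - 1)];
+//
+// and entries that collide on the same bucket are chained through the
+// EntryStore::next field defined below.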
+
+// Main structure for an entry on the backing storage. If the key is longer than
+// what can be stored on this structure, it will be extended on consecutive
+// blocks (adding 256 bytes each time), up to 4 blocks (1024 - 32 - 1 chars).
+// After that point, the whole key will be stored as a data block or external
+// file.
+struct EntryStore {
+ uint32 hash; // Full hash of the key.
+ CacheAddr next; // Next entry with the same hash or bucket.
+ CacheAddr rankings_node; // Rankings node for this entry.
+ int32 reuse_count; // How often is this entry used.
+ int32 refetch_count; // How often is this fetched from the net.
+ int32 state; // Current state.
+ uint64 creation_time;
+ int32 key_len;
+ CacheAddr long_key; // Optional address of a long key.
+ int32 data_size[4]; // We can store up to 4 data streams for each
+ CacheAddr data_addr[4]; // entry.
+ uint32 flags; // Any combination of EntryFlags.
+ int32 pad[4];
+ uint32 self_hash; // The hash of EntryStore up to this point.
+ char key[256 - 24 * 4]; // null terminated
+};
+
+COMPILE_ASSERT(sizeof(EntryStore) == 256, bad_EntyStore);
+const int kMaxInternalKeyLength = 4 * sizeof(EntryStore) -
+ offsetof(EntryStore, key) - 1;
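+// Worked out with the layout above (4-byte ints, one 8-byte creation_time, and
+// no padding, as the COMPILE_ASSERT above enforces): offsetof(EntryStore, key)
+// is 96, so kMaxInternalKeyLength evaluates to 4 * 256 - 96 - 1 = 927.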
+
+// Possible states for a given entry.
+enum EntryState {
+ ENTRY_NORMAL = 0,
+ ENTRY_EVICTED, // The entry was recently evicted from the cache.
+ ENTRY_DOOMED // The entry was doomed.
+};
+
+// Flags that can be applied to an entry.
+enum EntryFlags {
+ PARENT_ENTRY = 1, // This entry has children (sparse) entries.
+ CHILD_ENTRY = 1 << 1 // Child entry that stores sparse data.
+};
+
+#pragma pack(push, 4)
+// Rankings information for a given entry.
+struct RankingsNode {
+ uint64 last_used; // LRU info.
+ uint64 last_modified; // LRU info.
+ CacheAddr next; // LRU list.
+ CacheAddr prev; // LRU list.
+ CacheAddr contents; // Address of the EntryStore.
+  int32       dirty;            // The entry is being modified.
+ uint32 self_hash; // RankingsNode's hash.
+};
+#pragma pack(pop)
+
+COMPILE_ASSERT(sizeof(RankingsNode) == 36, bad_RankingsNode);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_FORMAT_H_
diff --git a/chromium/net/disk_cache/disk_format_base.h b/chromium/net/disk_cache/disk_format_base.h
new file mode 100644
index 00000000000..c8b7490abfd
--- /dev/null
+++ b/chromium/net/disk_cache/disk_format_base.h
@@ -0,0 +1,130 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For a general description of the files used by the cache see file_format.h.
+//
+// A block file is a file designed to store blocks of data of a given size. It
+// is able to store data that spans from one to four consecutive "blocks", and
+// it grows as needed to store up to approximately 65000 blocks. It has a fixed
+// size header used for bookkeeping, such as tracking the free blocks in the
+// file. For example, a block-file for 1KB blocks will grow from 8KB when
+// totally empty to about 64MB when completely full. At that point, data blocks
+// of 1KB will be stored on a second block file that will store the next set of
+// 65000 blocks. The first file contains the number of the second file, and the
+// second file contains the number of a third file, created when the second file
+// reaches its limit. It is important to remember that no matter how long the
+// chain of files is, any given block can be located directly by its address,
+// which contains the file number and starting block inside the file.
+
+#ifndef NET_DISK_CACHE_DISK_FORMAT_BASE_H_
+#define NET_DISK_CACHE_DISK_FORMAT_BASE_H_
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+
+typedef uint32 CacheAddr;
+
+const uint32 kBlockVersion2 = 0x20000; // Version 2.0.
+
+const uint32 kBlockMagic = 0xC104CAC3;
+const int kBlockHeaderSize = 8192; // Two pages: almost 64k entries
+const int kMaxBlocks = (kBlockHeaderSize - 80) * 8;
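+// Worked out: with the 8192-byte header above, kMaxBlocks is
+// (8192 - 80) * 8 = 64896, so a block-file holding 1 KB blocks grows from 8 KB
+// when empty to roughly 8 KB + 64896 KB (about 64 MB) when full, which is the
+// figure quoted in the comment at the top of this file.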
+
+// Bitmap to track used blocks on a block-file.
+typedef uint32 AllocBitmap[kMaxBlocks / 32];
+
+// A block-file is the file used to store information in blocks (could be
+// EntryStore blocks, RankingsNode blocks or user-data blocks).
+// We store entries that can expand for up to 4 consecutive blocks, and keep
+// counters of the number of blocks available for each type of entry. For
+// instance, an entry of 3 blocks is an entry of type 3. We also keep track of
+// where we found the last entry of that type (to avoid searching the bitmap
+// from the beginning every time).
+// This structure is the header of a block-file:
+struct BlockFileHeader {
+ uint32 magic;
+ uint32 version;
+ int16 this_file; // Index of this file.
+ int16 next_file; // Next file when this one is full.
+ int32 entry_size; // Size of the blocks of this file.
+ int32 num_entries; // Number of stored entries.
+ int32 max_entries; // Current maximum number of entries.
+ int32 empty[4]; // Counters of empty entries for each type.
+ int32 hints[4]; // Last used position for each entry type.
+ volatile int32 updating; // Keep track of updates to the header.
+ int32 user[5];
+ AllocBitmap allocation_map;
+};
+
+COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header);
+
+// Sparse data support:
+// We keep a two-level hierarchy to enable sparse data for an entry: the first
+// level consists of using separate "child" entries to store ranges of 1 MB,
+// and the second level stores blocks of 1 KB inside each child entry.
+//
+// Whenever we need to access a particular sparse offset, we first locate the
+// child entry that stores that offset, so we discard the 20 least significant
+// bits of the offset, and end up with the child id. For instance, the child id
+// to store the first megabyte is 0, and the child that should store offset
+// 0x410000 has an id of 4.
+//
+// The child entry is stored the same way as any other entry, so it also has a
+// name (key). The key includes a signature to be able to identify children
+// created for different generations of the same resource. In other words, given
+// that a given sparse entry can have a large number of child entries, and the
+// resource can be invalidated and replaced with a new version at any time, it
+// is important to be sure that a given child actually belongs to that entry.
+//
+// The full name of a child entry is composed of a prefix ("Range_") and two
+// hexadecimal 64-bit numbers at the end, separated by colons. The first
+// number is the signature of the parent key, and the second number is the child
+// id as described previously. The signature itself is also stored internally by
+// the child and the parent entries. For example, a sparse entry with a key of
+// "sparse entry name", and a signature of 0x052AF76, may have a child entry
+// named "Range_sparse entry name:052af76:4", which stores data in the range
+// 0x400000 to 0x4FFFFF.
+//
+// Each child entry keeps track of all the 1 KB blocks that have been written
+// to the entry, but being a regular entry, it will happily return zeros for any
+// read that spans data not written before. The actual sparse data is stored in
+// one of the data streams of the child entry (at index 1), while the control
+// information is stored in another stream (at index 2), both by parents and
+// the children.
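+//
+// For illustration only (the real sparse bookkeeping lives in
+// sparse_control.cc): the child id described above is just the sparse offset
+// with its 20 least significant bits dropped, e.g.
+//
+//   int64 ChildIdForOffset(int64 offset) {
+//     return offset >> 20;  // 0x410000 >> 20 == 4; the first MB maps to 0.
+//   }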
+
+// This structure contains the control information for parent and child entries.
+// It is stored at offset 0 of the data stream with index 2.
+// It is possible to write to a child entry in a way that causes the last block
+// to be only partially filled. In that case, last_block and last_block_len will
+// keep track of that block.
+struct SparseHeader {
+ int64 signature; // The parent and children signature.
+ uint32 magic; // Structure identifier (equal to kIndexMagic).
+ int32 parent_key_len; // Key length for the parent entry.
+ int32 last_block; // Index of the last written block.
+  int32 last_block_len;     // Length of the last written block.
+ int32 dummy[10];
+};
+
+// The SparseHeader will be followed by a bitmap, as described by this
+// structure.
+struct SparseData {
+ SparseHeader header;
+ uint32 bitmap[32]; // Bitmap representation of known children (if this
+ // is a parent entry), or used blocks (for child
+                        // entries). The size is fixed for child entries but
+ // not for parents; it can be as small as 4 bytes
+ // and as large as 8 KB.
+};
+
+// The number of blocks stored by a child entry.
+const int kNumSparseBits = 1024;
+COMPILE_ASSERT(sizeof(SparseData) == sizeof(SparseHeader) + kNumSparseBits / 8,
+ Invalid_SparseData_bitmap);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_FORMAT_BASE_H_
diff --git a/chromium/net/disk_cache/entry_impl.cc b/chromium/net/disk_cache/entry_impl.cc
new file mode 100644
index 00000000000..4b6e4cf2b04
--- /dev/null
+++ b/chromium/net/disk_cache/entry_impl.cc
@@ -0,0 +1,1550 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/entry_impl.h"
+
+#include "base/hash.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/net_log_parameters.h"
+#include "net/disk_cache/sparse_control.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
+const int kKeyFileIndex = 3;
+
+// This class implements FileIOCallback to buffer the callback from a file IO
+// operation from the actual net class.
+class SyncCallback: public disk_cache::FileIOCallback {
+ public:
+ // |end_event_type| is the event type to log on completion. Logs nothing on
+ // discard, or when the NetLog is not set to log all events.
+ SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
+ const net::CompletionCallback& callback,
+ net::NetLog::EventType end_event_type)
+ : entry_(entry), callback_(callback), buf_(buffer),
+ start_(TimeTicks::Now()), end_event_type_(end_event_type) {
+ entry->AddRef();
+ entry->IncrementIoCount();
+ }
+ virtual ~SyncCallback() {}
+
+ virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
+ void Discard();
+
+ private:
+ disk_cache::EntryImpl* entry_;
+ net::CompletionCallback callback_;
+ scoped_refptr<net::IOBuffer> buf_;
+ TimeTicks start_;
+ const net::NetLog::EventType end_event_type_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncCallback);
+};
+
+void SyncCallback::OnFileIOComplete(int bytes_copied) {
+ entry_->DecrementIoCount();
+ if (!callback_.is_null()) {
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().EndEvent(
+ end_event_type_,
+ disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
+ }
+ entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
+ buf_ = NULL; // Release the buffer before invoking the callback.
+ callback_.Run(bytes_copied);
+ }
+ entry_->Release();
+ delete this;
+}
+
+void SyncCallback::Discard() {
+ callback_.Reset();
+ buf_ = NULL;
+ OnFileIOComplete(0);
+}
+
+const int kMaxBufferSize = 1024 * 1024; // 1 MB.
+
+} // namespace
+
+namespace disk_cache {
+
+// This class handles individual memory buffers that store data before it is
+// sent to disk. The buffer can start at any offset, but if we try to write to
+// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
+// zero. The buffer grows up to a size determined by the backend, to keep the
+// total memory used under control.
+class EntryImpl::UserBuffer {
+ public:
+ explicit UserBuffer(BackendImpl* backend)
+ : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
+ buffer_.reserve(kMaxBlockSize);
+ }
+ ~UserBuffer() {
+ if (backend_.get())
+ backend_->BufferDeleted(capacity() - kMaxBlockSize);
+ }
+
+ // Returns true if we can handle writing |len| bytes to |offset|.
+ bool PreWrite(int offset, int len);
+
+ // Truncates the buffer to |offset| bytes.
+ void Truncate(int offset);
+
+ // Writes |len| bytes from |buf| at the given |offset|.
+ void Write(int offset, IOBuffer* buf, int len);
+
+ // Returns true if we can read |len| bytes from |offset|, given that the
+ // actual file has |eof| bytes stored. Note that the number of bytes to read
+ // may be modified by this method even though it returns false: that means we
+ // should do a smaller read from disk.
+ bool PreRead(int eof, int offset, int* len);
+
+ // Read |len| bytes from |buf| at the given |offset|.
+ int Read(int offset, IOBuffer* buf, int len);
+
+ // Prepare this buffer for reuse.
+ void Reset();
+
+ char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
+ int Size() { return static_cast<int>(buffer_.size()); }
+ int Start() { return offset_; }
+ int End() { return offset_ + Size(); }
+
+ private:
+ int capacity() { return static_cast<int>(buffer_.capacity()); }
+ bool GrowBuffer(int required, int limit);
+
+ base::WeakPtr<BackendImpl> backend_;
+ int offset_;
+ std::vector<char> buffer_;
+ bool grow_allowed_;
+ DISALLOW_COPY_AND_ASSIGN(UserBuffer);
+};
+
+bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(len, 0);
+ DCHECK_GE(offset + len, 0);
+
+ // We don't want to write before our current start.
+ if (offset < offset_)
+ return false;
+
+ // Lets get the common case out of the way.
+ if (offset + len <= capacity())
+ return true;
+
+ // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
+ // buffer offset_ at 0.
+ if (!Size() && offset > kMaxBlockSize)
+ return GrowBuffer(len, kMaxBufferSize);
+
+ int required = offset - offset_ + len;
+ return GrowBuffer(required, kMaxBufferSize * 6 / 5);
+}
+
+void EntryImpl::UserBuffer::Truncate(int offset) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(offset, offset_);
+ DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;
+
+ offset -= offset_;
+ if (Size() >= offset)
+ buffer_.resize(offset);
+}
+
+void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(len, 0);
+ DCHECK_GE(offset + len, 0);
+ DCHECK_GE(offset, offset_);
+ DVLOG(3) << "Buffer write at " << offset << " current " << offset_;
+
+ if (!Size() && offset > kMaxBlockSize)
+ offset_ = offset;
+
+ offset -= offset_;
+
+ if (offset > Size())
+ buffer_.resize(offset);
+
+ if (!len)
+ return;
+
+ char* buffer = buf->data();
+ int valid_len = Size() - offset;
+ int copy_len = std::min(valid_len, len);
+ if (copy_len) {
+ memcpy(&buffer_[offset], buffer, copy_len);
+ len -= copy_len;
+ buffer += copy_len;
+ }
+ if (!len)
+ return;
+
+ buffer_.insert(buffer_.end(), buffer, buffer + len);
+}
+
+bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GT(*len, 0);
+
+ if (offset < offset_) {
+ // We are reading before this buffer.
+ if (offset >= eof)
+ return true;
+
+ // If the read overlaps with the buffer, change its length so that there is
+ // no overlap.
+ *len = std::min(*len, offset_ - offset);
+ *len = std::min(*len, eof - offset);
+
+ // We should read from disk.
+ return false;
+ }
+
+ if (!Size())
+ return false;
+
+ // See if we can fulfill the first part of the operation.
+ return (offset - offset_ < Size());
+}
+
+int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GT(len, 0);
+ DCHECK(Size() || offset < offset_);
+
+ int clean_bytes = 0;
+ if (offset < offset_) {
+ // We don't have a file so lets fill the first part with 0.
+ clean_bytes = std::min(offset_ - offset, len);
+ memset(buf->data(), 0, clean_bytes);
+ if (len == clean_bytes)
+ return len;
+ offset = offset_;
+ len -= clean_bytes;
+ }
+
+ int start = offset - offset_;
+ int available = Size() - start;
+ DCHECK_GE(start, 0);
+ DCHECK_GE(available, 0);
+ len = std::min(len, available);
+ memcpy(buf->data() + clean_bytes, &buffer_[start], len);
+ return len + clean_bytes;
+}
+
+void EntryImpl::UserBuffer::Reset() {
+ if (!grow_allowed_) {
+ if (backend_.get())
+ backend_->BufferDeleted(capacity() - kMaxBlockSize);
+ grow_allowed_ = true;
+ std::vector<char> tmp;
+ buffer_.swap(tmp);
+ buffer_.reserve(kMaxBlockSize);
+ }
+ offset_ = 0;
+ buffer_.clear();
+}
+
+bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
+ DCHECK_GE(required, 0);
+ int current_size = capacity();
+ if (required <= current_size)
+ return true;
+
+ if (required > limit)
+ return false;
+
+ if (!backend_.get())
+ return false;
+
+ int to_add = std::max(required - current_size, kMaxBlockSize * 4);
+ to_add = std::max(current_size, to_add);
+ required = std::min(current_size + to_add, limit);
+
+ grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
+ if (!grow_allowed_)
+ return false;
+
+ DVLOG(3) << "Buffer grow to " << required;
+
+ buffer_.reserve(required);
+ return true;
+}
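
A standalone sketch of the sizing policy above. kMaxBlockSize is taken as 16 KB (the value the PreWrite comment refers to) and the backend quota check is stubbed out, so this only shows the arithmetic.

// Standalone sketch of the GrowBuffer sizing policy (illustrative only).
#include <algorithm>
#include <cstdio>

const int kMaxBlockSizeSketch = 16 * 1024;

int NextCapacity(int current, int required, int limit) {
  if (required <= current)
    return current;
  if (required > limit)
    return -1;  // Caller must flush and go to disk instead.
  // Grow by at least four blocks, and at least double the current capacity.
  int to_add = std::max(required - current, kMaxBlockSizeSketch * 4);
  to_add = std::max(current, to_add);
  return std::min(current + to_add, limit);
}

int main() {
  // Current capacity 16 KB, 20 KB needed, 1 MB limit: jump straight to 80 KB
  // (16 KB + 64 KB) rather than growing a few bytes at a time.
  std::printf("%d\n", NextCapacity(16 * 1024, 20 * 1024, 1024 * 1024));
  return 0;
}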
+
+// ------------------------------------------------------------------------
+
+EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
+ : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
+ backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
+ dirty_(false) {
+ entry_.LazyInit(backend->File(address), address);
+ for (int i = 0; i < kNumStreams; i++) {
+ unreported_size_[i] = 0;
+ }
+}
+
+void EntryImpl::DoomImpl() {
+ if (doomed_ || !backend_.get())
+ return;
+
+ SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+ backend_->InternalDoomEntry(this);
+}
+
+int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+ }
+
+ int result = InternalReadData(index, offset, buf, buf_len, callback);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+ }
+
+ int result = InternalWriteData(index, offset, buf, buf_len, callback,
+ truncate);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ TimeTicks start = TimeTicks::Now();
+ result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
+ callback);
+ ReportIOTime(kSparseRead, start);
+ return result;
+}
+
+int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ TimeTicks start = TimeTicks::Now();
+ result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
+ buf_len, callback);
+ ReportIOTime(kSparseWrite, start);
+ return result;
+}
+
+int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ return sparse_->GetAvailableRange(offset, len, start);
+}
+
+void EntryImpl::CancelSparseIOImpl() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
+int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
+ DCHECK(sparse_.get());
+ return sparse_->ReadyToUse(callback);
+}
+
+uint32 EntryImpl::GetHash() {
+ return entry_.Data()->hash;
+}
+
+bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
+ uint32 hash) {
+ Trace("Create entry In");
+ EntryStore* entry_store = entry_.Data();
+ RankingsNode* node = node_.Data();
+ memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
+ memset(node, 0, sizeof(RankingsNode));
+ if (!node_.LazyInit(backend_->File(node_address), node_address))
+ return false;
+
+ entry_store->rankings_node = node_address.value();
+ node->contents = entry_.address().value();
+
+ entry_store->hash = hash;
+ entry_store->creation_time = Time::Now().ToInternalValue();
+ entry_store->key_len = static_cast<int32>(key.size());
+ if (entry_store->key_len > kMaxInternalKeyLength) {
+ Addr address(0);
+ if (!CreateBlock(entry_store->key_len + 1, &address))
+ return false;
+
+ entry_store->long_key = address.value();
+ File* key_file = GetBackingFile(address, kKeyFileIndex);
+ key_ = key;
+
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
+ DeleteData(address, kKeyFileIndex);
+ return false;
+ }
+
+ if (address.is_separate_file())
+ key_file->SetLength(key.size() + 1);
+ } else {
+ memcpy(entry_store->key, key.data(), key.size());
+ entry_store->key[key.size()] = '\0';
+ }
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
+ node->dirty = backend_->GetCurrentEntryId();
+ Log("Create Entry ");
+ return true;
+}
+
+bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
+ if (entry_.Data()->hash != hash ||
+ static_cast<size_t>(entry_.Data()->key_len) != key.size())
+ return false;
+
+ return (key.compare(GetKey()) == 0);
+}
+
+void EntryImpl::InternalDoom() {
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ DCHECK(node_.HasData());
+ if (!node_.Data()->dirty) {
+ node_.Data()->dirty = backend_->GetCurrentEntryId();
+ node_.Store();
+ }
+ doomed_ = true;
+}
+
+void EntryImpl::DeleteEntryData(bool everything) {
+ DCHECK(doomed_ || !everything);
+
+ if (GetEntryFlags() & PARENT_ENTRY) {
+ // We have some child entries that must go away.
+ SparseControl::DeleteChildren(this);
+ }
+
+ if (GetDataSize(0))
+ CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
+ if (GetDataSize(1))
+ CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
+ for (int index = 0; index < kNumStreams; index++) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+ }
+ }
+
+ if (!everything)
+ return;
+
+ // Remove all traces of this entry.
+ backend_->RemoveEntry(this);
+
+ // Note that at this point node_ and entry_ are just two blocks of data, and
+ // even if they reference each other, nobody should be referencing them.
+
+ Addr address(entry_.Data()->long_key);
+ DeleteData(address, kKeyFileIndex);
+ backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+ backend_->DeleteBlock(entry_.address(), true);
+ entry_.Discard();
+
+ if (!LeaveRankingsBehind()) {
+ backend_->DeleteBlock(node_.address(), true);
+ node_.Discard();
+ }
+}
+
+CacheAddr EntryImpl::GetNextAddress() {
+ return entry_.Data()->next;
+}
+
+void EntryImpl::SetNextAddress(Addr address) {
+ DCHECK_NE(address.value(), entry_.address().value());
+ entry_.Data()->next = address.value();
+ bool success = entry_.Store();
+ DCHECK(success);
+}
+
+bool EntryImpl::LoadNodeAddress() {
+ Addr address(entry_.Data()->rankings_node);
+ if (!node_.LazyInit(backend_->File(address), address))
+ return false;
+ return node_.Load();
+}
+
+bool EntryImpl::Update() {
+ DCHECK(node_.HasData());
+
+ if (read_only_)
+ return true;
+
+ RankingsNode* rankings = node_.Data();
+ if (!rankings->dirty) {
+ rankings->dirty = backend_->GetCurrentEntryId();
+ if (!node_.Store())
+ return false;
+ }
+ return true;
+}
+
+void EntryImpl::SetDirtyFlag(int32 current_id) {
+ DCHECK(node_.HasData());
+ if (node_.Data()->dirty && current_id != node_.Data()->dirty)
+ dirty_ = true;
+
+ if (!current_id)
+ dirty_ = true;
+}
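
The dirty field doubles as an in-use marker: it holds the backend's current entry id while the entry is open and is reset to zero on a clean close (see the destructor later in this file). A condensed, standalone restatement of the load-time test above:

// Sketch of the load-time dirtiness test mirrored from SetDirtyFlag: a stored
// id of zero means the entry was closed cleanly; a nonzero id that does not
// match the backend's current id means a previous session left it open.
bool LooksDirtyAtLoad(int stored_dirty_id, int current_backend_id) {
  if (!current_backend_id)
    return true;  // No valid backend id: play it safe and treat as dirty.
  return stored_dirty_id != 0 && stored_dirty_id != current_backend_id;
}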
+
+void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
+ node_.Data()->dirty = new_id;
+ node_.Store();
+}
+
+bool EntryImpl::LeaveRankingsBehind() {
+ return !node_.Data()->contents;
+}
+
+// This only includes checks that relate to the first block of the entry (the
+// first 256 bytes), and values that should be set from the entry creation.
+// Basically, even if there is something wrong with this entry, we want to see
+// if it is possible to load the rankings node and delete them together.
+bool EntryImpl::SanityCheck() {
+ if (!entry_.VerifyHash())
+ return false;
+
+ EntryStore* stored = entry_.Data();
+ if (!stored->rankings_node || stored->key_len <= 0)
+ return false;
+
+ if (stored->reuse_count < 0 || stored->refetch_count < 0)
+ return false;
+
+ Addr rankings_addr(stored->rankings_node);
+ if (!rankings_addr.SanityCheckForRankings())
+ return false;
+
+ Addr next_addr(stored->next);
+ if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
+ STRESS_NOTREACHED();
+ return false;
+ }
+ STRESS_DCHECK(next_addr.value() != entry_.address().value());
+
+ if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
+ return false;
+
+ Addr key_addr(stored->long_key);
+ if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
+ (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
+ return false;
+
+ if (!key_addr.SanityCheckV2())
+ return false;
+
+ if (key_addr.is_initialized() &&
+ ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
+ (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
+ return false;
+
+ int num_blocks = NumBlocksForEntry(stored->key_len);
+ if (entry_.address().num_blocks() != num_blocks)
+ return false;
+
+ return true;
+}
+
+bool EntryImpl::DataSanityCheck() {
+ EntryStore* stored = entry_.Data();
+ Addr key_addr(stored->long_key);
+
+ // The key must be NULL terminated.
+ if (!key_addr.is_initialized() && stored->key[stored->key_len])
+ return false;
+
+ if (stored->hash != base::Hash(GetKey()))
+ return false;
+
+ for (int i = 0; i < kNumStreams; i++) {
+ Addr data_addr(stored->data_addr[i]);
+ int data_size = stored->data_size[i];
+ if (data_size < 0)
+ return false;
+ if (!data_size && data_addr.is_initialized())
+ return false;
+ if (!data_addr.SanityCheckV2())
+ return false;
+ if (!data_size)
+ continue;
+ if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
+ return false;
+ if (data_size > kMaxBlockSize && data_addr.is_block_file())
+ return false;
+ }
+ return true;
+}
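
The two checks above enforce a simple placement rule for stored payloads. A standalone restatement, with 16 KB assumed for kMaxBlockSize purely for illustration (zero-length streams have no address at all and are handled separately by the checks):

// Standalone restatement of the placement rule (illustrative only).
#include <cstdio>

const int kMaxBlockSizeSketch = 16 * 1024;

enum Placement { kInBlockFile, kInSeparateFile };

bool PlacementLooksSane(int size, Placement where) {
  if (size <= kMaxBlockSizeSketch)
    return where == kInBlockFile;    // Small payloads share a block file.
  return where == kInSeparateFile;   // Anything bigger gets its own file.
}

int main() {
  std::printf("%d %d\n",
              PlacementLooksSane(4 * 1024, kInBlockFile),    // 1: consistent.
              PlacementLooksSane(64 * 1024, kInBlockFile));  // 0: suspicious.
  return 0;
}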
+
+void EntryImpl::FixForDelete() {
+ EntryStore* stored = entry_.Data();
+ Addr key_addr(stored->long_key);
+
+ if (!key_addr.is_initialized())
+ stored->key[stored->key_len] = '\0';
+
+ for (int i = 0; i < kNumStreams; i++) {
+ Addr data_addr(stored->data_addr[i]);
+ int data_size = stored->data_size[i];
+ if (data_addr.is_initialized()) {
+ if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
+ (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
+ !data_addr.SanityCheckV2()) {
+ STRESS_NOTREACHED();
+ // The address is weird so don't attempt to delete it.
+ stored->data_addr[i] = 0;
+ // In general, trust the stored size as it should be in sync with the
+ // total size tracked by the backend.
+ }
+ }
+ if (data_size < 0)
+ stored->data_size[i] = 0;
+ }
+ entry_.Store();
+}
+
+void EntryImpl::IncrementIoCount() {
+ backend_->IncrementIoCount();
+}
+
+void EntryImpl::DecrementIoCount() {
+ if (backend_.get())
+ backend_->DecrementIoCount();
+}
+
+void EntryImpl::OnEntryCreated(BackendImpl* backend) {
+ // Just grab a reference to the background queue.
+ background_queue_ = backend->GetBackgroundQueue();
+}
+
+void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
+ node_.Data()->last_used = last_used.ToInternalValue();
+ node_.Data()->last_modified = last_modified.ToInternalValue();
+ node_.set_modified();
+}
+
+void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
+ if (!backend_.get())
+ return;
+
+ switch (op) {
+ case kRead:
+ CACHE_UMA(AGE_MS, "ReadTime", 0, start);
+ break;
+ case kWrite:
+ CACHE_UMA(AGE_MS, "WriteTime", 0, start);
+ break;
+ case kSparseRead:
+ CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
+ break;
+ case kSparseWrite:
+ CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
+ break;
+ case kAsyncIO:
+ CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
+ break;
+ case kReadAsync1:
+ CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
+ break;
+ case kWriteAsync1:
+ CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
+ DCHECK(!net_log_.net_log());
+ net_log_ = net::BoundNetLog::Make(
+ net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
+ CreateNetLogEntryCreationCallback(this, created));
+}
+
+const net::BoundNetLog& EntryImpl::net_log() const {
+ return net_log_;
+}
+
+// static
+int EntryImpl::NumBlocksForEntry(int key_size) {
+ // The longest key that can be stored using one block.
+ int key1_len =
+ static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));
+
+ if (key_size < key1_len || key_size > kMaxInternalKeyLength)
+ return 1;
+
+ return ((key_size - key1_len) / 256 + 2);
+}
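
A worked example of the arithmetic above. The 256-byte EntryStore, the 96-byte key offset (so a 160-byte inline key tail) and the 927-byte kMaxInternalKeyLength are assumptions stated only so the numbers add up; the per-block math is the point.

// Worked example of the block count math (constants assumed for illustration).
#include <cstdio>

int NumBlocksSketch(int key_size) {
  const int kEntryStoreSize = 256;   // sizeof(EntryStore), assumed.
  const int kKeyOffset = 96;         // offsetof(EntryStore, key), assumed.
  const int key1_len = kEntryStoreSize - kKeyOffset;  // 160 bytes inline.
  const int kMaxInternalKey = 927;   // kMaxInternalKeyLength, assumed.
  if (key_size < key1_len || key_size > kMaxInternalKey)
    return 1;
  // Each extra 256-byte block adds another 256 bytes of key space.
  return (key_size - key1_len) / 256 + 2;
}

int main() {
  std::printf("%d %d %d\n",
              NumBlocksSketch(100),   // Fits in the first block: 1.
              NumBlocksSketch(300),   // Needs a second block: 2.
              NumBlocksSketch(700));  // (700 - 160) / 256 + 2 = 4.
  return 0;
}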
+
+// ------------------------------------------------------------------------
+
+void EntryImpl::Doom() {
+ if (background_queue_.get())
+ background_queue_->DoomEntryImpl(this);
+}
+
+void EntryImpl::Close() {
+ if (background_queue_.get())
+ background_queue_->CloseEntryImpl(this);
+}
+
+std::string EntryImpl::GetKey() const {
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ int key_len = entry->Data()->key_len;
+ if (key_len <= kMaxInternalKeyLength)
+ return std::string(entry->Data()->key);
+
+ // We keep a copy of the key so that we can always return it, even if the
+ // backend is disabled.
+ if (!key_.empty())
+ return key_;
+
+ Addr address(entry->Data()->long_key);
+ DCHECK(address.is_initialized());
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
+ File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+ kKeyFileIndex);
+ if (!key_file)
+ return std::string();
+
+ ++key_len; // We store a trailing \0 on disk that we read back below.
+ if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
+ return std::string();
+
+ if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
+ key_.clear();
+ return key_;
+}
+
+Time EntryImpl::GetLastUsed() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_used);
+}
+
+Time EntryImpl::GetLastModified() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_modified);
+}
+
+int32 EntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index >= kNumStreams)
+ return 0;
+
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ return entry->Data()->data_size[index];
+}
+
+int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (callback.is_null())
+ return ReadDataImpl(index, offset, buf, buf_len, callback);
+
+ DCHECK(node_.Data()->dirty || read_only_);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!background_queue_.get())
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
+ if (callback.is_null())
+ return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
+
+ DCHECK(node_.Data()->dirty || read_only_);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!background_queue_.get())
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
+ callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (callback.is_null())
+ return ReadSparseDataImpl(offset, buf, buf_len, callback);
+
+ if (!background_queue_.get())
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (callback.is_null())
+ return WriteSparseDataImpl(offset, buf, buf_len, callback);
+
+ if (!background_queue_.get())
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ if (!background_queue_.get())
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->GetAvailableRange(this, offset, len, start, callback);
+ return net::ERR_IO_PENDING;
+}
+
+bool EntryImpl::CouldBeSparse() const {
+ if (sparse_.get())
+ return true;
+
+ scoped_ptr<SparseControl> sparse;
+ sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
+ return sparse->CouldBeSparse();
+}
+
+void EntryImpl::CancelSparseIO() {
+ if (background_queue_.get())
+ background_queue_->CancelSparseIO(this);
+}
+
+int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ if (!background_queue_.get())
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->ReadyForSparseIO(this, callback);
+ return net::ERR_IO_PENDING;
+}
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that any
+// unused block is filled with zeros), and to simplify the handling of write /
+// read partial information from an entry (don't have to worry about returning
+// data related to a previous cache entry because the range was not fully
+// written before).
+EntryImpl::~EntryImpl() {
+ if (!backend_.get()) {
+ entry_.clear_modified();
+ node_.clear_modified();
+ return;
+ }
+ Log("~EntryImpl in");
+
+ // Save the sparse info to disk. This will generate IO for this entry and
+ // maybe for a child entry, so it is important to do it before deleting this
+ // entry.
+ sparse_.reset();
+
+ // Remove this entry from the list of open entries.
+ backend_->OnEntryDestroyBegin(entry_.address());
+
+ if (doomed_) {
+ DeleteEntryData(true);
+ } else {
+#if defined(NET_BUILD_STRESS_CACHE)
+ SanityCheck();
+#endif
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
+ bool ret = true;
+ for (int index = 0; index < kNumStreams; index++) {
+ if (user_buffers_[index].get()) {
+ if (!(ret = Flush(index, 0)))
+ LOG(ERROR) << "Failed to save user data";
+ }
+ if (unreported_size_[index]) {
+ backend_->ModifyStorageSize(
+ entry_.Data()->data_size[index] - unreported_size_[index],
+ entry_.Data()->data_size[index]);
+ }
+ }
+
+ if (!ret) {
+ // There was a failure writing the actual data. Mark the entry as dirty.
+ int current_id = backend_->GetCurrentEntryId();
+ node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
+ node_.Store();
+ } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
+ node_.Data()->dirty = 0;
+ node_.Store();
+ }
+ }
+
+ Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
+ net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
+ backend_->OnEntryDestroyEnd();
+}
+
+// ------------------------------------------------------------------------
+
+int EntryImpl::InternalReadData(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!backend_.get())
+ return net::ERR_UNEXPECTED;
+
+ TimeTicks start = TimeTicks::Now();
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+ backend_->OnEvent(Stats::READ_DATA);
+ backend_->OnRead(buf_len);
+
+ Addr address(entry_.Data()->data_addr[index]);
+ int eof = address.is_initialized() ? entry_size : 0;
+ if (user_buffers_[index].get() &&
+ user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
+ // Complete the operation locally.
+ buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
+ ReportIOTime(kRead, start);
+ return buf_len;
+ }
+
+ address.set_value(entry_.Data()->data_addr[index]);
+ DCHECK(address.is_initialized());
+ if (!address.is_initialized()) {
+ DoomImpl();
+ return net::ERR_FAILED;
+ }
+
+ File* file = GetBackingFile(address, index);
+ if (!file) {
+ DoomImpl();
+ LOG(ERROR) << "No file for " << std::hex << address.value();
+ return net::ERR_FILE_NOT_FOUND;
+ }
+
+ size_t file_offset = offset;
+ if (address.is_block_file()) {
+ DCHECK_LE(offset + buf_len, kMaxBlockSize);
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ }
+
+ SyncCallback* io_callback = NULL;
+ if (!callback.is_null()) {
+ io_callback = new SyncCallback(this, buf, callback,
+ net::NetLog::TYPE_ENTRY_READ_DATA);
+ }
+
+ TimeTicks start_async = TimeTicks::Now();
+
+ bool completed;
+ if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ DoomImpl();
+ return net::ERR_CACHE_READ_FAILURE;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ if (io_callback)
+ ReportIOTime(kReadAsync1, start_async);
+
+ ReportIOTime(kRead, start);
+ return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
+}
+
+int EntryImpl::InternalWriteData(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!backend_.get())
+ return net::ERR_UNEXPECTED;
+
+ int max_file_size = backend_->MaxFileSize();
+
+ // The sum offset + buf_len could overflow and wrap to a negative number.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ int size = offset + buf_len;
+ if (size <= max_file_size)
+ size = kint32max;
+ backend_->TooMuchStorageRequested(size);
+ return net::ERR_FAILED;
+ }
+
+ TimeTicks start = TimeTicks::Now();
+
+ // Read the size at this point (it may change inside prepare).
+ int entry_size = entry_.Data()->data_size[index];
+ bool extending = entry_size < offset + buf_len;
+ truncate = truncate && entry_size > offset + buf_len;
+ Trace("To PrepareTarget 0x%x", entry_.address().value());
+ if (!PrepareTarget(index, offset, buf_len, truncate))
+ return net::ERR_FAILED;
+
+ Trace("From PrepareTarget 0x%x", entry_.address().value());
+ if (extending || truncate)
+ UpdateSize(index, entry_size, offset + buf_len);
+
+ UpdateRank(true);
+
+ backend_->OnEvent(Stats::WRITE_DATA);
+ backend_->OnWrite(buf_len);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ user_buffers_[index]->Write(offset, buf, buf_len);
+ ReportIOTime(kWrite, start);
+ return buf_len;
+ }
+
+ Addr address(entry_.Data()->data_addr[index]);
+ if (offset + buf_len == 0) {
+ if (truncate) {
+ DCHECK(!address.is_initialized());
+ }
+ return 0;
+ }
+
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return net::ERR_FILE_NOT_FOUND;
+
+ size_t file_offset = offset;
+ if (address.is_block_file()) {
+ DCHECK_LE(offset + buf_len, kMaxBlockSize);
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ } else if (truncate || (extending && !buf_len)) {
+ if (!file->SetLength(offset + buf_len))
+ return net::ERR_FAILED;
+ }
+
+ if (!buf_len)
+ return 0;
+
+ SyncCallback* io_callback = NULL;
+ if (!callback.is_null()) {
+ io_callback = new SyncCallback(this, buf, callback,
+ net::NetLog::TYPE_ENTRY_WRITE_DATA);
+ }
+
+ TimeTicks start_async = TimeTicks::Now();
+
+ bool completed;
+ if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
+ &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ return net::ERR_CACHE_WRITE_FAILURE;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ if (io_callback)
+ ReportIOTime(kWriteAsync1, start_async);
+
+ ReportIOTime(kWrite, start);
+ return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
+}
+
+// ------------------------------------------------------------------------
+
+bool EntryImpl::CreateDataBlock(int index, int size) {
+ DCHECK(index >= 0 && index < kNumStreams);
+
+ Addr address(entry_.Data()->data_addr[index]);
+ if (!CreateBlock(size, &address))
+ return false;
+
+ entry_.Data()->data_addr[index] = address.value();
+ entry_.Store();
+ return true;
+}
+
+bool EntryImpl::CreateBlock(int size, Addr* address) {
+ DCHECK(!address->is_initialized());
+ if (!backend_.get())
+ return false;
+
+ FileType file_type = Addr::RequiredFileType(size);
+ if (EXTERNAL == file_type) {
+ if (size > backend_->MaxFileSize())
+ return false;
+ if (!backend_->CreateExternalFile(address))
+ return false;
+ } else {
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!backend_->CreateBlock(file_type, num_blocks, address))
+ return false;
+ }
+ return true;
+}
+
+// Note that this method may end up modifying a block file so upon return the
+// involved block will be free, and could be reused for something else. If there
+// is a crash after that point (and maybe before returning to the caller), the
+// entry will be left dirty... and at some point it will be discarded; it is
+// important that the entry doesn't keep a reference to this address, or we'll
+// end up deleting the contents of |address| once again.
+void EntryImpl::DeleteData(Addr address, int index) {
+ DCHECK(backend_.get());
+ if (!address.is_initialized())
+ return;
+ if (address.is_separate_file()) {
+ int failure = !DeleteCacheFile(backend_->GetFileName(address));
+ CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
+ if (failure) {
+ LOG(ERROR) << "Failed to delete " <<
+ backend_->GetFileName(address).value() << " from the cache.";
+ }
+ if (files_[index].get())
+ files_[index] = NULL; // Releases the object.
+ } else {
+ backend_->DeleteBlock(address, true);
+ }
+}
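
The ordering constraint described above is easy to get wrong. A toy model of the safe sequence, mirroring what MoveToLocalBuffer and HandleTruncation do elsewhere in this file; the types and helper names below are made up for illustration only.

// Toy model: clear and persist the stored address *before* freeing the block,
// so a crash in between leaks a block at worst instead of leaving the entry
// pointing at storage that may be reused by someone else.
#include <cstdio>

struct ToyEntry { int data_addr; };

void PersistToDisk(const ToyEntry& e) { std::printf("persist addr=%d\n", e.data_addr); }
void FreeBlock(int addr) { std::printf("free block %d\n", addr); }

void SafeDelete(ToyEntry* entry) {
  int address = entry->data_addr;  // Keep a local copy of the address.
  entry->data_addr = 0;            // Drop the reference first...
  PersistToDisk(*entry);           // ...and make that durable...
  FreeBlock(address);              // ...only then release the block itself.
}

int main() {
  ToyEntry entry = {42};
  SafeDelete(&entry);
  return 0;
}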
+
+void EntryImpl::UpdateRank(bool modified) {
+ if (!backend_.get())
+ return;
+
+ if (!doomed_) {
+ // Everything is handled by the backend.
+ backend_->UpdateRank(this, modified);
+ return;
+ }
+
+ Time current = Time::Now();
+ node_.Data()->last_used = current.ToInternalValue();
+
+ if (modified)
+ node_.Data()->last_modified = current.ToInternalValue();
+}
+
+File* EntryImpl::GetBackingFile(Addr address, int index) {
+ if (!backend_.get())
+ return NULL;
+
+ File* file;
+ if (address.is_separate_file())
+ file = GetExternalFile(address, index);
+ else
+ file = backend_->File(address);
+ return file;
+}
+
+File* EntryImpl::GetExternalFile(Addr address, int index) {
+ DCHECK(index >= 0 && index <= kKeyFileIndex);
+ if (!files_[index].get()) {
+ // For a key file, use mixed mode IO.
+ scoped_refptr<File> file(new File(kKeyFileIndex == index));
+ if (file->Init(backend_->GetFileName(address)))
+ files_[index].swap(file);
+ }
+ return files_[index].get();
+}
+
+// We keep a memory buffer for everything that ends up stored on a block file
+// (because we don't know yet the final data size), and for some of the data
+// that ends up on external files. This function will initialize that memory
+// buffer and / or the files needed to store the data.
+//
+// In general, a buffer may overlap data already stored on disk, and in that
+// case, the contents of the buffer are the most accurate. It may also extend
+// the file, but we don't want to read from disk just to keep the buffer up to
+// date. This means that as soon as there is a chance to get confused about what
+// is the most recent version of some part of a file, we'll flush the buffer and
+// reuse it for the new data. Keep in mind that the normal use pattern is quite
+// simple (write sequentially from the beginning), so we optimize for handling
+// that case.
+bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
+ bool truncate) {
+ if (truncate)
+ return HandleTruncation(index, offset, buf_len);
+
+ if (!offset && !buf_len)
+ return true;
+
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ if (address.is_block_file() && !MoveToLocalBuffer(index))
+ return false;
+
+ if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
+ // We are about to create a buffer for the first 16KB, make sure that we
+ // preserve existing data.
+ if (!CopyToLocalBuffer(index))
+ return false;
+ }
+ }
+
+ if (!user_buffers_[index].get())
+ user_buffers_[index].reset(new UserBuffer(backend_.get()));
+
+ return PrepareBuffer(index, offset, buf_len);
+}
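
The sequential-write pattern the comment above optimizes for can be pictured with a toy stand-in for UserBuffer (purely illustrative, not the real class): writes accumulate in memory and reach disk in a single flush when the entry is closed.

// Toy stand-in: absorbs sequential writes in memory, flushes once at the end.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct ToyBuffer {
  std::vector<char> data;
  void Write(int offset, const std::string& chunk) {
    if (offset + static_cast<int>(chunk.size()) > static_cast<int>(data.size()))
      data.resize(offset + chunk.size());
    std::copy(chunk.begin(), chunk.end(), data.begin() + offset);
  }
};

int main() {
  ToyBuffer buffer;
  // Three sequential writes: no disk IO yet, the buffer just grows.
  buffer.Write(0, "GET /index");
  buffer.Write(10, " HTTP/1.1");
  buffer.Write(19, "\r\n");
  // On close (Flush in the real code) the whole stream is written in one go.
  std::printf("flush %zu bytes in a single write\n", buffer.data.size());
  return 0;
}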
+
+// We get to this function with some data already stored. If there is a
+// truncation that results on data stored internally, we'll explicitly
+// handle the case here.
+bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
+ Addr address(entry_.Data()->data_addr[index]);
+
+ int current_size = entry_.Data()->data_size[index];
+ int new_size = offset + buf_len;
+
+ if (!new_size) {
+ // This is by far the most common scenario.
+ backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ unreported_size_[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+
+ user_buffers_[index].reset();
+ return true;
+ }
+
+ // We never postpone truncating a file, if there is one, but we may postpone
+ // telling the backend about the size reduction.
+ if (user_buffers_[index].get()) {
+ DCHECK_GE(current_size, user_buffers_[index]->Start());
+ if (!address.is_initialized()) {
+ // There is no overlap between the buffer and disk.
+ if (new_size > user_buffers_[index]->Start()) {
+ // Just truncate our buffer.
+ DCHECK_LT(new_size, user_buffers_[index]->End());
+ user_buffers_[index]->Truncate(new_size);
+ return true;
+ }
+
+ // Just discard our buffer.
+ user_buffers_[index]->Reset();
+ return PrepareBuffer(index, offset, buf_len);
+ }
+
+ // There is some overlap or we need to extend the file before the
+ // truncation.
+ if (offset > user_buffers_[index]->Start())
+ user_buffers_[index]->Truncate(new_size);
+ UpdateSize(index, current_size, new_size);
+ if (!Flush(index, 0))
+ return false;
+ user_buffers_[index].reset();
+ }
+
+ // We have data somewhere, and it is not in a buffer.
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+
+ if (new_size > kMaxBlockSize)
+ return true; // Let the operation go directly to disk.
+
+ return ImportSeparateFile(index, offset + buf_len);
+}
+
+bool EntryImpl::CopyToLocalBuffer(int index) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+
+ int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
+ user_buffers_[index].reset(new UserBuffer(backend_.get()));
+ user_buffers_[index]->Write(len, NULL, 0);
+
+ File* file = GetBackingFile(address, index);
+ int offset = 0;
+
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!file ||
+ !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
+ user_buffers_[index].reset();
+ return false;
+ }
+ return true;
+}
+
+bool EntryImpl::MoveToLocalBuffer(int index) {
+ if (!CopyToLocalBuffer(index))
+ return false;
+
+ Addr address(entry_.Data()->data_addr[index]);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+
+ // If we lose this entry we'll see it as zero sized.
+ int len = entry_.Data()->data_size[index];
+ backend_->ModifyStorageSize(len - unreported_size_[index], 0);
+ unreported_size_[index] = len;
+ return true;
+}
+
+bool EntryImpl::ImportSeparateFile(int index, int new_size) {
+ if (entry_.Data()->data_size[index] > new_size)
+ UpdateSize(index, entry_.Data()->data_size[index], new_size);
+
+ return MoveToLocalBuffer(index);
+}
+
+bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
+ DCHECK(user_buffers_[index].get());
+ if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
+ offset > entry_.Data()->data_size[index]) {
+ // We are about to extend the buffer or the file (with zeros), so make sure
+ // that we are not overwriting anything.
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized() && address.is_separate_file()) {
+ if (!Flush(index, 0))
+ return false;
+ // There is an actual file already, and we don't want to keep track of
+ // its length so we let this operation go straight to disk.
+ // The only case when a buffer is allowed to extend the file (as in fill
+ // with zeros before the start) is when there is no file yet to extend.
+ user_buffers_[index].reset();
+ return true;
+ }
+ }
+
+ if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
+ if (!Flush(index, offset + buf_len))
+ return false;
+
+ // Let's try again.
+ if (offset > user_buffers_[index]->End() ||
+ !user_buffers_[index]->PreWrite(offset, buf_len)) {
+ // We cannot complete the operation with a buffer.
+ DCHECK(!user_buffers_[index]->Size());
+ DCHECK(!user_buffers_[index]->Start());
+ user_buffers_[index].reset();
+ }
+ }
+ return true;
+}
+
+bool EntryImpl::Flush(int index, int min_len) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(user_buffers_[index].get());
+ DCHECK(!address.is_initialized() || address.is_separate_file());
+ DVLOG(3) << "Flush";
+
+ int size = std::max(entry_.Data()->data_size[index], min_len);
+ if (size && !address.is_initialized() && !CreateDataBlock(index, size))
+ return false;
+
+ if (!entry_.Data()->data_size[index]) {
+ DCHECK(!user_buffers_[index]->Size());
+ return true;
+ }
+
+ address.set_value(entry_.Data()->data_addr[index]);
+
+ int len = user_buffers_[index]->Size();
+ int offset = user_buffers_[index]->Start();
+ if (!len && !offset)
+ return true;
+
+ if (address.is_block_file()) {
+ DCHECK_EQ(len, entry_.Data()->data_size[index]);
+ DCHECK(!offset);
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+ }
+
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return false;
+
+ if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
+ return false;
+ user_buffers_[index]->Reset();
+
+ return true;
+}
+
+void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
+ if (entry_.Data()->data_size[index] == new_size)
+ return;
+
+ unreported_size_[index] += new_size - old_size;
+ entry_.Data()->data_size[index] = new_size;
+ entry_.set_modified();
+}
+
+int EntryImpl::InitSparseData() {
+ if (sparse_.get())
+ return net::OK;
+
+ // Use a local variable so that sparse_ never goes from 'valid' to NULL.
+ scoped_ptr<SparseControl> sparse(new SparseControl(this));
+ int result = sparse->Init();
+ if (net::OK == result)
+ sparse_.swap(sparse);
+
+ return result;
+}
+
+void EntryImpl::SetEntryFlags(uint32 flags) {
+ entry_.Data()->flags |= flags;
+ entry_.set_modified();
+}
+
+uint32 EntryImpl::GetEntryFlags() {
+ return entry_.Data()->flags;
+}
+
+void EntryImpl::GetData(int index, char** buffer, Addr* address) {
+ DCHECK(backend_.get());
+ if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
+ !user_buffers_[index]->Start()) {
+ // The data is already in memory, just copy it and we're done.
+ int data_len = entry_.Data()->data_size[index];
+ if (data_len <= user_buffers_[index]->Size()) {
+ DCHECK(!user_buffers_[index]->Start());
+ *buffer = new char[data_len];
+ memcpy(*buffer, user_buffers_[index]->Data(), data_len);
+ return;
+ }
+ }
+
+ // Bad news: we'd have to read the info from disk so instead we'll just tell
+ // the caller where to read from.
+ *buffer = NULL;
+ address->set_value(entry_.Data()->data_addr[index]);
+ if (address->is_initialized()) {
+ // Prevent us from deleting the block from the backing store.
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ }
+}
+
+void EntryImpl::Log(const char* msg) {
+ int dirty = 0;
+ if (node_.HasData()) {
+ dirty = node_.Data()->dirty;
+ }
+
+ Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
+ entry_.address().value(), node_.address().value());
+
+ Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
+ entry_.Data()->data_addr[1], entry_.Data()->long_key);
+
+ Trace(" doomed: %d 0x%x", doomed_, dirty);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/entry_impl.h b/chromium/net/disk_cache/entry_impl.h
new file mode 100644
index 00000000000..13d077c2f8e
--- /dev/null
+++ b/chromium/net/disk_cache/entry_impl.h
@@ -0,0 +1,278 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_ENTRY_IMPL_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_log.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class InFlightBackendIO;
+class SparseControl;
+typedef StorageBlock<EntryStore> CacheEntryBlock;
+typedef StorageBlock<RankingsNode> CacheRankingsBlock;
+
+// This class implements the Entry interface. An object of this
+// class represents a single entry on the cache.
+class NET_EXPORT_PRIVATE EntryImpl
+ : public Entry,
+ public base::RefCounted<EntryImpl> {
+ friend class base::RefCounted<EntryImpl>;
+ friend class SparseControl;
+ public:
+ enum Operation {
+ kRead,
+ kWrite,
+ kSparseRead,
+ kSparseWrite,
+ kAsyncIO,
+ kReadAsync1,
+ kWriteAsync1
+ };
+
+ EntryImpl(BackendImpl* backend, Addr address, bool read_only);
+
+ // Background implementation of the Entry interface.
+ void DoomImpl();
+ int ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate);
+ int ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int GetAvailableRangeImpl(int64 offset, int len, int64* start);
+ void CancelSparseIOImpl();
+ int ReadyForSparseIOImpl(const CompletionCallback& callback);
+
+ inline CacheEntryBlock* entry() {
+ return &entry_;
+ }
+
+ inline CacheRankingsBlock* rankings() {
+ return &node_;
+ }
+
+ uint32 GetHash();
+
+ // Performs the initialization of an EntryImpl that will be added to the
+ // cache.
+ bool CreateEntry(Addr node_address, const std::string& key, uint32 hash);
+
+ // Returns true if this entry matches the lookup arguments.
+ bool IsSameEntry(const std::string& key, uint32 hash);
+
+ // Permanently destroys this entry.
+ void InternalDoom();
+
+ // Deletes this entry from disk. If |everything| is false, only the user data
+ // will be removed, leaving the key and control data intact.
+ void DeleteEntryData(bool everything);
+
+ // Returns the address of the next entry on the list of entries with the same
+ // hash.
+ CacheAddr GetNextAddress();
+
+ // Sets the address of the next entry on the list of entries with the same
+ // hash.
+ void SetNextAddress(Addr address);
+
+ // Reloads the rankings node information.
+ bool LoadNodeAddress();
+
+ // Updates the stored data to reflect the run-time information for this entry.
+ // Returns false if the data could not be updated. The purpose of this method
+ // is to be able to detect entries that are currently in use.
+ bool Update();
+
+ bool dirty() {
+ return dirty_;
+ }
+
+ bool doomed() {
+ return doomed_;
+ }
+
+ // Marks this entry as dirty (in memory) if needed. This is intended only for
+ // entries that are being read from disk, to be called during loading.
+ void SetDirtyFlag(int32 current_id);
+
+ // Fixes this entry so it can be treated as valid (to delete it).
+ void SetPointerForInvalidEntry(int32 new_id);
+
+ // Returns true if this entry is so messed up that not everything is going to
+ // be removed.
+ bool LeaveRankingsBehind();
+
+ // Returns false if the entry is clearly invalid.
+ bool SanityCheck();
+ bool DataSanityCheck();
+
+ // Attempts to make this entry reachable through the key.
+ void FixForDelete();
+
+ // Handle the pending asynchronous IO count.
+ void IncrementIoCount();
+ void DecrementIoCount();
+
+ // This entry is being returned to the user. It is always called from the
+ // primary thread (not the dedicated cache thread).
+ void OnEntryCreated(BackendImpl* backend);
+
+ // Sets the access times for this entry. This method provides support for
+ // the upgrade tool.
+ void SetTimes(base::Time last_used, base::Time last_modified);
+
+ // Generates a histogram for the time spent working on this operation.
+ void ReportIOTime(Operation op, const base::TimeTicks& start);
+
+ // Logs a begin event and enables logging for the EntryImpl. Will also cause
+ // an end event to be logged on destruction. The EntryImpl must have its key
+ // initialized before this is called. |created| is true if the Entry was
+ // created rather than opened.
+ void BeginLogging(net::NetLog* net_log, bool created);
+
+ const net::BoundNetLog& net_log() const;
+
+ // Returns the number of blocks needed to store an EntryStore.
+ static int NumBlocksForEntry(int key_size);
+
+ // Entry interface.
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ enum {
+ kNumStreams = 3
+ };
+ class UserBuffer;
+
+ virtual ~EntryImpl();
+
+ // Do all the work for ReadDataImpl and WriteDataImpl. Implemented as
+ // separate functions to make logging of results simpler.
+ int InternalReadData(int index, int offset, IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback);
+ int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate);
+
+ // Initializes the storage for an internal or external data block.
+ bool CreateDataBlock(int index, int size);
+
+ // Initializes the storage for an internal or external generic block.
+ bool CreateBlock(int size, Addr* address);
+
+ // Deletes the data pointed to by |address|, maybe backed by files_[index].
+ // Note that the caller should normally clear the stored reference to |address|
+ // (and persist that change) *before* calling this method, because we don't
+ // want an entry using an address that is already free.
+ void DeleteData(Addr address, int index);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
+
+ // Returns a pointer to the file that stores the given address.
+ File* GetBackingFile(Addr address, int index);
+
+ // Returns a pointer to the file that stores external data.
+ File* GetExternalFile(Addr address, int index);
+
+ // Prepares the target file or buffer for a write of buf_len bytes at the
+ // given offset.
+ bool PrepareTarget(int index, int offset, int buf_len, bool truncate);
+
+ // Adjusts the internal buffer and file handle for a write that truncates this
+ // stream.
+ bool HandleTruncation(int index, int offset, int buf_len);
+
+ // Copies data from disk to the internal buffer.
+ bool CopyToLocalBuffer(int index);
+
+ // Moves the data pointed to by the current address into this object's memory
+ // buffer, releasing the on-disk copy.
+ bool MoveToLocalBuffer(int index);
+
+ // Loads the external file to this object's memory buffer.
+ bool ImportSeparateFile(int index, int new_size);
+
+ // Makes sure that the internal buffer can handle a write of |buf_len|
+ // bytes to |offset|.
+ bool PrepareBuffer(int index, int offset, int buf_len);
+
+ // Flushes the in-memory data to the backing storage. The data destination
+ // is determined based on the current data length and |min_len|.
+ bool Flush(int index, int min_len);
+
+ // Updates the size of a given data stream.
+ void UpdateSize(int index, int old_size, int new_size);
+
+ // Initializes the sparse control object. Returns a net error code.
+ int InitSparseData();
+
+ // Adds the provided |flags| to the current EntryFlags for this entry.
+ void SetEntryFlags(uint32 flags);
+
+ // Returns the current EntryFlags for this entry.
+ uint32 GetEntryFlags();
+
+ // Gets the data stored at the given index. If the information is in memory,
+ // a buffer will be allocated and the data will be copied to it (the caller
+ // can find out the size of the buffer before making this call). Otherwise,
+ // the cache address of the data will be returned, and that address will be
+ // removed from the regular bookkeeping of this entry so the caller is
+ // responsible for deleting the block (or file) from the backing store at some
+ // point; there is no need to report any storage-size change, only to do the
+ // actual cleanup.
+ void GetData(int index, char** buffer, Addr* address);
+
+ // Logs this entry to the internal trace buffer.
+ void Log(const char* msg);
+
+ CacheEntryBlock entry_; // Key related information for this entry.
+ CacheRankingsBlock node_; // Rankings related information for this entry.
+ base::WeakPtr<BackendImpl> backend_; // Back pointer to the cache.
+ base::WeakPtr<InFlightBackendIO> background_queue_; // In-progress queue.
+ scoped_ptr<UserBuffer> user_buffers_[kNumStreams]; // Stores user data.
+ // Files to store external user data and key.
+ scoped_refptr<File> files_[kNumStreams + 1];
+ mutable std::string key_; // Copy of the key.
+ int unreported_size_[kNumStreams]; // Bytes not reported yet to the backend.
+ bool doomed_; // True if this entry was removed from the cache.
+ bool read_only_; // True if not yet writing.
+ bool dirty_; // True if we detected that this is a dirty entry.
+ scoped_ptr<SparseControl> sparse_; // Support for sparse entries.
+
+ net::BoundNetLog net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(EntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ENTRY_IMPL_H_
diff --git a/chromium/net/disk_cache/entry_unittest.cc b/chromium/net/disk_cache/entry_unittest.cc
new file mode 100644
index 00000000000..7addc85e5a0
--- /dev/null
+++ b/chromium/net/disk_cache/entry_unittest.cc
@@ -0,0 +1,3405 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/file_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "base/timer/timer.h"
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/mem_entry_impl.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_entry_impl.h"
+#include "net/disk_cache/simple/simple_test_util.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+using disk_cache::ScopedEntryPtr;
+
+// Tests that can run with different types of caches.
+class DiskCacheEntryTest : public DiskCacheTestWithCache {
+ public:
+ void InternalSyncIOBackground(disk_cache::Entry* entry);
+ void ExternalSyncIOBackground(disk_cache::Entry* entry);
+
+ protected:
+ void InternalSyncIO();
+ void InternalAsyncIO();
+ void ExternalSyncIO();
+ void ExternalAsyncIO();
+ void ReleaseBuffer();
+ void StreamAccess();
+ void GetKey();
+ void GetTimes();
+ void GrowData();
+ void TruncateData();
+ void ZeroLengthIO();
+ void Buffering();
+ void SizeAtCreate();
+ void SizeChanges();
+ void ReuseEntry(int size);
+ void InvalidData();
+ void ReadWriteDestroyBuffer();
+ void DoomNormalEntry();
+ void DoomEntryNextToOpenEntry();
+ void DoomedEntry();
+ void BasicSparseIO();
+ void HugeSparseIO();
+ void GetAvailableRange();
+ void CouldBeSparse();
+ void UpdateSparseEntry();
+ void DoomSparseEntry();
+ void PartialSparseEntry();
+ bool SimpleCacheMakeBadChecksumEntry(const char* key, int* data_size);
+};
+
+// This part of the test runs on the background thread.
+void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
+ const int kSize1 = 10;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ EXPECT_EQ(
+ 0,
+ entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ EXPECT_EQ(10,
+ entry->WriteData(
+ 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
+ memset(buffer1->data(), 0, kSize1);
+ EXPECT_EQ(
+ 10,
+ entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
+ EXPECT_STREQ("the data", buffer1->data());
+
+ const int kSize2 = 5000;
+ const int kSize3 = 10000;
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
+ memset(buffer3->data(), 0, kSize3);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ EXPECT_EQ(
+ 5000,
+ entry->WriteData(
+ 1, 1500, buffer2.get(), kSize2, net::CompletionCallback(), false));
+ memset(buffer2->data(), 0, kSize2);
+ EXPECT_EQ(4989,
+ entry->ReadData(
+ 1, 1511, buffer2.get(), kSize2, net::CompletionCallback()));
+ EXPECT_STREQ("big data goes here", buffer2->data());
+ EXPECT_EQ(
+ 5000,
+ entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
+ EXPECT_EQ(1500,
+ entry->ReadData(
+ 1, 5000, buffer2.get(), kSize2, net::CompletionCallback()));
+
+ EXPECT_EQ(0,
+ entry->ReadData(
+ 1, 6500, buffer2.get(), kSize2, net::CompletionCallback()));
+ EXPECT_EQ(
+ 6500,
+ entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
+ EXPECT_EQ(8192,
+ entry->WriteData(
+ 1, 0, buffer3.get(), 8192, net::CompletionCallback(), false));
+ EXPECT_EQ(
+ 8192,
+ entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
+ EXPECT_EQ(8192, entry->GetDataSize(1));
+
+ // We need to delete the memory buffer on this thread.
+ EXPECT_EQ(0, entry->WriteData(
+ 0, 0, NULL, 0, net::CompletionCallback(), true));
+ EXPECT_EQ(0, entry->WriteData(
+ 1, 0, NULL, 0, net::CompletionCallback(), true));
+}
+
+// We need to support synchronous IO even though it is not a supported operation
+// from the point of view of the disk cache's public interface, because it is
+// used internally, not just by a few tests but as part of the implementation
+// (see sparse_control.cc, for example).
+void DiskCacheEntryTest::InternalSyncIO() {
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+
+ // The bulk of the test runs from within the callback, on the cache thread.
+ RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
+ base::Unretained(this),
+ entry));
+
+
+ entry->Doom();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, InternalSyncIO) {
+ InitCache();
+ InternalSyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ InternalSyncIO();
+}
+
+void DiskCacheEntryTest::InternalAsyncIO() {
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+
+ // Avoid using internal buffers for the test. We have to write something to
+ // the entry and close it so that we flush the internal buffer to disk. After
+ // that, IO operations will be really hitting the disk. We don't care about
+ // the content, so just extending the entry is enough (all extensions zero-
+ // fill any holes).
+ EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
+ EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
+
+ MessageLoopHelper helper;
+ // Let's verify that each IO goes to the right callback object.
+ CallbackTest callback1(&helper, false);
+ CallbackTest callback2(&helper, false);
+ CallbackTest callback3(&helper, false);
+ CallbackTest callback4(&helper, false);
+ CallbackTest callback5(&helper, false);
+ CallbackTest callback6(&helper, false);
+ CallbackTest callback7(&helper, false);
+ CallbackTest callback8(&helper, false);
+ CallbackTest callback9(&helper, false);
+ CallbackTest callback10(&helper, false);
+ CallbackTest callback11(&helper, false);
+ CallbackTest callback12(&helper, false);
+ CallbackTest callback13(&helper, false);
+
+ const int kSize1 = 10;
+ const int kSize2 = 5000;
+ const int kSize3 = 10000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ CacheTestFillBuffer(buffer3->data(), kSize3, false);
+
+ EXPECT_EQ(0,
+ entry->ReadData(
+ 0,
+ 15 * 1024,
+ buffer1.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ int expected = 0;
+ int ret = entry->WriteData(
+ 0,
+ 0,
+ buffer1.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
+ false);
+ EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ memset(buffer2->data(), 0, kSize2);
+ ret = entry->ReadData(
+ 0,
+ 0,
+ buffer2.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
+ EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("the data", buffer2->data());
+
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ ret = entry->WriteData(
+ 1,
+ 1500,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
+ true);
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ memset(buffer3->data(), 0, kSize3);
+ ret = entry->ReadData(
+ 1,
+ 1511,
+ buffer3.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
+ EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("big data goes here", buffer3->data());
+ ret = entry->ReadData(
+ 1,
+ 0,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ memset(buffer3->data(), 0, kSize3);
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
+ ret = entry->ReadData(
+ 1,
+ 5000,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
+ EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ ret = entry->ReadData(
+ 1,
+ 0,
+ buffer3.get(),
+ kSize3,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
+ EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ ret = entry->WriteData(
+ 1,
+ 0,
+ buffer3.get(),
+ 8192,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
+ true);
+ EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ ret = entry->ReadData(
+ 1,
+ 0,
+ buffer3.get(),
+ kSize3,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
+ EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_EQ(8192, entry->GetDataSize(1));
+
+ ret = entry->ReadData(
+ 0,
+ 0,
+ buffer1.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
+ EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ ret = entry->ReadData(
+ 1,
+ 0,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ EXPECT_FALSE(helper.callback_reused_error());
+
+ entry->Doom();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
+ InitCache();
+ InternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ InternalAsyncIO();
+}
+
+// This part of the test runs on the background thread.
+void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
+ const int kSize1 = 17000;
+ const int kSize2 = 25000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ EXPECT_EQ(17000,
+ entry->WriteData(
+ 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
+ memset(buffer1->data(), 0, kSize1);
+ EXPECT_EQ(
+ 17000,
+ entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
+ EXPECT_STREQ("the data", buffer1->data());
+
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ EXPECT_EQ(
+ 25000,
+ entry->WriteData(
+ 1, 10000, buffer2.get(), kSize2, net::CompletionCallback(), false));
+ memset(buffer2->data(), 0, kSize2);
+ EXPECT_EQ(24989,
+ entry->ReadData(
+ 1, 10011, buffer2.get(), kSize2, net::CompletionCallback()));
+ EXPECT_STREQ("big data goes here", buffer2->data());
+ EXPECT_EQ(
+ 25000,
+ entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
+ EXPECT_EQ(5000,
+ entry->ReadData(
+ 1, 30000, buffer2.get(), kSize2, net::CompletionCallback()));
+
+ EXPECT_EQ(0,
+ entry->ReadData(
+ 1, 35000, buffer2.get(), kSize2, net::CompletionCallback()));
+ EXPECT_EQ(
+ 17000,
+ entry->ReadData(1, 0, buffer1.get(), kSize1, net::CompletionCallback()));
+ EXPECT_EQ(
+ 17000,
+ entry->WriteData(
+ 1, 20000, buffer1.get(), kSize1, net::CompletionCallback(), false));
+ EXPECT_EQ(37000, entry->GetDataSize(1));
+
+ // We need to delete the memory buffer on this thread.
+ EXPECT_EQ(0, entry->WriteData(
+ 0, 0, NULL, 0, net::CompletionCallback(), true));
+ EXPECT_EQ(0, entry->WriteData(
+ 1, 0, NULL, 0, net::CompletionCallback(), true));
+}
+
+void DiskCacheEntryTest::ExternalSyncIO() {
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
+
+ // The bulk of the test runs from within the callback, on the cache thread.
+ RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
+ base::Unretained(this),
+ entry));
+
+ entry->Doom();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
+ InitCache();
+ ExternalSyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ ExternalSyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ExternalSyncIO();
+}
+
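+// Like InternalAsyncIO, but the 17 KB and 25 KB buffers are large enough for
+// the data to end up in external files rather than internal blocks.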
+void DiskCacheEntryTest::ExternalAsyncIO() {
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
+
+ int expected = 0;
+
+ MessageLoopHelper helper;
+ // Let's verify that each IO goes to the right callback object.
+ CallbackTest callback1(&helper, false);
+ CallbackTest callback2(&helper, false);
+ CallbackTest callback3(&helper, false);
+ CallbackTest callback4(&helper, false);
+ CallbackTest callback5(&helper, false);
+ CallbackTest callback6(&helper, false);
+ CallbackTest callback7(&helper, false);
+ CallbackTest callback8(&helper, false);
+ CallbackTest callback9(&helper, false);
+
+ const int kSize1 = 17000;
+ const int kSize2 = 25000;
+ const int kSize3 = 25000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ CacheTestFillBuffer(buffer3->data(), kSize3, false);
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ int ret = entry->WriteData(
+ 0,
+ 0,
+ buffer1.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
+ false);
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ memset(buffer2->data(), 0, kSize1);
+ ret = entry->ReadData(
+ 0,
+ 0,
+ buffer2.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("the data", buffer2->data());
+
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ ret = entry->WriteData(
+ 1,
+ 10000,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
+ false);
+ EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ memset(buffer3->data(), 0, kSize3);
+ ret = entry->ReadData(
+ 1,
+ 10011,
+ buffer3.get(),
+ kSize3,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
+ EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("big data goes here", buffer3->data());
+ ret = entry->ReadData(
+ 1,
+ 0,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
+ EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ memset(buffer3->data(), 0, kSize3);
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
+ ret = entry->ReadData(
+ 1,
+ 30000,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_EQ(0,
+ entry->ReadData(
+ 1,
+ 35000,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
+ ret = entry->ReadData(
+ 1,
+ 0,
+ buffer1.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ ret = entry->WriteData(
+ 1,
+ 20000,
+ buffer3.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
+ false);
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(37000, entry->GetDataSize(1));
+
+ EXPECT_FALSE(helper.callback_reused_error());
+
+ entry->Doom();
+ entry->Close();
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
+ InitCache();
+ ExternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, ExternalAsyncIONoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ ExternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ExternalAsyncIO();
+}
+
+// Tests that IOBuffers are not referenced after IO completes.
+void DiskCacheEntryTest::ReleaseBuffer() {
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+
+ const int kBufferSize = 1024;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
+ CacheTestFillBuffer(buffer->data(), kBufferSize, false);
+
+ net::ReleaseBufferCompletionCallback cb(buffer.get());
+ int rv =
+ entry->WriteData(0, 0, buffer.get(), kBufferSize, cb.callback(), false);
+ EXPECT_EQ(kBufferSize, cb.GetResult(rv));
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ ReleaseBuffer();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ReleaseBuffer();
+}
+
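+// Writes distinct data to each of the entry's three streams, reads it back
+// (also in chunks after reopening), and checks that an out-of-range stream
+// index is rejected.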
+void DiskCacheEntryTest::StreamAccess() {
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+
+ const int kBufferSize = 1024;
+ const int kNumStreams = 3;
+ scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
+ for (int i = 0; i < kNumStreams; i++) {
+ reference_buffers[i] = new net::IOBuffer(kBufferSize);
+ CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
+ }
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
+ for (int i = 0; i < kNumStreams; i++) {
+ EXPECT_EQ(
+ kBufferSize,
+ WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
+ memset(buffer1->data(), 0, kBufferSize);
+ EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
+ EXPECT_EQ(
+ 0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
+ }
+ EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
+ ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
+ entry->Close();
+
+ // Open the entry and read it in chunks, including a read past the end.
+ ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+ const int kReadBufferSize = 600;
+ const int kFinalReadSize = kBufferSize - kReadBufferSize;
+ COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads);
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
+ for (int i = 0; i < kNumStreams; i++) {
+ memset(buffer2->data(), 0, kReadBufferSize);
+ EXPECT_EQ(kReadBufferSize,
+ ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
+ EXPECT_EQ(
+ 0,
+ memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));
+
+ memset(buffer2->data(), 0, kReadBufferSize);
+ EXPECT_EQ(
+ kFinalReadSize,
+ ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
+ EXPECT_EQ(0,
+ memcmp(reference_buffers[i]->data() + kReadBufferSize,
+ buffer2->data(),
+ kFinalReadSize));
+ }
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, StreamAccess) {
+ InitCache();
+ StreamAccess();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
+ SetMemoryOnlyMode();
+ InitCache();
+ StreamAccess();
+}
+
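+// Checks that GetKey() returns the original key for keys of various lengths,
+// from a short literal up to roughly 20 KB.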
+void DiskCacheEntryTest::GetKey() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_EQ(key, entry->GetKey()) << "short key";
+ entry->Close();
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ char key_buffer[20000];
+
+ CacheTestFillBuffer(key_buffer, 3000, true);
+ key_buffer[1000] = '\0';
+
+ key = key_buffer;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
+ entry->Close();
+
+ key_buffer[1000] = 'p';
+ key_buffer[3000] = '\0';
+ key = key_buffer;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
+ entry->Close();
+
+ CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
+ key_buffer[19999] = '\0';
+
+ key = key_buffer;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_TRUE(key == entry->GetKey()) << "long key";
+ entry->Close();
+
+ CacheTestFillBuffer(key_buffer, 0x4000, true);
+ key_buffer[0x4000] = '\0';
+
+ key = key_buffer;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GetKey) {
+ InitCache();
+ GetKey();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GetKey();
+}
+
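+// Checks how the last-used and last-modified times react to IO: APP_CACHE
+// entries keep their creation times, SHADER_CACHE entries are updated only by
+// writes, and regular entries also update the last-used time on reads.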
+void DiskCacheEntryTest::GetTimes() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+
+ Time t1 = Time::Now();
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_TRUE(entry->GetLastModified() >= t1);
+ EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());
+
+ AddDelay();
+ Time t2 = Time::Now();
+ EXPECT_TRUE(t2 > t1);
+ EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
+ if (type_ == net::APP_CACHE) {
+ EXPECT_TRUE(entry->GetLastModified() < t2);
+ } else {
+ EXPECT_TRUE(entry->GetLastModified() >= t2);
+ }
+ EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());
+
+ AddDelay();
+ Time t3 = Time::Now();
+ EXPECT_TRUE(t3 > t2);
+ const int kSize = 200;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
+ if (type_ == net::APP_CACHE) {
+ EXPECT_TRUE(entry->GetLastUsed() < t2);
+ EXPECT_TRUE(entry->GetLastModified() < t2);
+ } else if (type_ == net::SHADER_CACHE) {
+ EXPECT_TRUE(entry->GetLastUsed() < t3);
+ EXPECT_TRUE(entry->GetLastModified() < t3);
+ } else {
+ EXPECT_TRUE(entry->GetLastUsed() >= t3);
+ EXPECT_TRUE(entry->GetLastModified() < t3);
+ }
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GetTimes) {
+ InitCache();
+ GetTimes();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GetTimes();
+}
+
+TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
+ SetCacheType(net::APP_CACHE);
+ InitCache();
+ GetTimes();
+}
+
+TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
+ SetCacheType(net::SHADER_CACHE);
+ InitCache();
+ GetTimes();
+}
+
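+// Checks that an entry can grow from a small internal block to a bigger block
+// and then to an external file, and that the reported size survives reopening.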
+void DiskCacheEntryTest::GrowData() {
+ std::string key1("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
+
+ const int kSize = 20000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ memset(buffer2->data(), 0, kSize);
+
+ base::strlcpy(buffer1->data(), "the data", kSize);
+ EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false));
+ EXPECT_EQ(10, ReadData(entry, 0, 0, buffer2.get(), 10));
+ EXPECT_STREQ("the data", buffer2->data());
+ EXPECT_EQ(10, entry->GetDataSize(0));
+
+ EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
+ EXPECT_EQ(2000, entry->GetDataSize(0));
+ EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
+
+ EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+ EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
+ entry->Close();
+
+ memset(buffer2->data(), 0, kSize);
+ std::string key2("Second key");
+ ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
+ EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false));
+ EXPECT_EQ(10, entry->GetDataSize(0));
+ entry->Close();
+
+ // Go from an internal address to a bigger block size.
+ ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
+ EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
+ EXPECT_EQ(2000, entry->GetDataSize(0));
+ EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
+ entry->Close();
+ memset(buffer2->data(), 0, kSize);
+
+ // Go from an internal address to an external one.
+ ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
+ EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+ EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
+ entry->Close();
+
+ // Double check the size from disk.
+ ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+
+ // Now extend the entry without actual data.
+ EXPECT_EQ(0, WriteData(entry, 0, 45500, buffer1.get(), 0, false));
+ entry->Close();
+
+ // And check again from disk.
+ ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
+ EXPECT_EQ(45500, entry->GetDataSize(0));
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GrowData) {
+ InitCache();
+ GrowData();
+}
+
+TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ GrowData();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GrowData();
+}
+
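+// Checks that writes with the truncate flag shrink the entry as expected, both
+// for data stored in internal blocks and in external files.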
+void DiskCacheEntryTest::TruncateData() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize1 = 20000;
+ const int kSize2 = 20000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ memset(buffer2->data(), 0, kSize2);
+
+ // Simple truncation:
+ EXPECT_EQ(200, WriteData(entry, 0, 0, buffer1.get(), 200, false));
+ EXPECT_EQ(200, entry->GetDataSize(0));
+ EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, false));
+ EXPECT_EQ(200, entry->GetDataSize(0));
+ EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, true));
+ EXPECT_EQ(100, entry->GetDataSize(0));
+ EXPECT_EQ(0, WriteData(entry, 0, 50, buffer1.get(), 0, true));
+ EXPECT_EQ(50, entry->GetDataSize(0));
+ EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true));
+ EXPECT_EQ(0, entry->GetDataSize(0));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ // Go to an external file.
+ EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+ EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), 20000));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
+ memset(buffer2->data(), 0, kSize2);
+
+ // External file truncation
+ EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, false));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+ EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, true));
+ EXPECT_EQ(18000, entry->GetDataSize(0));
+ EXPECT_EQ(0, WriteData(entry, 0, 17500, buffer1.get(), 0, true));
+ EXPECT_EQ(17500, entry->GetDataSize(0));
+
+ // And back to an internal block.
+ EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true));
+ EXPECT_EQ(1600, entry->GetDataSize(0));
+ EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer2.get(), 600));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
+ EXPECT_EQ(1000, ReadData(entry, 0, 0, buffer2.get(), 1000));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
+ << "Preserves previous data";
+
+ // Go from external file to zero length.
+ EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+ EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true));
+ EXPECT_EQ(0, entry->GetDataSize(0));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, TruncateData) {
+ InitCache();
+ TruncateData();
+}
+
+TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ TruncateData();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
+ SetMemoryOnlyMode();
+ InitCache();
+ TruncateData();
+}
+
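+// Zero-length reads and writes should succeed, and a zero-length write past
+// the current end should extend the entry with zero-filled data.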
+void DiskCacheEntryTest::ZeroLengthIO() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ EXPECT_EQ(0, ReadData(entry, 0, 0, NULL, 0));
+ EXPECT_EQ(0, WriteData(entry, 0, 0, NULL, 0, false));
+
+ // This write should extend the entry.
+ EXPECT_EQ(0, WriteData(entry, 0, 1000, NULL, 0, false));
+ EXPECT_EQ(0, ReadData(entry, 0, 500, NULL, 0));
+ EXPECT_EQ(0, ReadData(entry, 0, 2000, NULL, 0));
+ EXPECT_EQ(1000, entry->GetDataSize(0));
+
+ EXPECT_EQ(0, WriteData(entry, 0, 100000, NULL, 0, true));
+ EXPECT_EQ(0, ReadData(entry, 0, 50000, NULL, 0));
+ EXPECT_EQ(100000, entry->GetDataSize(0));
+
+ // Let's verify the actual content.
+ const int kSize = 20;
+ const char zeros[kSize] = {};
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ EXPECT_EQ(kSize, ReadData(entry, 0, 500, buffer.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
+
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ EXPECT_EQ(kSize, ReadData(entry, 0, 5000, buffer.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
+
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ EXPECT_EQ(kSize, ReadData(entry, 0, 50000, buffer.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
+ InitCache();
+ ZeroLengthIO();
+}
+
+TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ ZeroLengthIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ZeroLengthIO();
+}
+
+// Tests that we handle the content correctly when buffering, a feature of the
+// standard cache that permits fast responses to certain reads.
+void DiskCacheEntryTest::Buffering() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 200;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, true);
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+
+ EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
+ entry->Close();
+
+ // Write a little more and read what we wrote before.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
+ EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+
+ // Now go to an external file.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
+ entry->Close();
+
+ // Write something else and verify old data.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+
+ // Extend the file some more.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
+ entry->Close();
+
+ // And now make sure that we can deal with data in both places (RAM/disk).
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));
+
+ // We should not overwrite the data at 18000 with this.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+
+ EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
+
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
+
+ // Extend the file again and read before without closing the entry.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, Buffering) {
+ InitCache();
+ Buffering();
+}
+
+TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ Buffering();
+}
+
+// Checks that entries are zero length when created.
+void DiskCacheEntryTest::SizeAtCreate() {
+ const char key[] = "the first key";
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kNumStreams = 3;
+ for (int i = 0; i < kNumStreams; ++i)
+ EXPECT_EQ(0, entry->GetDataSize(i));
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, SizeAtCreate) {
+ InitCache();
+ SizeAtCreate();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
+ SetMemoryOnlyMode();
+ InitCache();
+ SizeAtCreate();
+}
+
+// Some extra tests to make sure that buffering works properly when changing
+// the entry size.
+void DiskCacheEntryTest::SizeChanges() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 200;
+ const char zeros[kSize] = {};
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, true);
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+
+ EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, true));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, true));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, true));
+ entry->Close();
+
+ // Extend the file and read between the old size and the new write.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(23000 + kSize, entry->GetDataSize(1));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true));
+ EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
+ EXPECT_EQ(kSize, ReadData(entry, 1, 24000, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));
+
+ // Read at the end of the old file size.
+ EXPECT_EQ(kSize,
+ ReadData(entry, 1, 23000 + kSize - 35, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));
+
+ // Read slightly before the last write.
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 24900, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
+ EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
+
+ // Extend the entry a little more.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 26000, buffer1.get(), kSize, true));
+ EXPECT_EQ(26000 + kSize, entry->GetDataSize(1));
+ CacheTestFillBuffer(buffer2->data(), kSize, true);
+ EXPECT_EQ(kSize, ReadData(entry, 1, 25900, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
+ EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
+
+ // And now reduce the size.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true));
+ EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
+ EXPECT_EQ(28, ReadData(entry, 1, 25000 + kSize - 28, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));
+
+ // Reduce the size with a buffer that is not extending the size.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 24000, buffer1.get(), kSize, false));
+ EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 24500, buffer1.get(), kSize, true));
+ EXPECT_EQ(24500 + kSize, entry->GetDataSize(1));
+ EXPECT_EQ(kSize, ReadData(entry, 1, 23900, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
+ EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
+
+ // And now reduce the size below the old size.
+ EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, true));
+ EXPECT_EQ(19000 + kSize, entry->GetDataSize(1));
+ EXPECT_EQ(kSize, ReadData(entry, 1, 18900, buffer2.get(), kSize));
+ EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
+ EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
+
+ // Verify that the actual file is truncated.
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(19000 + kSize, entry->GetDataSize(1));
+
+ // Extend the newly opened file with a zero length write, expect zero fill.
+ EXPECT_EQ(0, WriteData(entry, 1, 20000 + kSize, buffer1.get(), 0, false));
+ EXPECT_EQ(kSize, ReadData(entry, 1, 19000 + kSize, buffer1.get(), kSize));
+ EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, SizeChanges) {
+ InitCache();
+ SizeChanges();
+}
+
+TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ SizeChanges();
+}
+
+// Write more than the total cache capacity, but always to a single entry.
+// |size| is the number of bytes to write each time.
+void DiskCacheEntryTest::ReuseEntry(int size) {
+ std::string key1("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
+
+ entry->Close();
+ std::string key2("the second key");
+ ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
+ CacheTestFillBuffer(buffer->data(), size, false);
+
+ for (int i = 0; i < 15; i++) {
+ EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
+ EXPECT_EQ(size, WriteData(entry, 0, 0, buffer.get(), size, false));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
+ }
+
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
+ SetMaxSize(200 * 1024);
+ InitCache();
+ ReuseEntry(20 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
+ SetMemoryOnlyMode();
+ SetMaxSize(200 * 1024);
+ InitCache();
+ ReuseEntry(20 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
+ SetMaxSize(100 * 1024);
+ InitCache();
+ ReuseEntry(10 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
+ SetMemoryOnlyMode();
+ SetMaxSize(100 * 1024);
+ InitCache();
+ ReuseEntry(10 * 1024);
+}
+
+// Reading somewhere that was not written should return zeros.
+void DiskCacheEntryTest::InvalidData() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize1 = 20000;
+ const int kSize2 = 20000;
+ const int kSize3 = 20000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ memset(buffer2->data(), 0, kSize2);
+
+ // Simple data grow:
+ EXPECT_EQ(200, WriteData(entry, 0, 400, buffer1.get(), 200, false));
+ EXPECT_EQ(600, entry->GetDataSize(0));
+ EXPECT_EQ(100, ReadData(entry, 0, 300, buffer3.get(), 100));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ // The entry is now on disk. Load it and extend it.
+ EXPECT_EQ(200, WriteData(entry, 0, 800, buffer1.get(), 200, false));
+ EXPECT_EQ(1000, entry->GetDataSize(0));
+ EXPECT_EQ(100, ReadData(entry, 0, 700, buffer3.get(), 100));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ // This time using truncate.
+ EXPECT_EQ(200, WriteData(entry, 0, 1800, buffer1.get(), 200, true));
+ EXPECT_EQ(2000, entry->GetDataSize(0));
+ EXPECT_EQ(100, ReadData(entry, 0, 1500, buffer3.get(), 100));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
+
+ // Go to an external file.
+ EXPECT_EQ(200, WriteData(entry, 0, 19800, buffer1.get(), 200, false));
+ EXPECT_EQ(20000, entry->GetDataSize(0));
+ EXPECT_EQ(4000, ReadData(entry, 0, 14000, buffer3.get(), 4000));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));
+
+ // And back to an internal block.
+ EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true));
+ EXPECT_EQ(1600, entry->GetDataSize(0));
+ EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer3.get(), 600));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));
+
+ // Extend it again.
+ EXPECT_EQ(600, WriteData(entry, 0, 2000, buffer1.get(), 600, false));
+ EXPECT_EQ(2600, entry->GetDataSize(0));
+ EXPECT_EQ(200, ReadData(entry, 0, 1800, buffer3.get(), 200));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
+
+ // And again (with truncation flag).
+ EXPECT_EQ(600, WriteData(entry, 0, 3000, buffer1.get(), 600, true));
+ EXPECT_EQ(3600, entry->GetDataSize(0));
+ EXPECT_EQ(200, ReadData(entry, 0, 2800, buffer3.get(), 200));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, InvalidData) {
+ InitCache();
+ InvalidData();
+}
+
+TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
+ InitCache();
+ cache_impl_->SetFlags(disk_cache::kNoBuffering);
+ InvalidData();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
+ SetMemoryOnlyMode();
+ InitCache();
+ InvalidData();
+}
+
+// Tests that the cache keeps the IO buffer of an in-flight operation alive,
+// even after the caller releases its own reference.
+void DiskCacheEntryTest::ReadWriteDestroyBuffer() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 200;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+
+ net::TestCompletionCallback cb;
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, buffer.get(), kSize, cb.callback(), false));
+
+ // Release our reference to the buffer.
+ buffer = NULL;
+ EXPECT_EQ(kSize, cb.WaitForResult());
+
+ // And now test with a Read().
+ buffer = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0, 0, buffer.get(), kSize, cb.callback()));
+ buffer = NULL;
+ EXPECT_EQ(kSize, cb.WaitForResult());
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
+ InitCache();
+ ReadWriteDestroyBuffer();
+}
+
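+// Dooming an entry, either right after creation or after writing data to it,
+// should leave the cache empty once the entry is closed.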
+void DiskCacheEntryTest::DoomNormalEntry() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Doom();
+ entry->Close();
+
+ const int kSize = 20000;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, true);
+ buffer->data()[19999] = '\0';
+
+ key = buffer->data();
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+ EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
+ entry->Doom();
+ entry->Close();
+
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, DoomEntry) {
+ InitCache();
+ DoomNormalEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ DoomNormalEntry();
+}
+
+// Tests dooming an entry that's linked to an open entry.
+void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
+ disk_cache::Entry* entry1;
+ disk_cache::Entry* entry2;
+ ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
+ entry1->Close();
+ ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
+ entry1->Close();
+ ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
+ entry1->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
+ ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
+ entry2->Doom();
+ entry2->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
+ entry2->Doom();
+ entry2->Close();
+ entry1->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
+ entry1->Close();
+}
+
+TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
+ InitCache();
+ DoomEntryNextToOpenEntry();
+}
+
+TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
+ SetNewEviction();
+ InitCache();
+ DoomEntryNextToOpenEntry();
+}
+
+TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
+ SetCacheType(net::APP_CACHE);
+ InitCache();
+ DoomEntryNextToOpenEntry();
+}
+
+// Verify that basic operations work as expected with doomed entries.
+void DiskCacheEntryTest::DoomedEntry() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ entry->Doom();
+
+ FlushQueueForTest();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ Time initial = Time::Now();
+ AddDelay();
+
+ const int kSize1 = 2000;
+ const int kSize2 = 2000;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ memset(buffer2->data(), 0, kSize2);
+
+ EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
+ EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
+ EXPECT_EQ(key, entry->GetKey());
+ EXPECT_TRUE(initial < entry->GetLastModified());
+ EXPECT_TRUE(initial < entry->GetLastUsed());
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, DoomedEntry) {
+ InitCache();
+ DoomedEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ DoomedEntry();
+}
+
+// Tests that we discard entries if the data is missing.
+TEST_F(DiskCacheEntryTest, MissingData) {
+ InitCache();
+
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ // Write to an external file.
+ const int kSize = 20000;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
+ entry->Close();
+ FlushQueueForTest();
+
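+ // Address 0x80000001 maps to the first external data file created by the
+ // backend, which is where the 20000-byte stream written above should live.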
+ disk_cache::Addr address(0x80000001);
+ base::FilePath name = cache_impl_->GetFileName(address);
+ EXPECT_TRUE(base::DeleteFile(name, false));
+
+ // Attempt to read the data.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
+ ReadData(entry, 0, 0, buffer.get(), kSize));
+ entry->Close();
+
+ // The entry should be gone.
+ ASSERT_NE(net::OK, OpenEntry(key, &entry));
+}
+
+// Test that child entries in a memory cache backend are not visible from
+// enumerations.
+TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
+ SetMemoryOnlyMode();
+ InitCache();
+
+ const int kSize = 4096;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ std::string key("the first key");
+ disk_cache::Entry* parent_entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));
+
+ // Writes to the parent entry.
+ EXPECT_EQ(kSize,
+ parent_entry->WriteSparseData(
+ 0, buf.get(), kSize, net::CompletionCallback()));
+
+ // This write creates a child entry and writes to it.
+ EXPECT_EQ(kSize,
+ parent_entry->WriteSparseData(
+ 8192, buf.get(), kSize, net::CompletionCallback()));
+
+ parent_entry->Close();
+
+ // Perform the enumerations.
+ void* iter = NULL;
+ disk_cache::Entry* entry = NULL;
+ int count = 0;
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ ASSERT_TRUE(entry != NULL);
+ ++count;
+ disk_cache::MemEntryImpl* mem_entry =
+ reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
+ EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
+ mem_entry->Close();
+ }
+ EXPECT_EQ(1, count);
+}
+
+// Writes |buf_1| at |offset| and reads it back into |buf_2|.
+void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
+ net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
+ net::TestCompletionCallback cb;
+
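+ // Nothing has been written at |offset| yet, so the first read returns 0.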
+ memset(buf_2->data(), 0, size);
+ int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(ret));
+
+ ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
+ EXPECT_EQ(size, cb.GetResult(ret));
+
+ ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
+ EXPECT_EQ(size, cb.GetResult(ret));
+
+ EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
+}
+
+// Reads |size| bytes from |entry| at |offset| and verifies that they are the
+// same as the content of the provided |buffer|.
+void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
+ int size) {
+ net::TestCompletionCallback cb;
+
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
+ memset(buf_1->data(), 0, size);
+ int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
+ EXPECT_EQ(size, cb.GetResult(ret));
+ EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
+}
+
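+// Sparse IO at offsets 0, 4 MB and 32 GB, verified both while the entry is
+// open and again after reopening it.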
+void DiskCacheEntryTest::BasicSparseIO() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 2048;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Write at offset 0.
+ VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());
+
+ // Write at offset 0x400000 (4 MB).
+ VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());
+
+ // Write at offset 0x800000000 (32 GB).
+ VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());
+
+ entry->Close();
+
+ // Check everything again.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
+ VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
+ VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, BasicSparseIO) {
+ InitCache();
+ BasicSparseIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ BasicSparseIO();
+}
+
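+// A single 1.2 MB sparse write that spans multiple child entries, verified
+// again after reopening the entry.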
+void DiskCacheEntryTest::HugeSparseIO() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ // Write 1.2 MB so that we cover multiple entries.
+ const int kSize = 1200 * 1024;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Write at offset 0x20F0000 (33 MB - 64 KB).
+ VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
+ entry->Close();
+
+ // Check it again.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, HugeSparseIO) {
+ InitCache();
+ HugeSparseIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ HugeSparseIO();
+}
+
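+// Checks that GetAvailableRange() finds the sparse ranges written below, stops
+// at the first gap, and honors the requested length.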
+void DiskCacheEntryTest::GetAvailableRange() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 16 * 1024;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
+ EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
+ EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));
+
+ // We stop at the first empty block.
+ int64 start;
+ net::TestCompletionCallback cb;
+ int rv = entry->GetAvailableRange(
+ 0x20F0000, kSize * 2, &start, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+ EXPECT_EQ(0x20F0000, start);
+
+ start = 0;
+ rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+ rv = entry->GetAvailableRange(
+ 0x20F0000 - kSize, kSize, &start, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+ rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+ EXPECT_EQ(0x20F0000, start);
+
+ // We should be able to Read based on the results of GetAvailableRange.
+ start = -1;
+ rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+ rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+
+ start = 0;
+ rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
+ EXPECT_EQ(0x2000, cb.GetResult(rv));
+ EXPECT_EQ(0x20F2000, start);
+ EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));
+
+ // Make sure that we respect the |len| argument.
+ start = 0;
+ rv = entry->GetAvailableRange(
+ 0x20F0001 - kSize, kSize, &start, cb.callback());
+ EXPECT_EQ(1, cb.GetResult(rv));
+ EXPECT_EQ(0x20F0000, start);
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GetAvailableRange) {
+ InitCache();
+ GetAvailableRange();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GetAvailableRange();
+}
+
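+// CouldBeSparse() should return true for entries holding sparse data and false
+// for regular entries, both while open and after reopening.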
+void DiskCacheEntryTest::CouldBeSparse() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 16 * 1024;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ // Write at offset 0x20F0000 (33 MB - 64 KB).
+ EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
+
+ EXPECT_TRUE(entry->CouldBeSparse());
+ entry->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_TRUE(entry->CouldBeSparse());
+ entry->Close();
+
+ // Now verify a regular entry.
+ key.assign("another key");
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_FALSE(entry->CouldBeSparse());
+
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
+ EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
+ EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));
+
+ EXPECT_FALSE(entry->CouldBeSparse());
+ entry->Close();
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_FALSE(entry->CouldBeSparse());
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, CouldBeSparse) {
+ InitCache();
+ CouldBeSparse();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
+ SetMemoryOnlyMode();
+ InitCache();
+ CouldBeSparse();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+
+ const int kSize = 8192;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ // This loop writes 1024-byte chunks back to back, starting from offsets 0
+ // and 9000.
+ for (int i = 0; i < kSize; i += 1024) {
+ scoped_refptr<net::WrappedIOBuffer> buf_3(
+ new net::WrappedIOBuffer(buf_1->data() + i));
+ VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
+ VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
+ }
+
+ // Make sure we have data written.
+ VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
+ VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);
+
+ // This tests a large write that spans 3 entries from a misaligned offset.
+ VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
+ SetMemoryOnlyMode();
+ InitCache();
+
+ const int kSize = 8192;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ disk_cache::Entry* entry;
+ std::string key("the first key");
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ // Writes in the middle of an entry.
+ EXPECT_EQ(
+ 1024,
+ entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
+ EXPECT_EQ(
+ 1024,
+ entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
+ EXPECT_EQ(1024,
+ entry->WriteSparseData(
+ 10000, buf.get(), 1024, net::CompletionCallback()));
+
+ // Writes in the middle of an entry and spans 2 child entries.
+ EXPECT_EQ(8192,
+ entry->WriteSparseData(
+ 50000, buf.get(), 8192, net::CompletionCallback()));
+
+ int64 start;
+ net::TestCompletionCallback cb;
+ // Test that we stop at a discontinuous child at the second block.
+ int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
+ EXPECT_EQ(1024, cb.GetResult(rv));
+ EXPECT_EQ(0, start);
+
+ // Test that the number of bytes is reported correctly when we start from
+ // the middle of a filled region.
+ rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
+ EXPECT_EQ(512, cb.GetResult(rv));
+ EXPECT_EQ(512, start);
+
+ // Test that we find the bytes stored in the child covering the next block.
+ rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
+ EXPECT_EQ(1024, cb.GetResult(rv));
+ EXPECT_EQ(5120, start);
+
+ // Test that the desired length is respected. It starts within a filled
+ // region.
+ rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
+ EXPECT_EQ(512, cb.GetResult(rv));
+ EXPECT_EQ(5500, start);
+
+ // Test that the desired length is respected. It starts before a filled
+ // region.
+ rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
+ EXPECT_EQ(500, cb.GetResult(rv));
+ EXPECT_EQ(5120, start);
+
+ // Test that multiple blocks are scanned.
+ rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
+ EXPECT_EQ(8192, cb.GetResult(rv));
+ EXPECT_EQ(50000, start);
+
+ entry->Close();
+}
+
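+// Writes sparse data to an entry across two open/close cycles. On disk the
+// sparse data lives in a separate child entry, so the final entry count
+// differs between the memory-only and disk backends.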
+void DiskCacheEntryTest::UpdateSparseEntry() {
+ std::string key("the first key");
+ disk_cache::Entry* entry1;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
+
+ const int kSize = 2048;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Write at offset 0.
+ VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
+ entry1->Close();
+
+ // Write at offset 2048.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
+ VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());
+
+ disk_cache::Entry* entry2;
+ ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));
+
+ entry1->Close();
+ entry2->Close();
+ FlushQueueForTest();
+ if (memory_only_)
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ else
+ EXPECT_EQ(3, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
+ SetCacheType(net::MEDIA_CACHE);
+ InitCache();
+ UpdateSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
+ SetMemoryOnlyMode();
+ SetCacheType(net::MEDIA_CACHE);
+ InitCache();
+ UpdateSparseEntry();
+}
+
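+// Dooms two sparse entries, one while still open and one after it has been
+// closed, and verifies that all of their child entries are removed as well.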
+void DiskCacheEntryTest::DoomSparseEntry() {
+ std::string key1("the first key");
+ std::string key2("the second key");
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
+ ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
+
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ int64 offset = 1024;
+ // Write to a bunch of ranges.
+ for (int i = 0; i < 12; i++) {
+ EXPECT_EQ(kSize,
+ entry1->WriteSparseData(
+ offset, buf.get(), kSize, net::CompletionCallback()));
+ // Keep the second map under the default size.
+ if (i < 9) {
+ EXPECT_EQ(kSize,
+ entry2->WriteSparseData(
+ offset, buf.get(), kSize, net::CompletionCallback()));
+ }
+
+ offset *= 4;
+ }
+
+ if (memory_only_)
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ else
+ EXPECT_EQ(15, cache_->GetEntryCount());
+
+ // Doom the first entry while it's still open.
+ entry1->Doom();
+ entry1->Close();
+ entry2->Close();
+
+ // Doom the second entry after it's fully saved.
+ EXPECT_EQ(net::OK, DoomEntry(key2));
+
+ // Make sure we do all the needed work. This may fail for entry2 if, between
+ // Close and DoomEntry, the system removes all traces of the file from the
+ // system cache, so that we don't see any pending IO.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ if (memory_only_) {
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ } else {
+ if (5 == cache_->GetEntryCount()) {
+ // Most likely we are waiting for the result of reading the sparse info
+ // (it's always async on Posix so it is easy to miss). Unfortunately we
+ // don't have any signal to watch for so we can only wait.
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
+ base::MessageLoop::current()->RunUntilIdle();
+ }
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ }
+}
+
+TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
+ UseCurrentThread();
+ InitCache();
+ DoomSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ DoomSparseEntry();
+}
+
+// A CompletionCallback wrapper that deletes the cache from within the
+// callback. Because a CompletionCallback runs every queued task (even new
+// ones) on the message loop before returning to the caller, the only way to
+// simulate a race is to do the deletion from inside the callback itself.
+class SparseTestCompletionCallback: public net::TestCompletionCallback {
+ public:
+ explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
+ : cache_(cache.Pass()) {
+ }
+
+ private:
+ virtual void SetResult(int result) OVERRIDE {
+ cache_.reset();
+ TestCompletionCallback::SetResult(result);
+ }
+
+ scoped_ptr<disk_cache::Backend> cache_;
+ DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
+};
+
+// Tests that we don't crash when the backend is deleted while we are busy
+// deleting the sub-entries of a sparse entry.
+TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
+ UseCurrentThread();
+ InitCache();
+ std::string key("the key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ int64 offset = 1024;
+ // Write to a bunch of ranges.
+ for (int i = 0; i < 12; i++) {
+ EXPECT_EQ(kSize,
+ entry->WriteSparseData(
+ offset, buf.get(), kSize, net::CompletionCallback()));
+ offset *= 4;
+ }
+ EXPECT_EQ(9, cache_->GetEntryCount());
+
+ entry->Close();
+ disk_cache::Backend* cache = cache_.get();
+ SparseTestCompletionCallback cb(cache_.Pass());
+ int rv = cache->DoomEntry(key, cb.callback());
+ EXPECT_EQ(net::ERR_IO_PENDING, rv);
+ EXPECT_EQ(net::OK, cb.WaitForResult());
+}
+
+void DiskCacheEntryTest::PartialSparseEntry() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ // We should be able to deal with IO that is not aligned to the block size
+ // of a sparse entry, at least to write a big range without leaving holes.
+ const int kSize = 4 * 1024;
+ const int kSmallSize = 128;
+ scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf1->data(), kSize, false);
+
+ // The first write is just to extend the entry. The third write occupies
+  // a 1KB block partially; it may not be written internally depending on the
+ // implementation.
+ EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
+ EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
+ EXPECT_EQ(kSmallSize,
+ WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
+ entry->Close();
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
+ memset(buf2->data(), 0, kSize);
+ EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));
+
+ EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
+ EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
+ EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));
+
+ // This read should not change anything.
+ EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
+ EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
+ EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));
+
+ int rv;
+ int64 start;
+ net::TestCompletionCallback cb;
+ if (memory_only_) {
+ rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
+ EXPECT_EQ(100, cb.GetResult(rv));
+ EXPECT_EQ(500, start);
+ } else {
+ rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
+ EXPECT_EQ(1024, cb.GetResult(rv));
+ EXPECT_EQ(1024, start);
+ }
+ rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
+ EXPECT_EQ(500, cb.GetResult(rv));
+ EXPECT_EQ(kSize, start);
+ rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
+ EXPECT_EQ(3616, cb.GetResult(rv));
+ EXPECT_EQ(20 * 1024, start);
+
+ // 1. Query before a filled 1KB block.
+ // 2. Query within a filled 1KB block.
+ // 3. Query beyond a filled 1KB block.
+ if (memory_only_) {
+ rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
+ EXPECT_EQ(3496, cb.GetResult(rv));
+ EXPECT_EQ(20000, start);
+ } else {
+ rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
+ EXPECT_EQ(3016, cb.GetResult(rv));
+ EXPECT_EQ(20480, start);
+ }
+ rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
+ EXPECT_EQ(1523, cb.GetResult(rv));
+ EXPECT_EQ(3073, start);
+ rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+ EXPECT_EQ(4600, start);
+
+ // Now make another write and verify that there is no hole in between.
+ EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
+ rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
+ EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
+ EXPECT_EQ(1024, start);
+ EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
+ EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
+ EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
+ InitCache();
+ PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ PartialSparseEntry();
+}
+
+// Tests that corrupt sparse children are removed automatically.
+TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf1->data(), kSize, false);
+
+ const int k1Meg = 1024 * 1024;
+ EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
+ EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
+ EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
+ entry->Close();
+ EXPECT_EQ(4, cache_->GetEntryCount());
+
+ void* iter = NULL;
+ int count = 0;
+ std::string child_key[2];
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ ASSERT_TRUE(entry != NULL);
+ // Writing to an entry will alter the LRU list and invalidate the iterator.
+ if (entry->GetKey() != key && count < 2)
+ child_key[count++] = entry->GetKey();
+ entry->Close();
+ }
+ for (int i = 0; i < 2; i++) {
+ ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
+ // Overwrite the header's magic and signature.
+ EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
+ entry->Close();
+ }
+
+ EXPECT_EQ(4, cache_->GetEntryCount());
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ // Two children should be gone. One while reading and one while writing.
+ EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
+ EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
+ EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
+
+ // We never touched this one.
+ EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
+ entry->Close();
+
+ // We re-created one of the corrupt children.
+ EXPECT_EQ(3, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, CancelSparseIO) {
+ UseCurrentThread();
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 40 * 1024;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ // This will open and write two "real" entries.
+ net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
+ int rv = entry->WriteSparseData(
+ 1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
+ EXPECT_EQ(net::ERR_IO_PENDING, rv);
+
+ int64 offset = 0;
+ rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
+ rv = cb5.GetResult(rv);
+ if (!cb1.have_result()) {
+ // We may or may not have finished writing to the entry. If we have not,
+ // we cannot start another operation at this time.
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
+ }
+
+ // We cancel the pending operation, and register multiple notifications.
+ entry->CancelSparseIO();
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
+ entry->CancelSparseIO(); // Should be a no op at this point.
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));
+
+ if (!cb1.have_result()) {
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->ReadSparseData(
+ offset, buf.get(), kSize, net::CompletionCallback()));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->WriteSparseData(
+ offset, buf.get(), kSize, net::CompletionCallback()));
+ }
+
+ // Now see if we receive all notifications. Note that we should not be able
+ // to write everything (unless the timing of the system is really weird).
+ rv = cb1.WaitForResult();
+ EXPECT_TRUE(rv == 4096 || rv == kSize);
+ EXPECT_EQ(net::OK, cb2.WaitForResult());
+ EXPECT_EQ(net::OK, cb3.WaitForResult());
+ EXPECT_EQ(net::OK, cb4.WaitForResult());
+
+ rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
+ EXPECT_EQ(0, cb5.GetResult(rv));
+ entry->Close();
+}
+
+// Tests that we perform sanity checks on an entry's key. Note that there are
+// other tests that exercise sanity checks by using saved corrupt files.
+TEST_F(DiskCacheEntryTest, KeySanityCheck) {
+ UseCurrentThread();
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ disk_cache::EntryImpl* entry_impl =
+ static_cast<disk_cache::EntryImpl*>(entry);
+ disk_cache::EntryStore* store = entry_impl->entry()->Data();
+
+  // We have reserved space for a short key (one block); let's claim that the
+  // key takes more than one block, and remove the NULLs after the actual key.
+ store->key_len = 800;
+ memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
+ entry_impl->entry()->set_modified();
+ entry->Close();
+
+ // We have a corrupt entry. Now reload it. We should NOT read beyond the
+ // allocated buffer here.
+ ASSERT_NE(net::OK, OpenEntry(key, &entry));
+ DisableIntegrityCheck();
+}
+
+// The simple cache backend isn't intended to work on Windows, which has very
+// different file system guarantees from Linux.
+#if defined(OS_POSIX)
+
+TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ InternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ ExternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
+ SetSimpleCacheMode();
+ InitCache();
+ ReleaseBuffer();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
+ SetSimpleCacheMode();
+ InitCache();
+ StreamAccess();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
+ SetSimpleCacheMode();
+ InitCache();
+ GetKey();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
+ SetSimpleCacheMode();
+ InitCache();
+ GetTimes();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
+ SetSimpleCacheMode();
+ InitCache();
+ GrowData();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
+ SetSimpleCacheMode();
+ InitCache();
+ TruncateData();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ ZeroLengthIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
+ SetSimpleCacheMode();
+ InitCache();
+ SizeAtCreate();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
+ SetSimpleCacheMode();
+ SetMaxSize(200 * 1024);
+ InitCache();
+ ReuseEntry(20 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
+ SetSimpleCacheMode();
+ SetMaxSize(100 * 1024);
+ InitCache();
+ ReuseEntry(10 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
+ SetSimpleCacheMode();
+ InitCache();
+ SizeChanges();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
+ SetSimpleCacheMode();
+ InitCache();
+ InvalidData();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
+ SetSimpleCacheMode();
+ InitCache();
+ ReadWriteDestroyBuffer();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ DoomNormalEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ DoomEntryNextToOpenEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ DoomedEntry();
+}
+
+// Creates an entry with a corrupted last byte in stream 0.
+// Requires SimpleCacheMode.
+bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const char* key,
+ int* data_size) {
+ disk_cache::Entry* entry = NULL;
+
+ if (CreateEntry(key, &entry) != net::OK || !entry) {
+ LOG(ERROR) << "Could not create entry";
+ return false;
+ }
+
+ const char data[] = "this is very good data";
+ const int kDataSize = arraysize(data);
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
+ base::strlcpy(buffer->data(), data, kDataSize);
+
+ EXPECT_EQ(kDataSize, WriteData(entry, 0, 0, buffer.get(), kDataSize, false));
+ entry->Close();
+ entry = NULL;
+
+ // Corrupt the last byte of the data.
+ base::FilePath entry_file0_path = cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
+ int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
+ base::PlatformFile entry_file0 =
+ base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
+ if (entry_file0 == base::kInvalidPlatformFileValue)
+ return false;
+ int64 file_offset =
+ disk_cache::simple_util::GetFileOffsetFromKeyAndDataOffset(
+ key, kDataSize - 2);
+ EXPECT_EQ(1, base::WritePlatformFile(entry_file0, file_offset, "X", 1));
+ if (!base::ClosePlatformFile(entry_file0))
+ return false;
+ *data_size = kDataSize;
+ return true;
+}
+
+// Tests that the simple cache can detect entries that have bad data.
+TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+ int size_unused;
+ ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));
+
+ disk_cache::Entry* entry = NULL;
+
+ // Open the entry.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ScopedEntryPtr entry_closer(entry);
+
+ const int kReadBufferSize = 200;
+ EXPECT_GE(kReadBufferSize, entry->GetDataSize(0));
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
+ EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
+ ReadData(entry, 0, 0, read_buffer.get(), kReadBufferSize));
+}
+
+// Tests that an entry that has had an IO error occur can still be Doomed().
+TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+ int size_unused;
+ ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));
+
+ disk_cache::Entry* entry = NULL;
+
+ // Open the entry, forcing an IO error.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ScopedEntryPtr entry_closer(entry);
+
+ const int kReadBufferSize = 200;
+ EXPECT_GE(kReadBufferSize, entry->GetDataSize(0));
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
+ EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
+ ReadData(entry, 0, 0, read_buffer.get(), kReadBufferSize));
+
+ entry->Doom(); // Should not crash.
+}
+
+bool TruncatePath(const base::FilePath& file_path, int64 length) {
+ const int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
+ base::PlatformFile file =
+ base::CreatePlatformFile(file_path, flags, NULL, NULL);
+ if (base::kInvalidPlatformFileValue == file)
+ return false;
+ const bool result = base::TruncatePlatformFile(file, length);
+ base::ClosePlatformFile(file);
+ return result;
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ disk_cache::Entry* null = NULL;
+ EXPECT_NE(null, entry);
+ entry->Close();
+ entry = NULL;
+
+ // Force the entry to flush to disk, so subsequent platform file operations
+  // succeed.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ entry->Close();
+ entry = NULL;
+
+ // Truncate the file such that the length isn't sufficient to have an EOF
+ // record.
+ int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
+ const base::FilePath entry_path = cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
+ const int64 invalid_size =
+ disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
+ kTruncationBytes);
+ EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
+ EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
+ // Test sequence:
+ // Create, Write, Read, Close.
+ SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* const null_entry = NULL;
+
+ disk_cache::Entry* entry = NULL;
+ EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
+ ASSERT_NE(null_entry, entry);
+ ScopedEntryPtr entry_closer(entry);
+
+ const int kBufferSize = 10;
+ scoped_refptr<net::IOBufferWithSize> write_buffer(
+ new net::IOBufferWithSize(kBufferSize));
+ CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
+ EXPECT_EQ(
+ write_buffer->size(),
+ WriteData(entry, 0, 0, write_buffer.get(), write_buffer->size(), false));
+
+ scoped_refptr<net::IOBufferWithSize> read_buffer(
+ new net::IOBufferWithSize(kBufferSize));
+ EXPECT_EQ(
+ read_buffer->size(),
+ ReadData(entry, 0, 0, read_buffer.get(), read_buffer->size()));
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
+ // Test sequence:
+ // Create, Write, Close.
+ SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* const null_entry = NULL;
+
+ MessageLoopHelper helper;
+ CallbackTest create_callback(&helper, false);
+
+ int expected_callback_runs = 0;
+ const int kBufferSize = 10;
+ scoped_refptr<net::IOBufferWithSize> write_buffer(
+ new net::IOBufferWithSize(kBufferSize));
+
+ disk_cache::Entry* entry = NULL;
+ EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
+ ASSERT_NE(null_entry, entry);
+ ScopedEntryPtr entry_closer(entry);
+
+ CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
+ CallbackTest write_callback(&helper, false);
+ int ret = entry->WriteData(
+ 0,
+ 0,
+ write_buffer.get(),
+ write_buffer->size(),
+ base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
+ false);
+ ASSERT_EQ(net::ERR_IO_PENDING, ret);
+ helper.WaitUntilCacheIoFinished(++expected_callback_runs);
+}
+
+TEST_F(DiskCacheEntryTest,
+ SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
+ // Test sequence:
+ // Create, Write, Read, Close.
+ SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* const null_entry = NULL;
+ MessageLoopHelper helper;
+
+ disk_cache::Entry* entry = NULL;
+  // Note that |entry| is only set once CreateEntry() has completed, which is
+  // why we have to wait (i.e. use the helper CreateEntry() function).
+ EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
+ ASSERT_NE(null_entry, entry);
+ ScopedEntryPtr entry_closer(entry);
+
+ const int kBufferSize = 10;
+ scoped_refptr<net::IOBufferWithSize> write_buffer(
+ new net::IOBufferWithSize(kBufferSize));
+ CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
+ CallbackTest write_callback(&helper, false);
+ int ret = entry->WriteData(
+ 0,
+ 0,
+ write_buffer.get(),
+ write_buffer->size(),
+ base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
+ false);
+ EXPECT_EQ(net::ERR_IO_PENDING, ret);
+ int expected_callback_runs = 1;
+
+ scoped_refptr<net::IOBufferWithSize> read_buffer(
+ new net::IOBufferWithSize(kBufferSize));
+ CallbackTest read_callback(&helper, false);
+ ret = entry->ReadData(
+ 0,
+ 0,
+ read_buffer.get(),
+ read_buffer->size(),
+ base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
+ EXPECT_EQ(net::ERR_IO_PENDING, ret);
+ ++expected_callback_runs;
+
+ helper.WaitUntilCacheIoFinished(expected_callback_runs);
+ ASSERT_EQ(read_buffer->size(), write_buffer->size());
+ EXPECT_EQ(
+ 0,
+ memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
+ // Test sequence:
+ // Create, Write, Read, Write, Read, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ MessageLoopHelper helper;
+ CallbackTest callback1(&helper, false);
+ CallbackTest callback2(&helper, false);
+ CallbackTest callback3(&helper, false);
+ CallbackTest callback4(&helper, false);
+ CallbackTest callback5(&helper, false);
+
+ int expected = 0;
+ const int kSize1 = 10;
+ const int kSize2 = 20;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+ scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+
+ disk_cache::Entry* entry = NULL;
+ // Create is optimistic, must return OK.
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&callback1))));
+ EXPECT_NE(null, entry);
+ ScopedEntryPtr entry_closer(entry);
+
+  // This write may or may not be optimistic (it depends on whether the
+  // previous optimistic create has already finished by the time we issue the
+  // write here).
+ int ret = entry->WriteData(
+ 0,
+ 0,
+ buffer1.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
+ false);
+ EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ // This Read must not be optimistic, since we don't support that yet.
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(
+ 0,
+ 0,
+ buffer1_read.get(),
+ kSize1,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
+ expected++;
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
+
+ // At this point after waiting, the pending operations queue on the entry
+ // should be empty, so the next Write operation must run as optimistic.
+ EXPECT_EQ(kSize2,
+ entry->WriteData(
+ 0,
+ 0,
+ buffer2.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
+ false));
+
+  // Let's do another read so we block until both the write and the read
+  // operations finish, and we can then test for HasOneRef() below.
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(
+ 0,
+ 0,
+ buffer2_read.get(),
+ kSize2,
+ base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
+
+ // Check that we are not leaking.
+ EXPECT_NE(entry, null);
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
+ // Test sequence:
+ // Create, Open, Close, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ MessageLoopHelper helper;
+ CallbackTest callback1(&helper, false);
+ CallbackTest callback2(&helper, false);
+
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&callback1))));
+ EXPECT_NE(null, entry);
+ ScopedEntryPtr entry_closer(entry);
+
+ disk_cache::Entry* entry2 = NULL;
+ ASSERT_EQ(net::ERR_IO_PENDING,
+ cache_->OpenEntry(key, &entry2,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&callback2))));
+ ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
+
+ EXPECT_NE(null, entry2);
+ EXPECT_EQ(entry, entry2);
+
+ // We have to call close twice, since we called create and open above.
+ entry->Close();
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
+ // Test sequence:
+ // Create, Close, Open, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ EXPECT_NE(null, entry);
+ entry->Close();
+
+ net::TestCompletionCallback cb;
+ disk_cache::Entry* entry2 = NULL;
+ ASSERT_EQ(net::ERR_IO_PENDING,
+ cache_->OpenEntry(key, &entry2, cb.callback()));
+ ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+ ScopedEntryPtr entry_closer(entry2);
+
+ EXPECT_NE(null, entry2);
+ EXPECT_EQ(entry, entry2);
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
+ // Test sequence:
+ // Create, Close, Write, Open, Open, Close, Write, Read, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ net::TestCompletionCallback cb;
+ const int kSize1 = 10;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ EXPECT_NE(null, entry);
+ entry->Close();
+
+  // Let's do a Write so we block until both the Close and the Write
+  // operations finish. The Write must fail since we are writing to a closed
+  // entry.
+ EXPECT_EQ(
+ net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
+ EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
+
+ // Finish running the pending tasks so that we fully complete the close
+ // operation and destroy the entry object.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ // At this point the |entry| must have been destroyed, and called
+ // RemoveSelfFromBackend().
+ disk_cache::Entry* entry2 = NULL;
+ ASSERT_EQ(net::ERR_IO_PENDING,
+ cache_->OpenEntry(key, &entry2, cb.callback()));
+ ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+ EXPECT_NE(null, entry2);
+
+ disk_cache::Entry* entry3 = NULL;
+ ASSERT_EQ(net::ERR_IO_PENDING,
+ cache_->OpenEntry(key, &entry3, cb.callback()));
+ ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+ EXPECT_NE(null, entry3);
+ EXPECT_EQ(entry2, entry3);
+ entry3->Close();
+
+  // The previous Close doesn't actually close the entry since we opened it
+  // twice, so the next Write operation must succeed, and it must be performed
+  // optimistically, since there is no operation running on this entry.
+ EXPECT_EQ(kSize1,
+ entry2->WriteData(
+ 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
+
+  // Let's do another read so we block until both the write and the read
+  // operations finish, and we can then test for HasOneRef() below.
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry2->ReadData(0, 0, buffer1.get(), kSize1, cb.callback()));
+ EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
+ entry2->Close();
+}
+
+// This test is flaky because of the race between Create and a following Doom.
+// See test SimpleCacheCreateDoomRace.
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheOptimistic5) {
+ // Test sequence:
+ // Create, Doom, Write, Read, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ net::TestCompletionCallback cb;
+ const int kSize1 = 10;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ EXPECT_NE(null, entry);
+ ScopedEntryPtr entry_closer(entry);
+ entry->Doom();
+
+ EXPECT_EQ(
+ net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
+ EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0, 0, buffer1.get(), kSize1, cb.callback()));
+ EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
+ // Test sequence:
+ // Create, Write, Doom, Doom, Read, Doom, Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ net::TestCompletionCallback cb;
+ const int kSize1 = 10;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ EXPECT_NE(null, entry);
+ ScopedEntryPtr entry_closer(entry);
+
+ EXPECT_EQ(
+ net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
+ EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+ entry->Doom();
+ entry->Doom();
+
+ // This Read must not be optimistic, since we don't support that yet.
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0, 0, buffer1_read.get(), kSize1, cb.callback()));
+ EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
+
+ entry->Doom();
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+}
+
+// Confirm that IO buffers are not referenced by the Simple Cache after a write
+// completes.
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+ disk_cache::Entry* entry = NULL;
+
+ // First, an optimistic create.
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ ASSERT_TRUE(entry);
+ ScopedEntryPtr entry_closer(entry);
+
+ const int kWriteSize = 512;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
+ EXPECT_TRUE(buffer1->HasOneRef());
+ CacheTestFillBuffer(buffer1->data(), kWriteSize, false);
+
+ // An optimistic write happens only when there is an empty queue of pending
+ // operations. To ensure the queue is empty, we issue a write and wait until
+ // it completes.
+ EXPECT_EQ(kWriteSize,
+ WriteData(entry, 0, 0, buffer1.get(), kWriteSize, false));
+ EXPECT_TRUE(buffer1->HasOneRef());
+
+ // Finally, we should perform an optimistic write and confirm that all
+ // references to the IO buffer have been released.
+ EXPECT_EQ(
+ kWriteSize,
+ entry->WriteData(
+ 1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
+ EXPECT_TRUE(buffer1->HasOneRef());
+}
+
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCreateDoomRace) {
+ // Test sequence:
+ // Create, Doom, Write, Close, Check files are not on disk anymore.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ net::TestCompletionCallback cb;
+ const int kSize1 = 10;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ EXPECT_NE(null, entry);
+
+ cache_->DoomEntry(key, cb.callback());
+ EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+
+  // Let's do a Write so we block until all operations are done, so we can
+  // check HasOneRef() below. This call can't be optimistic, and we are
+  // checking that here.
+ EXPECT_EQ(
+ net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
+ EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+ entry->Close();
+
+ // Finish running the pending tasks so that we fully complete the close
+ // operation and destroy the entry object.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
+ base::FilePath entry_file_path = cache_path_.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, i));
+ base::PlatformFileInfo info;
+ EXPECT_FALSE(file_util::GetFileInfo(entry_file_path, &info));
+ }
+}
+
+// Checks that an optimistic Create would fail later on a racing Open.
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ // Create a corrupt file in place of a future entry. Optimistic create should
+ // initially succeed, but realize later that creation failed.
+ const std::string key = "the key";
+ net::TestCompletionCallback cb;
+ disk_cache::Entry* entry = NULL;
+ disk_cache::Entry* entry2 = NULL;
+
+ EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
+ key, cache_path_));
+ EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
+ ASSERT_TRUE(entry);
+ ScopedEntryPtr entry_closer(entry);
+ ASSERT_NE(net::OK, OpenEntry(key, &entry2));
+
+ // Check that we are not leaking.
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+
+ DisableIntegrityCheck();
+}
+
+// Tests that old entries are evicted while new entries remain in the index.
+// This test relies on non-mandatory properties of the Simple Cache backend:
+// LRU eviction, specific high- and low-watermark values, etc.
+// When changing the eviction algorithm, the test will have to be re-engineered.
+TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
+ const int kMaxSize = 200 * 1024;
+ const int kWriteSize = kMaxSize / 10;
+ const int kNumExtraEntries = 12;
+ SetSimpleCacheMode();
+ SetMaxSize(kMaxSize);
+ InitCache();
+
+ std::string key1("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
+ CacheTestFillBuffer(buffer->data(), kWriteSize, false);
+ EXPECT_EQ(kWriteSize,
+ WriteData(entry, 0, 0, buffer.get(), kWriteSize, false));
+ entry->Close();
+
+ std::string key2("the key prefix");
+ for (int i = 0; i < kNumExtraEntries; i++) {
+ ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
+ ScopedEntryPtr entry_closer(entry);
+ EXPECT_EQ(kWriteSize,
+ WriteData(entry, 0, 0, buffer.get(), kWriteSize, false));
+ }
+
+ // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
+ // the internal knowledge about |SimpleBackendImpl|.
+ ASSERT_NE(net::OK, OpenEntry(key1, &entry))
+ << "Should have evicted the old entry";
+ for (int i = 0; i < 2; i++) {
+ int entry_no = kNumExtraEntries - i - 1;
+    // Generally there is no guarantee that at this point the background
+    // eviction is finished. We are testing the positive case, i.e. that the
+    // eviction never reaches this entry, which should be non-flaky.
+ ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
+ &entry))
+ << "Should not have evicted fresh entry " << entry_no;
+ entry->Close();
+ }
+}
+
+// Tests that if a read and a following in-flight truncate are both in progress
+// simultaneously, they can both complete successfully. See
+// http://crbug.com/239223
+TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+
+ const int kBufferSize = 1024;
+ scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
+ CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
+
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ EXPECT_EQ(kBufferSize,
+ WriteData(entry, 0, 0, write_buffer.get(), kBufferSize, false));
+ entry->Close();
+ entry = NULL;
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ScopedEntryPtr entry_closer(entry);
+
+ MessageLoopHelper helper;
+ int expected = 0;
+
+ // Make a short read.
+ const int kReadBufferSize = 512;
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
+ CallbackTest read_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0,
+ 0,
+ read_buffer.get(),
+ kReadBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&read_callback))));
+ ++expected;
+
+ // Truncate the entry to the length of that read.
+ scoped_refptr<net::IOBuffer>
+ truncate_buffer(new net::IOBuffer(kReadBufferSize));
+ CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
+ CallbackTest truncate_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->WriteData(0,
+ 0,
+ truncate_buffer.get(),
+ kReadBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&truncate_callback)),
+ true));
+ ++expected;
+
+ // Wait for both the read and truncation to finish, and confirm that both
+ // succeeded.
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(kReadBufferSize, read_callback.last_result());
+ EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
+ EXPECT_EQ(0,
+ memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
+}
+
+// Tests that if a write and a read dependent on it are both in flight
+// simultaneously, they can both complete successfully without erroneous
+// early returns. See http://crbug.com/239223
+TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+ ScopedEntryPtr entry_closer(entry);
+
+ const int kBufferSize = 1024;
+ scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
+ CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
+
+ MessageLoopHelper helper;
+ int expected = 0;
+
+ CallbackTest write_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->WriteData(0,
+ 0,
+ write_buffer.get(),
+ kBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&write_callback)),
+ true));
+ ++expected;
+
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
+ CallbackTest read_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0,
+ 0,
+ read_buffer.get(),
+ kBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&read_callback))));
+ ++expected;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(kBufferSize, write_callback.last_result());
+ EXPECT_EQ(kBufferSize, read_callback.last_result());
+ EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
+ SetSimpleCacheMode();
+ DisableSimpleCacheWaitForIndex();
+ DisableIntegrityCheck();
+ InitCache();
+
+ // Assume the index is not initialized, which is likely, since we are blocking
+ // the IO thread from executing the index finalization step.
+ disk_cache::Entry* entry1;
+ net::TestCompletionCallback cb1;
+ disk_cache::Entry* entry2;
+ net::TestCompletionCallback cb2;
+ int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
+ int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());
+
+ EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
+ ASSERT_EQ(net::OK, cb2.GetResult(rv2));
+ entry2->Close();
+}
+
+// Checks that reading two entries simultaneously does not discard a CRC check.
+// TODO(pasko): make it work with Simple Cache.
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "key";
+
+ int size;
+ ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
+
+ scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
+ scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
+
+ // Advance the first reader a little.
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));
+
+ // Make the second reader pass the point where the first one is, and close.
+ disk_cache::Entry* entry2 = NULL;
+ EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
+ EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
+ EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
+ entry2->Close();
+
+  // Reading the data till the end should produce an error.
+ EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
+ entry->Close();
+ DisableIntegrityCheck();
+}
+
+// Checks one more scenario of overlapped reading of a bad entry. It differs
+// from |SimpleCacheMultipleReadersCheckCRC| only in the order of the last two
+// reads.
+TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "key";
+ int size;
+ ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
+
+ scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
+ scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
+
+ // Advance the first reader a little.
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ ScopedEntryPtr entry_closer(entry);
+ EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));
+
+ // Advance the 2nd reader by the same amount.
+ disk_cache::Entry* entry2 = NULL;
+ EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
+ ScopedEntryPtr entry2_closer(entry2);
+ EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
+
+ // Continue reading 1st.
+ EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
+
+ // This read should fail as well because we have previous read failures.
+ EXPECT_GT(0, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
+ DisableIntegrityCheck();
+}
+
+// Tests that if we sequentially read each subset of the data until all of it
+// is read, the CRC is calculated correctly and the reads are successful.
+TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
+ // Test sequence:
+ // Create, Write, Read (first half of data), Read (second half of data),
+ // Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ const int kHalfSize = 200;
+ const int kSize = 2 * kHalfSize;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_NE(null, entry);
+
+ EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
+ entry->Close();
+
+ disk_cache::Entry* entry2 = NULL;
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
+ EXPECT_EQ(entry, entry2);
+
+ // Read the first half of the data.
+ int offset = 0;
+ int buf_len = kHalfSize;
+ scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
+ EXPECT_EQ(buf_len, ReadData(entry2, 0, offset, buffer1_read1.get(), buf_len));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));
+
+ // Read the second half of the data.
+ offset = buf_len;
+ buf_len = kHalfSize;
+ scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
+ EXPECT_EQ(buf_len, ReadData(entry2, 0, offset, buffer1_read2.get(), buf_len));
+ char* buffer1_data = buffer1->data() + offset;
+ EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));
+
+ // Check that we are not leaking.
+ EXPECT_NE(entry, null);
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+ entry->Close();
+ entry = NULL;
+}
+
+// Tests that we can write the data out of sequence and read it back
+// correctly. In this case the CRC will not be present.
+TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
+ // Test sequence:
+ // Create, Write (second half of data), Write (first half of data), Read,
+ // Close.
+ SetSimpleCacheMode();
+ InitCache();
+ disk_cache::Entry* null = NULL;
+ const char key[] = "the first key";
+
+ const int kHalfSize = 200;
+ const int kSize = 2 * kHalfSize;
+ scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ char* buffer1_data = buffer1->data() + kHalfSize;
+ memcpy(buffer2->data(), buffer1_data, kHalfSize);
+ disk_cache::Entry* entry = NULL;
+
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_NE(null, entry);
+
+ int offset = kHalfSize;
+ int buf_len = kHalfSize;
+
+ EXPECT_EQ(buf_len,
+ WriteData(entry, 0, offset, buffer2.get(), buf_len, false));
+ offset = 0;
+ buf_len = kHalfSize;
+ EXPECT_EQ(buf_len,
+ WriteData(entry, 0, offset, buffer1.get(), buf_len, false));
+ entry->Close();
+
+ disk_cache::Entry* entry2 = NULL;
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
+ EXPECT_EQ(entry, entry2);
+
+ scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
+ EXPECT_EQ(kSize, ReadData(entry2, 0, 0, buffer1_read1.get(), kSize));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
+
+ // Check that we are not leaking.
+ ASSERT_NE(entry, null);
+ EXPECT_TRUE(
+ static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+ entry->Close();
+ entry = NULL;
+}
+
+#endif // defined(OS_POSIX)
diff --git a/chromium/net/disk_cache/errors.h b/chromium/net/disk_cache/errors.h
new file mode 100644
index 00000000000..1c69d42f770
--- /dev/null
+++ b/chromium/net/disk_cache/errors.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Error codes reported by self tests or to UMA.
+
+#ifndef NET_DISK_CACHE_ERRORS_H__
+#define NET_DISK_CACHE_ERRORS_H__
+
+namespace disk_cache {
+
+enum {
+ ERR_NO_ERROR = 0,
+ ERR_INIT_FAILED = -1,
+ ERR_INVALID_TAIL = -2,
+ ERR_INVALID_HEAD = -3,
+ ERR_INVALID_PREV = -4,
+ ERR_INVALID_NEXT = -5,
+ ERR_INVALID_ENTRY = -6,
+ ERR_INVALID_ADDRESS = -7,
+ ERR_INVALID_LINKS = -8,
+ ERR_NUM_ENTRIES_MISMATCH = -9,
+ ERR_READ_FAILURE = -10,
+ ERR_PREVIOUS_CRASH = -11,
+ ERR_STORAGE_ERROR = -12,
+ ERR_INVALID_MASK = -13,
+ ERR_CACHE_DOOMED = -14, // Not really an error condition.
+ ERR_CACHE_CREATED = -15 // Not really an error condition.
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ERRORS_H__
diff --git a/chromium/net/disk_cache/eviction.cc b/chromium/net/disk_cache/eviction.cc
new file mode 100644
index 00000000000..47b52552ef5
--- /dev/null
+++ b/chromium/net/disk_cache/eviction.cc
@@ -0,0 +1,597 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The eviction policy is a very simple pure LRU, so the elements at the end of
+// the list are evicted until kCleanUpMargin free space is available. There is
+// only one list in use (Rankings::NO_USE), and elements are sent to the front
+// of the list whenever they are accessed.
+
+// The new (in-development) eviction policy adds re-use as a factor to evict
+// an entry. The story so far:
+
+// Entries are linked on separate lists depending on how often they are used.
+// When we see an element for the first time, it goes to the NO_USE list; if
+// the object is reused later on, we move it to the LOW_USE list, until it is
+// used kHighUse times, at which point it is moved to the HIGH_USE list.
+// Whenever an element is evicted, we move it to the DELETED list so that if the
+// element is accessed again, we remember the fact that it was already stored
+// and maybe in the future we don't evict that element.
+
+// When we have to evict an element, first we try to use the last element from
+// the NO_USE list, then we move on to LOW_USE, and only then do we evict an
+// entry from HIGH_USE. We attempt to keep entries in the cache for at least
+// kTargetTime hours (with frequently accessed items stored for longer
+// periods), but if we cannot do that, we fall back to keeping each list
+// roughly the same size so that we have a chance to see an element again and
+// move it to another list.
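A minimal sketch of the list-promotion rule described above, assuming only the
kHighUse threshold defined later in this file; the stripped-down List enum and
the ListForReuseCount() helper are hypothetical and are not part of
eviction.cc:

// Hypothetical sketch: which list an entry would live on under the policy
// described above, given its reuse count (compare OnOpenEntryV2() below).
enum List { NO_USE, LOW_USE, HIGH_USE, DELETED };

const int kHighUse = 10;  // Same threshold as the constant defined below.

List ListForReuseCount(int reuse_count) {
  if (reuse_count == 0)
    return NO_USE;    // Seen once and never reused.
  if (reuse_count < kHighUse)
    return LOW_USE;   // Reused, but fewer than kHighUse times.
  return HIGH_USE;    // Reused kHighUse times or more.
}
// Evicted entries are remembered on DELETED so that a later re-fetch can
// restore part of their history (see OnCreateEntryV2()).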
+
+#include "net/disk_cache/eviction.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/trace.h"
+
+using base::Time;
+using base::TimeTicks;
+
+namespace {
+
+const int kCleanUpMargin = 1024 * 1024;
+const int kHighUse = 10; // Reuse count to be on the HIGH_USE list.
+const int kTargetTime = 24 * 7; // Time to be evicted (hours since last use).
+const int kMaxDelayedTrims = 60;
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
+
+bool FallingBehind(int current_size, int max_size) {
+ return current_size > max_size - kCleanUpMargin * 20;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+// The real initialization happens during Init(); init_ is the only member
+// that has to be initialized here.
+Eviction::Eviction()
+ : backend_(NULL),
+ init_(false),
+ ptr_factory_(this) {
+}
+
+Eviction::~Eviction() {
+}
+
+void Eviction::Init(BackendImpl* backend) {
+ // We grab a bunch of info from the backend to make the code a little cleaner
+ // when we're actually doing work.
+ backend_ = backend;
+ rankings_ = &backend->rankings_;
+ header_ = &backend_->data_->header;
+ max_size_ = LowWaterAdjust(backend_->max_size_);
+ index_size_ = backend->mask_ + 1;
+ new_eviction_ = backend->new_eviction_;
+ first_trim_ = true;
+ trimming_ = false;
+ delay_trim_ = false;
+ trim_delays_ = 0;
+ init_ = true;
+ test_mode_ = false;
+}
+
+void Eviction::Stop() {
+ // It is possible for the backend initialization to fail, in which case this
+ // object was never initialized... and there is nothing to do.
+ if (!init_)
+ return;
+
+ // We want to stop further evictions, so let's pretend that we are busy from
+ // this point on.
+ DCHECK(!trimming_);
+ trimming_ = true;
+ ptr_factory_.InvalidateWeakPtrs();
+}
+
+void Eviction::TrimCache(bool empty) {
+ if (backend_->disabled_ || trimming_)
+ return;
+
+ if (!empty && !ShouldTrim())
+ return PostDelayedTrim();
+
+ if (new_eviction_)
+ return TrimCacheV2(empty);
+
+ Trace("*** Trim Cache ***");
+ trimming_ = true;
+ TimeTicks start = TimeTicks::Now();
+ Rankings::ScopedRankingsBlock node(rankings_);
+ Rankings::ScopedRankingsBlock next(
+ rankings_, rankings_->GetPrev(node.get(), Rankings::NO_USE));
+ int deleted_entries = 0;
+ int target_size = empty ? 0 : max_size_;
+ while ((header_->num_bytes > target_size || test_mode_) && next.get()) {
+ // The iterator could be invalidated within EvictEntry().
+ if (!next->HasData())
+ break;
+ node.reset(next.release());
+ next.reset(rankings_->GetPrev(node.get(), Rankings::NO_USE));
+ if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
+ // This entry is not being used by anybody.
+ // Do NOT use node as an iterator after this point.
+ rankings_->TrackRankingsBlock(node.get(), false);
+ if (EvictEntry(node.get(), empty, Rankings::NO_USE) && !test_mode_)
+ deleted_entries++;
+
+ if (!empty && test_mode_)
+ break;
+ }
+ if (!empty && (deleted_entries > 20 ||
+ (TimeTicks::Now() - start).InMilliseconds() > 20)) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimCache, ptr_factory_.GetWeakPtr(), false));
+ break;
+ }
+ }
+
+ if (empty) {
+ CACHE_UMA(AGE_MS, "TotalClearTimeV1", 0, start);
+ } else {
+ CACHE_UMA(AGE_MS, "TotalTrimTimeV1", 0, start);
+ }
+ CACHE_UMA(COUNTS, "TrimItemsV1", 0, deleted_entries);
+
+ trimming_ = false;
+ Trace("*** Trim Cache end ***");
+ return;
+}
+
+void Eviction::UpdateRank(EntryImpl* entry, bool modified) {
+ if (new_eviction_)
+ return UpdateRankV2(entry, modified);
+
+ rankings_->UpdateRank(entry->rankings(), modified, GetListForEntry(entry));
+}
+
+void Eviction::OnOpenEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnOpenEntryV2(entry);
+}
+
+void Eviction::OnCreateEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnCreateEntryV2(entry);
+
+ rankings_->Insert(entry->rankings(), true, GetListForEntry(entry));
+}
+
+void Eviction::OnDoomEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnDoomEntryV2(entry);
+
+ if (entry->LeaveRankingsBehind())
+ return;
+
+ rankings_->Remove(entry->rankings(), GetListForEntry(entry), true);
+}
+
+void Eviction::OnDestroyEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnDestroyEntryV2(entry);
+}
+
+void Eviction::SetTestMode() {
+ test_mode_ = true;
+}
+
+void Eviction::TrimDeletedList(bool empty) {
+ DCHECK(test_mode_ && new_eviction_);
+ TrimDeleted(empty);
+}
+
+void Eviction::PostDelayedTrim() {
+ // Prevent posting multiple tasks.
+ if (delay_trim_)
+ return;
+ delay_trim_ = true;
+ trim_delays_++;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&Eviction::DelayedTrim, ptr_factory_.GetWeakPtr()),
+ base::TimeDelta::FromMilliseconds(1000));
+}
+
+void Eviction::DelayedTrim() {
+ delay_trim_ = false;
+ if (trim_delays_ < kMaxDelayedTrims && backend_->IsLoaded())
+ return PostDelayedTrim();
+
+ TrimCache(false);
+}
+
+bool Eviction::ShouldTrim() {
+ if (!FallingBehind(header_->num_bytes, max_size_) &&
+ trim_delays_ < kMaxDelayedTrims && backend_->IsLoaded()) {
+ return false;
+ }
+
+ UMA_HISTOGRAM_COUNTS("DiskCache.TrimDelays", trim_delays_);
+ trim_delays_ = 0;
+ return true;
+}
+
+bool Eviction::ShouldTrimDeleted() {
+ int index_load = header_->num_entries * 100 / index_size_;
+
+  // If the index is not loaded, the deleted list will tend to double the size
+  // of the other three lists (40% of the total). Otherwise, all lists will be
+  // about the same size.
+ int max_length = (index_load < 25) ? header_->num_entries * 2 / 5 :
+ header_->num_entries / 4;
+ return (!test_mode_ && header_->lru.sizes[Rankings::DELETED] > max_length);
+}
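A small standalone sketch of the arithmetic above, using made-up values for
header_->num_entries and index_size_ (both hypothetical here), to show how the
cap on the DELETED list changes with index load:

#include <cstdio>

// Illustration only: mirrors the max_length computation in ShouldTrimDeleted()
// with hypothetical counter values.
int main() {
  const int num_entries = 10000;  // Stands in for header_->num_entries.
  const int index_size = 65536;   // Stands in for index_size_ (mask + 1).
  const int index_load = num_entries * 100 / index_size;          // 15, < 25.
  const int max_length = (index_load < 25) ? num_entries * 2 / 5  // 4000
                                            : num_entries / 4;
  std::printf("index_load=%d%% max_deleted_list_length=%d\n",
              index_load, max_length);
  return 0;
}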
+
+void Eviction::ReportTrimTimes(EntryImpl* entry) {
+ if (first_trim_) {
+ first_trim_ = false;
+ if (backend_->ShouldReportAgain()) {
+ CACHE_UMA(AGE, "TrimAge", 0, entry->GetLastUsed());
+ ReportListStats();
+ }
+
+ if (header_->lru.filled)
+ return;
+
+ header_->lru.filled = 1;
+
+ if (header_->create_time) {
+      // This is the first entry that we have to evict; generate some noise.
+ backend_->FirstEviction();
+ } else {
+      // This is an old file, but we may want more reports from this user, so
+      // let's save some create_time.
+ Time::Exploded old = {0};
+ old.year = 2009;
+ old.month = 3;
+ old.day_of_month = 1;
+ header_->create_time = Time::FromLocalExploded(old).ToInternalValue();
+ }
+ }
+}
+
+Rankings::List Eviction::GetListForEntry(EntryImpl* entry) {
+ return Rankings::NO_USE;
+}
+
+bool Eviction::EvictEntry(CacheRankingsBlock* node, bool empty,
+ Rankings::List list) {
+ EntryImpl* entry = backend_->GetEnumeratedEntry(node, list);
+ if (!entry) {
+ Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ return false;
+ }
+
+ ReportTrimTimes(entry);
+ if (empty || !new_eviction_) {
+ entry->DoomImpl();
+ } else {
+ entry->DeleteEntryData(false);
+ EntryStore* info = entry->entry()->Data();
+ DCHECK_EQ(ENTRY_NORMAL, info->state);
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry), true);
+ info->state = ENTRY_EVICTED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+ }
+ if (!empty)
+ backend_->OnEvent(Stats::TRIM_ENTRY);
+
+ entry->Release();
+
+ return true;
+}
+
+// -----------------------------------------------------------------------
+
+void Eviction::TrimCacheV2(bool empty) {
+ Trace("*** Trim Cache ***");
+ trimming_ = true;
+ TimeTicks start = TimeTicks::Now();
+
+ const int kListsToSearch = 3;
+ Rankings::ScopedRankingsBlock next[kListsToSearch];
+ int list = Rankings::LAST_ELEMENT;
+
+ // Get a node from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ bool done = false;
+ next[i].set_rankings(rankings_);
+ if (done)
+ continue;
+ next[i].reset(rankings_->GetPrev(NULL, static_cast<Rankings::List>(i)));
+ if (!empty && NodeIsOldEnough(next[i].get(), i)) {
+ list = static_cast<Rankings::List>(i);
+ done = true;
+ }
+ }
+
+ // If we are not meeting the time targets, let's move on to list length.
+ if (!empty && Rankings::LAST_ELEMENT == list)
+ list = SelectListByLength(next);
+
+ if (empty)
+ list = 0;
+
+ Rankings::ScopedRankingsBlock node(rankings_);
+ int deleted_entries = 0;
+ int target_size = empty ? 0 : max_size_;
+
+ for (; list < kListsToSearch; list++) {
+ while ((header_->num_bytes > target_size || test_mode_) &&
+ next[list].get()) {
+ // The iterator could be invalidated within EvictEntry().
+ if (!next[list]->HasData())
+ break;
+ node.reset(next[list].release());
+ next[list].reset(rankings_->GetPrev(node.get(),
+ static_cast<Rankings::List>(list)));
+ if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
+ // This entry is not being used by anybody.
+ // Do NOT use node as an iterator after this point.
+ rankings_->TrackRankingsBlock(node.get(), false);
+ if (EvictEntry(node.get(), empty, static_cast<Rankings::List>(list)))
+ deleted_entries++;
+
+ if (!empty && test_mode_)
+ break;
+ }
+ if (!empty && (deleted_entries > 20 ||
+ (TimeTicks::Now() - start).InMilliseconds() > 20)) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimCache, ptr_factory_.GetWeakPtr(), false));
+ break;
+ }
+ }
+ if (!empty)
+ list = kListsToSearch;
+ }
+
+ if (empty) {
+ TrimDeleted(true);
+ } else if (ShouldTrimDeleted()) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimDeleted, ptr_factory_.GetWeakPtr(), empty));
+ }
+
+ if (empty) {
+ CACHE_UMA(AGE_MS, "TotalClearTimeV2", 0, start);
+ } else {
+ CACHE_UMA(AGE_MS, "TotalTrimTimeV2", 0, start);
+ }
+ CACHE_UMA(COUNTS, "TrimItemsV2", 0, deleted_entries);
+
+ Trace("*** Trim Cache end ***");
+ trimming_ = false;
+ return;
+}
+
+void Eviction::UpdateRankV2(EntryImpl* entry, bool modified) {
+ rankings_->UpdateRank(entry->rankings(), modified, GetListForEntryV2(entry));
+}
+
+void Eviction::OnOpenEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK_EQ(ENTRY_NORMAL, info->state);
+
+ if (info->reuse_count < kint32max) {
+ info->reuse_count++;
+ entry->entry()->set_modified();
+
+ // We may need to move this to a new list.
+ if (1 == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::NO_USE, true);
+ rankings_->Insert(entry->rankings(), false, Rankings::LOW_USE);
+ entry->entry()->Store();
+ } else if (kHighUse == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::LOW_USE, true);
+ rankings_->Insert(entry->rankings(), false, Rankings::HIGH_USE);
+ entry->entry()->Store();
+ }
+ }
+}
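+
+// In practice this means that the first open after creation promotes an entry
+// from NO_USE to LOW_USE, and reaching kHighUse reuses promotes it to
+// HIGH_USE (see GetListForEntryV2()).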
+
+void Eviction::OnCreateEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ switch (info->state) {
+ case ENTRY_NORMAL: {
+ DCHECK(!info->reuse_count);
+ DCHECK(!info->refetch_count);
+ break;
+ };
+ case ENTRY_EVICTED: {
+ if (info->refetch_count < kint32max)
+ info->refetch_count++;
+
+ if (info->refetch_count > kHighUse && info->reuse_count < kHighUse) {
+ info->reuse_count = kHighUse;
+ } else {
+ info->reuse_count++;
+ }
+ info->state = ENTRY_NORMAL;
+ entry->entry()->Store();
+ rankings_->Remove(entry->rankings(), Rankings::DELETED, true);
+ break;
+ };
+ default:
+ NOTREACHED();
+ }
+
+ rankings_->Insert(entry->rankings(), true, GetListForEntryV2(entry));
+}
+
+void Eviction::OnDoomEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ if (ENTRY_NORMAL != info->state)
+ return;
+
+ if (entry->LeaveRankingsBehind()) {
+ info->state = ENTRY_DOOMED;
+ entry->entry()->Store();
+ return;
+ }
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry), true);
+
+ info->state = ENTRY_DOOMED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+}
+
+void Eviction::OnDestroyEntryV2(EntryImpl* entry) {
+ if (entry->LeaveRankingsBehind())
+ return;
+
+ rankings_->Remove(entry->rankings(), Rankings::DELETED, true);
+}
+
+Rankings::List Eviction::GetListForEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK_EQ(ENTRY_NORMAL, info->state);
+
+ if (!info->reuse_count)
+ return Rankings::NO_USE;
+
+ if (info->reuse_count < kHighUse)
+ return Rankings::LOW_USE;
+
+ return Rankings::HIGH_USE;
+}
+
+// This is a minimal implementation that just discards the oldest nodes.
+// TODO(rvargas): Do something better here.
+void Eviction::TrimDeleted(bool empty) {
+ Trace("*** Trim Deleted ***");
+ if (backend_->disabled_)
+ return;
+
+ TimeTicks start = TimeTicks::Now();
+ Rankings::ScopedRankingsBlock node(rankings_);
+ Rankings::ScopedRankingsBlock next(
+ rankings_, rankings_->GetPrev(node.get(), Rankings::DELETED));
+ int deleted_entries = 0;
+ while (next.get() &&
+ (empty || (deleted_entries < 20 &&
+ (TimeTicks::Now() - start).InMilliseconds() < 20))) {
+ node.reset(next.release());
+ next.reset(rankings_->GetPrev(node.get(), Rankings::DELETED));
+ if (RemoveDeletedNode(node.get()))
+ deleted_entries++;
+ if (test_mode_)
+ break;
+ }
+
+ if (deleted_entries && !empty && ShouldTrimDeleted()) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimDeleted, ptr_factory_.GetWeakPtr(), false));
+ }
+
+ CACHE_UMA(AGE_MS, "TotalTrimDeletedTime", 0, start);
+ CACHE_UMA(COUNTS, "TrimDeletedItems", 0, deleted_entries);
+ Trace("*** Trim Deleted end ***");
+ return;
+}
+
+bool Eviction::RemoveDeletedNode(CacheRankingsBlock* node) {
+ EntryImpl* entry = backend_->GetEnumeratedEntry(node, Rankings::DELETED);
+ if (!entry) {
+ Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ return false;
+ }
+
+ bool doomed = (entry->entry()->Data()->state == ENTRY_DOOMED);
+ entry->entry()->Data()->state = ENTRY_DOOMED;
+ entry->DoomImpl();
+ entry->Release();
+ return !doomed;
+}
+
+bool Eviction::NodeIsOldEnough(CacheRankingsBlock* node, int list) {
+ if (!node)
+ return false;
+
+ // If possible, we want to keep entries on each list for at least kTargetTime
+ // hours. Each successive list on the enumeration has 2x the target time of
+ // the previous list.
+ Time used = Time::FromInternalValue(node->Data()->last_used);
+ int multiplier = 1 << list;
+ return (Time::Now() - used).InHours() > kTargetTime * multiplier;
+}
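+
+// For instance, if kTargetTime were 24 (hours), the tail entry of list 0 would
+// qualify after 24 hours, list 1 after 48 hours, and list 2 after 96 hours.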
+
+int Eviction::SelectListByLength(Rankings::ScopedRankingsBlock* next) {
+ int data_entries = header_->num_entries -
+ header_->lru.sizes[Rankings::DELETED];
+ // Start by having each list be roughly the same size.
+ if (header_->lru.sizes[0] > data_entries / 3)
+ return 0;
+
+ int list = (header_->lru.sizes[1] > data_entries / 3) ? 1 : 2;
+
+ // Make sure that frequently used items are kept for a minimum time; we know
+ // that this entry is not older than its current target, but before evicting
+ // it from a higher list it should at least be older than the target for
+ // list 0 (kTargetTime), as long as we don't exhaust list 0 in the process.
+ if (!NodeIsOldEnough(next[list].get(), 0) &&
+ header_->lru.sizes[0] > data_entries / 10)
+ list = 0;
+
+ return list;
+}
+
+void Eviction::ReportListStats() {
+ if (!new_eviction_)
+ return;
+
+ Rankings::ScopedRankingsBlock last1(rankings_,
+ rankings_->GetPrev(NULL, Rankings::NO_USE));
+ Rankings::ScopedRankingsBlock last2(rankings_,
+ rankings_->GetPrev(NULL, Rankings::LOW_USE));
+ Rankings::ScopedRankingsBlock last3(rankings_,
+ rankings_->GetPrev(NULL, Rankings::HIGH_USE));
+ Rankings::ScopedRankingsBlock last4(rankings_,
+ rankings_->GetPrev(NULL, Rankings::DELETED));
+
+ if (last1.get())
+ CACHE_UMA(AGE, "NoUseAge", 0,
+ Time::FromInternalValue(last1.get()->Data()->last_used));
+ if (last2.get())
+ CACHE_UMA(AGE, "LowUseAge", 0,
+ Time::FromInternalValue(last2.get()->Data()->last_used));
+ if (last3.get())
+ CACHE_UMA(AGE, "HighUseAge", 0,
+ Time::FromInternalValue(last3.get()->Data()->last_used));
+ if (last4.get())
+ CACHE_UMA(AGE, "DeletedAge", 0,
+ Time::FromInternalValue(last4.get()->Data()->last_used));
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/eviction.h b/chromium/net/disk_cache/eviction.h
new file mode 100644
index 00000000000..f6224a936fd
--- /dev/null
+++ b/chromium/net/disk_cache/eviction.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_EVICTION_H_
+#define NET_DISK_CACHE_EVICTION_H_
+
+#include "base/basictypes.h"
+#include "base/memory/weak_ptr.h"
+#include "net/disk_cache/rankings.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class EntryImpl;
+struct IndexHeader;
+
+// This class implements the eviction algorithm for the cache and it is tightly
+// integrated with BackendImpl.
+class Eviction {
+ public:
+ Eviction();
+ ~Eviction();
+
+ void Init(BackendImpl* backend);
+ void Stop();
+
+ // Deletes entries from the cache until the current size is below the limit.
+ // If empty is true, the whole cache will be trimmed, regardless of entries
+ // being in use.
+ void TrimCache(bool empty);
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(EntryImpl* entry, bool modified);
+
+ // Notifications of interesting events for a given entry.
+ void OnOpenEntry(EntryImpl* entry);
+ void OnCreateEntry(EntryImpl* entry);
+ void OnDoomEntry(EntryImpl* entry);
+ void OnDestroyEntry(EntryImpl* entry);
+
+ // Testing interface.
+ void SetTestMode();
+ void TrimDeletedList(bool empty);
+
+ private:
+ void PostDelayedTrim();
+ void DelayedTrim();
+ bool ShouldTrim();
+ bool ShouldTrimDeleted();
+ void ReportTrimTimes(EntryImpl* entry);
+ Rankings::List GetListForEntry(EntryImpl* entry);
+ bool EvictEntry(CacheRankingsBlock* node, bool empty, Rankings::List list);
+
+ // For a while, we'll just keep a separate set of methods that implement the
+ // new eviction algorithm. This code will replace the original methods when
+ // finished.
+ void TrimCacheV2(bool empty);
+ void UpdateRankV2(EntryImpl* entry, bool modified);
+ void OnOpenEntryV2(EntryImpl* entry);
+ void OnCreateEntryV2(EntryImpl* entry);
+ void OnDoomEntryV2(EntryImpl* entry);
+ void OnDestroyEntryV2(EntryImpl* entry);
+ Rankings::List GetListForEntryV2(EntryImpl* entry);
+ void TrimDeleted(bool empty);
+ bool RemoveDeletedNode(CacheRankingsBlock* node);
+
+ bool NodeIsOldEnough(CacheRankingsBlock* node, int list);
+ int SelectListByLength(Rankings::ScopedRankingsBlock* next);
+ void ReportListStats();
+
+ BackendImpl* backend_;
+ Rankings* rankings_;
+ IndexHeader* header_;
+ int max_size_;
+ int trim_delays_;
+ int index_size_;
+ bool new_eviction_;
+ bool first_trim_;
+ bool trimming_;
+ bool delay_trim_;
+ bool init_;
+ bool test_mode_;
+ base::WeakPtrFactory<Eviction> ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(Eviction);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_EVICTION_H_
diff --git a/chromium/net/disk_cache/experiments.h b/chromium/net/disk_cache/experiments.h
new file mode 100644
index 00000000000..d7d4e58437b
--- /dev/null
+++ b/chromium/net/disk_cache/experiments.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_EXPERIMENTS_H_
+#define NET_DISK_CACHE_EXPERIMENTS_H_
+
+
+namespace disk_cache {
+
+// This lists the experiment groups that we care about. Only add new groups at
+// the end of the list, and always increase the number.
+enum {
+ NO_EXPERIMENT = 0,
+ EXPERIMENT_OLD_FILE1 = 3,
+ EXPERIMENT_OLD_FILE2 = 4,
+ EXPERIMENT_DELETED_LIST_OUT = 11,
+ EXPERIMENT_DELETED_LIST_CONTROL = 12,
+ EXPERIMENT_DELETED_LIST_IN = 13,
+ EXPERIMENT_DELETED_LIST_OUT2 = 14,
+ // There is no EXPERIMENT_SIMPLE_YES since this enum is used in the standard
+ // backend only.
+ EXPERIMENT_SIMPLE_CONTROL = 15,
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_EXPERIMENTS_H_
diff --git a/chromium/net/disk_cache/file.cc b/chromium/net/disk_cache/file.cc
new file mode 100644
index 00000000000..6b569518354
--- /dev/null
+++ b/chromium/net/disk_cache/file.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file.h"
+
+namespace disk_cache {
+
+// Cross platform constructors. Platform specific code is in
+// file_{win,posix}.cc.
+
+File::File() : init_(false), mixed_(false) {}
+
+File::File(bool mixed_mode) : init_(false), mixed_(mixed_mode) {}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/file.h b/chromium/net/disk_cache/file.h
new file mode 100644
index 00000000000..3038d884142
--- /dev/null
+++ b/chromium/net/disk_cache/file.h
@@ -0,0 +1,95 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_H_
+#define NET_DISK_CACHE_FILE_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/platform_file.h"
+#include "net/base/net_export.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace disk_cache {
+
+// This interface is used to support asynchronous ReadData and WriteData calls.
+class FileIOCallback {
+ public:
+ // Notified of the actual number of bytes read or written. This value is
+ // negative if an error occurred.
+ virtual void OnFileIOComplete(int bytes_copied) = 0;
+
+ protected:
+ virtual ~FileIOCallback() {}
+};
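+
+// A minimal callback implementation might look like this (sketch only; the
+// class name is just an example):
+//
+//   class MyCallback : public disk_cache::FileIOCallback {
+//    public:
+//     virtual void OnFileIOComplete(int bytes_copied) OVERRIDE {
+//       if (bytes_copied < 0) {
+//         // A negative value is an error code.
+//       }
+//     }
+//   };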
+
+// Simple wrapper around a file that allows asynchronous operations.
+class NET_EXPORT_PRIVATE File : public base::RefCounted<File> {
+ friend class base::RefCounted<File>;
+ public:
+ File();
+ // Setting mixed_mode to true enables regular synchronous operations for the
+ // file.
+ explicit File(bool mixed_mode);
+
+ // Initializes the object to use the passed in file instead of opening it with
+ // the Init() call. No asynchronous operations can be performed with this
+ // object.
+ explicit File(base::PlatformFile file);
+
+ // Initializes the object to point to a given file. The file must already
+ // exist on disk, and allow shared read and write.
+ bool Init(const base::FilePath& name);
+
+ // Returns the handle or file descriptor.
+ base::PlatformFile platform_file() const;
+
+ // Returns true if the file was opened properly.
+ bool IsValid() const;
+
+ // Performs synchronous IO.
+ bool Read(void* buffer, size_t buffer_len, size_t offset);
+ bool Write(const void* buffer, size_t buffer_len, size_t offset);
+
+ // Performs asynchronous IO. |callback| will be called when the IO completes,
+ // as an APC on the thread that queued the operation.
+ bool Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed);
+ bool Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed);
+
+ // Sets the file's length. The file is truncated or extended with zeros to
+ // the new length.
+ bool SetLength(size_t length);
+ size_t GetLength();
+
+ // Blocks until |num_pending_io| IO operations complete.
+ static void WaitForPendingIO(int* num_pending_io);
+
+ // Drops current pending operations without waiting for them to complete.
+ static void DropPendingIO();
+
+ protected:
+ virtual ~File();
+
+ // Performs the actual asynchronous write. If notify is set and there is no
+ // callback, the call will be re-synchronized.
+ bool AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed);
+
+ private:
+ bool init_;
+ bool mixed_;
+ base::PlatformFile platform_file_; // Regular, asynchronous IO handle.
+ base::PlatformFile sync_platform_file_; // Synchronous IO handle.
+
+ DISALLOW_COPY_AND_ASSIGN(File);
+};
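+
+// Minimal usage sketch (assumes |path| names an existing cache file that
+// allows shared read/write access):
+//
+//   scoped_refptr<disk_cache::File> file(new disk_cache::File(true));
+//   if (file->Init(path)) {
+//     char header[16];
+//     if (file->Read(header, sizeof(header), 0))    // 16 bytes at offset 0.
+//       file->Write(header, sizeof(header), 256);   // Rewrite at offset 256.
+//   }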
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_H_
diff --git a/chromium/net/disk_cache/file_block.h b/chromium/net/disk_cache/file_block.h
new file mode 100644
index 00000000000..25709207df6
--- /dev/null
+++ b/chromium/net/disk_cache/file_block.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_BLOCK_H__
+#define NET_DISK_CACHE_FILE_BLOCK_H__
+
+namespace disk_cache {
+
+// This interface exposes common functionality for a single block of data
+// stored on a block-file, regardless of the real type or size of the block.
+// Used to simplify loading / storing the block from disk.
+class FileBlock {
+ public:
+ virtual ~FileBlock() {}
+
+ // Returns a pointer to the actual data.
+ virtual void* buffer() const = 0;
+
+ // Returns the size of the block.
+ virtual size_t size() const = 0;
+
+ // Returns the file offset of this block.
+ virtual int offset() const = 0;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_BLOCK_H__
diff --git a/chromium/net/disk_cache/file_lock.cc b/chromium/net/disk_cache/file_lock.cc
new file mode 100644
index 00000000000..3d1cfa52d8b
--- /dev/null
+++ b/chromium/net/disk_cache/file_lock.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file_lock.h"
+
+#include "base/atomicops.h"
+
+namespace {
+
+void Barrier() {
+#if !defined(COMPILER_MSVC)
+ // VS uses memory barrier semantics for volatiles.
+ base::subtle::MemoryBarrier();
+#endif
+}
+
+} // namespace
+
+namespace disk_cache {
+
+FileLock::FileLock(BlockFileHeader* header) {
+ updating_ = &header->updating;
+ (*updating_)++;
+ Barrier();
+ acquired_ = true;
+}
+
+FileLock::~FileLock() {
+ Unlock();
+}
+
+void FileLock::Lock() {
+ if (acquired_)
+ return;
+ (*updating_)++;
+ Barrier();
+}
+
+void FileLock::Unlock() {
+ if (!acquired_)
+ return;
+ Barrier();
+ (*updating_)--;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/file_lock.h b/chromium/net/disk_cache/file_lock.h
new file mode 100644
index 00000000000..7fcf75df05a
--- /dev/null
+++ b/chromium/net/disk_cache/file_lock.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_LOCK_H_
+#define NET_DISK_CACHE_FILE_LOCK_H_
+
+#include "net/base/net_export.h"
+#include "net/disk_cache/disk_format_base.h"
+
+namespace disk_cache {
+
+// This class implements a file lock that lives on the header of a memory mapped
+// file. This is NOT a thread related lock, it is a lock to detect corruption
+// of the file when the process crashes in the middle of an update.
+// The lock is acquired on the constructor and released on the destructor.
+// The typical use of the class is:
+// {
+// BlockFileHeader* header = GetFileHeader();
+// FileLock lock(header);
+// header->max_entries = num_entries;
+// // At this point the destructor is going to release the lock.
+// }
+// It is important to perform Lock() and Unlock() operations in the right order,
+// because otherwise the desired effect of the "lock" will not be achieved. If
+// the operations are inlined / optimized, the "locked" operations can happen
+// outside the lock.
+class NET_EXPORT_PRIVATE FileLock {
+ public:
+ explicit FileLock(BlockFileHeader* header);
+ virtual ~FileLock();
+
+ // Virtual to make sure the compiler never inlines the calls.
+ virtual void Lock();
+ virtual void Unlock();
+ private:
+ bool acquired_;
+ volatile int32* updating_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_LOCK_H_
diff --git a/chromium/net/disk_cache/file_posix.cc b/chromium/net/disk_cache/file_posix.cc
new file mode 100644
index 00000000000..2ad3db916e3
--- /dev/null
+++ b/chromium/net/disk_cache/file_posix.cc
@@ -0,0 +1,309 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file.h"
+
+#include <fcntl.h>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/threading/worker_pool.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/in_flight_io.h"
+
+namespace {
+
+// This class represents a single asynchronous IO operation while it is being
+// bounced between threads.
+class FileBackgroundIO : public disk_cache::BackgroundIO {
+ public:
+ // Other than the actual parameters for the IO operation (including the
+ // |callback| that must be notified at the end), we need the controller that
+ // is keeping track of all operations. When done, we notify the controller
+ // (we do NOT invoke the callback), in the worker thread that completed the
+ // operation.
+ FileBackgroundIO(disk_cache::File* file, const void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback,
+ disk_cache::InFlightIO* controller)
+ : disk_cache::BackgroundIO(controller), callback_(callback), file_(file),
+ buf_(buf), buf_len_(buf_len), offset_(offset) {
+ }
+
+ disk_cache::FileIOCallback* callback() {
+ return callback_;
+ }
+
+ disk_cache::File* file() {
+ return file_;
+ }
+
+ // Read and Write are the operations that can be performed asynchronously.
+ // The actual parameters for the operation are set up in the constructor of
+ // the object. Both methods should be called from a worker thread, by posting
+ // a task to the WorkerPool (they are RunnableMethods). When finished,
+ // controller->OnIOComplete() is called.
+ void Read();
+ void Write();
+
+ private:
+ virtual ~FileBackgroundIO() {}
+
+ disk_cache::FileIOCallback* callback_;
+
+ disk_cache::File* file_;
+ const void* buf_;
+ size_t buf_len_;
+ size_t offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileBackgroundIO);
+};
+
+
+// The specialized controller that keeps track of current operations.
+class FileInFlightIO : public disk_cache::InFlightIO {
+ public:
+ FileInFlightIO() {}
+ virtual ~FileInFlightIO() {}
+
+ // These methods start an asynchronous operation. The arguments have the same
+ // semantics of the File asynchronous operations, with the exception that the
+ // operation never finishes synchronously.
+ void PostRead(disk_cache::File* file, void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback);
+ void PostWrite(disk_cache::File* file, const void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback);
+
+ protected:
+ // Invokes the user's completion callback at the end of the IO operation.
+ // |cancel| is true if the actual task posted to the thread is still
+ // queued (because we are inside WaitForPendingIO), and false if said task is
+ // the one performing the call.
+ virtual void OnOperationComplete(disk_cache::BackgroundIO* operation,
+ bool cancel) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FileInFlightIO);
+};
+
+// ---------------------------------------------------------------------------
+
+// Runs on a worker thread.
+void FileBackgroundIO::Read() {
+ if (file_->Read(const_cast<void*>(buf_), buf_len_, offset_)) {
+ result_ = static_cast<int>(buf_len_);
+ } else {
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ }
+ NotifyController();
+}
+
+// Runs on a worker thread.
+void FileBackgroundIO::Write() {
+ bool rv = file_->Write(buf_, buf_len_, offset_);
+
+ result_ = rv ? static_cast<int>(buf_len_) : net::ERR_CACHE_WRITE_FAILURE;
+ NotifyController();
+}
+
+// ---------------------------------------------------------------------------
+
+void FileInFlightIO::PostRead(disk_cache::File *file, void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback *callback) {
+ scoped_refptr<FileBackgroundIO> operation(
+ new FileBackgroundIO(file, buf, buf_len, offset, callback, this));
+ file->AddRef(); // Balanced on OnOperationComplete()
+
+ base::WorkerPool::PostTask(FROM_HERE,
+ base::Bind(&FileBackgroundIO::Read, operation.get()), true);
+ OnOperationPosted(operation.get());
+}
+
+void FileInFlightIO::PostWrite(disk_cache::File* file, const void* buf,
+ size_t buf_len, size_t offset,
+ disk_cache::FileIOCallback* callback) {
+ scoped_refptr<FileBackgroundIO> operation(
+ new FileBackgroundIO(file, buf, buf_len, offset, callback, this));
+ file->AddRef(); // Balanced on OnOperationComplete()
+
+ base::WorkerPool::PostTask(FROM_HERE,
+ base::Bind(&FileBackgroundIO::Write, operation.get()), true);
+ OnOperationPosted(operation.get());
+}
+
+// Runs on the IO thread.
+void FileInFlightIO::OnOperationComplete(disk_cache::BackgroundIO* operation,
+ bool cancel) {
+ FileBackgroundIO* op = static_cast<FileBackgroundIO*>(operation);
+
+ disk_cache::FileIOCallback* callback = op->callback();
+ int bytes = operation->result();
+
+ // Release the references acquired in PostRead / PostWrite.
+ op->file()->Release();
+ callback->OnFileIOComplete(bytes);
+}
+
+// A static object that will broker all async operations.
+FileInFlightIO* s_file_operations = NULL;
+
+// Returns the current FileInFlightIO.
+FileInFlightIO* GetFileInFlightIO() {
+ if (!s_file_operations) {
+ s_file_operations = new FileInFlightIO;
+ }
+ return s_file_operations;
+}
+
+// Deletes the current FileInFlightIO.
+void DeleteFileInFlightIO() {
+ DCHECK(s_file_operations);
+ delete s_file_operations;
+ s_file_operations = NULL;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+File::File(base::PlatformFile file)
+ : init_(true),
+ mixed_(true),
+ platform_file_(file),
+ sync_platform_file_(base::kInvalidPlatformFileValue) {
+}
+
+bool File::Init(const base::FilePath& name) {
+ if (init_)
+ return false;
+
+ int flags = base::PLATFORM_FILE_OPEN |
+ base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE;
+ platform_file_ = base::CreatePlatformFile(name, flags, NULL, NULL);
+ if (platform_file_ < 0) {
+ platform_file_ = 0;
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+base::PlatformFile File::platform_file() const {
+ return platform_file_;
+}
+
+bool File::IsValid() const {
+ if (!init_)
+ return false;
+ return (base::kInvalidPlatformFileValue != platform_file_);
+}
+
+bool File::Read(void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > static_cast<size_t>(kint32max) ||
+ offset > static_cast<size_t>(kint32max))
+ return false;
+
+ int ret = base::ReadPlatformFile(platform_file_, offset,
+ static_cast<char*>(buffer), buffer_len);
+ return (static_cast<size_t>(ret) == buffer_len);
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > static_cast<size_t>(kint32max) ||
+ offset > static_cast<size_t>(kint32max))
+ return false;
+
+ int ret = base::WritePlatformFile(platform_file_, offset,
+ static_cast<const char*>(buffer),
+ buffer_len);
+ return (static_cast<size_t>(ret) == buffer_len);
+}
+
+// We have to increase the ref counter of the file before performing the IO to
+// prevent the completion from happening with an invalid handle (if the file is
+// closed while the IO is in flight).
+bool File::Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Read(buffer, buffer_len, offset);
+ }
+
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ GetFileInFlightIO()->PostRead(this, buffer, buffer_len, offset, callback);
+
+ *completed = false;
+ return true;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Write(buffer, buffer_len, offset);
+ }
+
+ return AsyncWrite(buffer, buffer_len, offset, callback, completed);
+}
+
+bool File::SetLength(size_t length) {
+ DCHECK(init_);
+ if (length > ULONG_MAX)
+ return false;
+
+ return base::TruncatePlatformFile(platform_file_, length);
+}
+
+size_t File::GetLength() {
+ DCHECK(init_);
+ off_t ret = lseek(platform_file_, 0, SEEK_END);
+ if (ret < 0)
+ return 0;
+ return ret;
+}
+
+// Static.
+void File::WaitForPendingIO(int* num_pending_io) {
+ // We may be running unit tests, so we should be able to reset the message
+ // loop.
+ GetFileInFlightIO()->WaitForPendingIO();
+ DeleteFileInFlightIO();
+}
+
+// Static.
+void File::DropPendingIO() {
+ GetFileInFlightIO()->DropPendingIO();
+ DeleteFileInFlightIO();
+}
+
+File::~File() {
+ if (IsValid())
+ base::ClosePlatformFile(platform_file_);
+}
+
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ GetFileInFlightIO()->PostWrite(this, buffer, buffer_len, offset, callback);
+
+ if (completed)
+ *completed = false;
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/file_win.cc b/chromium/net/disk_cache/file_win.cc
new file mode 100644
index 00000000000..f284b501045
--- /dev/null
+++ b/chromium/net/disk_cache/file_win.cc
@@ -0,0 +1,275 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file.h"
+
+#include "base/files/file_path.h"
+#include "base/lazy_instance.h"
+#include "base/message_loop/message_loop.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace {
+
+// Structure used for asynchronous operations.
+struct MyOverlapped {
+ MyOverlapped(disk_cache::File* file, size_t offset,
+ disk_cache::FileIOCallback* callback);
+ ~MyOverlapped() {}
+ OVERLAPPED* overlapped() {
+ return &context_.overlapped;
+ }
+
+ base::MessageLoopForIO::IOContext context_;
+ scoped_refptr<disk_cache::File> file_;
+ disk_cache::FileIOCallback* callback_;
+};
+
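+// |context_| must be the first member: OnIOCompleted() casts the IOContext
+// pointer it receives back to a MyOverlapped, which only works if both start
+// at the same address.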
+COMPILE_ASSERT(!offsetof(MyOverlapped, context_), starts_with_overlapped);
+
+// Helper class to handle the IO completion notifications from the message loop.
+class CompletionHandler : public base::MessageLoopForIO::IOHandler {
+ virtual void OnIOCompleted(base::MessageLoopForIO::IOContext* context,
+ DWORD actual_bytes,
+ DWORD error);
+};
+
+static base::LazyInstance<CompletionHandler> g_completion_handler =
+ LAZY_INSTANCE_INITIALIZER;
+
+void CompletionHandler::OnIOCompleted(
+ base::MessageLoopForIO::IOContext* context,
+ DWORD actual_bytes,
+ DWORD error) {
+ MyOverlapped* data = reinterpret_cast<MyOverlapped*>(context);
+
+ if (error) {
+ DCHECK(!actual_bytes);
+ actual_bytes = static_cast<DWORD>(net::ERR_CACHE_READ_FAILURE);
+ NOTREACHED();
+ }
+
+ if (data->callback_)
+ data->callback_->OnFileIOComplete(static_cast<int>(actual_bytes));
+
+ delete data;
+}
+
+MyOverlapped::MyOverlapped(disk_cache::File* file, size_t offset,
+ disk_cache::FileIOCallback* callback) {
+ memset(this, 0, sizeof(*this));
+ context_.handler = g_completion_handler.Pointer();
+ context_.overlapped.Offset = static_cast<DWORD>(offset);
+ file_ = file;
+ callback_ = callback;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+File::File(base::PlatformFile file)
+ : init_(true), mixed_(true), platform_file_(INVALID_HANDLE_VALUE),
+ sync_platform_file_(file) {
+}
+
+bool File::Init(const base::FilePath& name) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+ DWORD access = GENERIC_READ | GENERIC_WRITE | DELETE;
+ platform_file_ = CreateFile(name.value().c_str(), access, sharing, NULL,
+ OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL);
+
+ if (INVALID_HANDLE_VALUE == platform_file_)
+ return false;
+
+ base::MessageLoopForIO::current()->RegisterIOHandler(
+ platform_file_, g_completion_handler.Pointer());
+
+ init_ = true;
+ sync_platform_file_ = CreateFile(name.value().c_str(), access, sharing, NULL,
+ OPEN_EXISTING, 0, NULL);
+
+ if (INVALID_HANDLE_VALUE == sync_platform_file_)
+ return false;
+
+ return true;
+}
+
+File::~File() {
+ if (!init_)
+ return;
+
+ if (INVALID_HANDLE_VALUE != platform_file_)
+ CloseHandle(platform_file_);
+ if (INVALID_HANDLE_VALUE != sync_platform_file_)
+ CloseHandle(sync_platform_file_);
+}
+
+base::PlatformFile File::platform_file() const {
+ DCHECK(init_);
+ return (INVALID_HANDLE_VALUE == platform_file_) ? sync_platform_file_ :
+ platform_file_;
+}
+
+bool File::IsValid() const {
+ if (!init_)
+ return false;
+ return (INVALID_HANDLE_VALUE != platform_file_ ||
+ INVALID_HANDLE_VALUE != sync_platform_file_);
+}
+
+bool File::Read(void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > LONG_MAX)
+ return false;
+
+ DWORD ret = SetFilePointer(sync_platform_file_, static_cast<LONG>(offset),
+ NULL, FILE_BEGIN);
+ if (INVALID_SET_FILE_POINTER == ret)
+ return false;
+
+ DWORD actual;
+ DWORD size = static_cast<DWORD>(buffer_len);
+ if (!ReadFile(sync_platform_file_, buffer, size, &actual, NULL))
+ return false;
+ return actual == size;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ DWORD ret = SetFilePointer(sync_platform_file_, static_cast<LONG>(offset),
+ NULL, FILE_BEGIN);
+ if (INVALID_SET_FILE_POINTER == ret)
+ return false;
+
+ DWORD actual;
+ DWORD size = static_cast<DWORD>(buffer_len);
+ if (!WriteFile(sync_platform_file_, buffer, size, &actual, NULL))
+ return false;
+ return actual == size;
+}
+
+// We have to increase the ref counter of the file before performing the IO to
+// prevent the completion from happening with an invalid handle (if the file is
+// closed while the IO is in flight).
+bool File::Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Read(buffer, buffer_len, offset);
+ }
+
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ MyOverlapped* data = new MyOverlapped(this, offset, callback);
+ DWORD size = static_cast<DWORD>(buffer_len);
+
+ DWORD actual;
+ if (!ReadFile(platform_file_, buffer, size, &actual, data->overlapped())) {
+ *completed = false;
+ if (GetLastError() == ERROR_IO_PENDING)
+ return true;
+ delete data;
+ return false;
+ }
+
+ // The operation completed already. We'll be called back anyway.
+ *completed = (actual == size);
+ DCHECK_EQ(size, actual);
+ data->callback_ = NULL;
+ data->file_ = NULL; // There is no reason to hold on to this anymore.
+ return *completed;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Write(buffer, buffer_len, offset);
+ }
+
+ return AsyncWrite(buffer, buffer_len, offset, callback, completed);
+}
+
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ DCHECK(callback);
+ DCHECK(completed);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ MyOverlapped* data = new MyOverlapped(this, offset, callback);
+ DWORD size = static_cast<DWORD>(buffer_len);
+
+ DWORD actual;
+ if (!WriteFile(platform_file_, buffer, size, &actual, data->overlapped())) {
+ *completed = false;
+ if (GetLastError() == ERROR_IO_PENDING)
+ return true;
+ delete data;
+ return false;
+ }
+
+ // The operation completed already. We'll be called back anyway.
+ *completed = (actual == size);
+ DCHECK_EQ(size, actual);
+ data->callback_ = NULL;
+ data->file_ = NULL; // There is no reason to hold on to this anymore.
+ return *completed;
+}
+
+bool File::SetLength(size_t length) {
+ DCHECK(init_);
+ if (length > ULONG_MAX)
+ return false;
+
+ DWORD size = static_cast<DWORD>(length);
+ HANDLE file = platform_file();
+ if (INVALID_SET_FILE_POINTER == SetFilePointer(file, size, NULL, FILE_BEGIN))
+ return false;
+
+ return TRUE == SetEndOfFile(file);
+}
+
+size_t File::GetLength() {
+ DCHECK(init_);
+ LARGE_INTEGER size;
+ HANDLE file = platform_file();
+ if (!GetFileSizeEx(file, &size))
+ return 0;
+ if (size.HighPart)
+ return ULONG_MAX;
+
+ return static_cast<size_t>(size.LowPart);
+}
+
+// Static.
+void File::WaitForPendingIO(int* num_pending_io) {
+ while (*num_pending_io) {
+ // Asynchronous IO operations may be in flight and the completion may end
+ // up calling us back so let's wait for them.
+ base::MessageLoopForIO::IOHandler* handler = g_completion_handler.Pointer();
+ base::MessageLoopForIO::current()->WaitForIOCompletion(100, handler);
+ }
+}
+
+// Static.
+void File::DropPendingIO() {
+ // Nothing to do here.
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/flash_cache_test_base.cc b/chromium/net/disk_cache/flash/flash_cache_test_base.cc
new file mode 100644
index 00000000000..164eb338970
--- /dev/null
+++ b/chromium/net/disk_cache/flash/flash_cache_test_base.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/flash/flash_cache_test_base.h"
+
+#include "base/files/file_path.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/time/time.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_store.h"
+#include "net/disk_cache/flash/storage.h"
+
+FlashCacheTest::FlashCacheTest() {
+ int seed = static_cast<int>(base::Time::Now().ToInternalValue());
+ srand(seed);
+}
+
+FlashCacheTest::~FlashCacheTest() {
+}
+
+void FlashCacheTest::SetUp() {
+ const base::FilePath::StringType kCachePath = FILE_PATH_LITERAL("cache");
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ path_ = temp_dir_.path().Append(kCachePath);
+}
+
+void FlashCacheTest::TearDown() {
+}
diff --git a/chromium/net/disk_cache/flash/flash_cache_test_base.h b/chromium/net/disk_cache/flash/flash_cache_test_base.h
new file mode 100644
index 00000000000..eb082276171
--- /dev/null
+++ b/chromium/net/disk_cache/flash/flash_cache_test_base.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_FLASH_TEST_BASE_H_
+#define NET_DISK_CACHE_DISK_CACHE_FLASH_TEST_BASE_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/disk_cache/flash/format.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int32 kNumTestSegments = 10;
+const int32 kStorageSize = kNumTestSegments * disk_cache::kFlashSegmentSize;
+
+} // namespace
+
+namespace disk_cache {
+
+class LogStore;
+
+} // namespace disk_cache
+
+class FlashCacheTest : public testing::Test {
+ protected:
+ FlashCacheTest();
+ virtual ~FlashCacheTest();
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ base::ScopedTempDir temp_dir_;
+ base::FilePath path_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FlashCacheTest);
+};
+
+#endif // NET_DISK_CACHE_DISK_CACHE_FLASH_TEST_BASE_H_
diff --git a/chromium/net/disk_cache/flash/flash_entry_impl.cc b/chromium/net/disk_cache/flash/flash_entry_impl.cc
new file mode 100644
index 00000000000..a0e785cae10
--- /dev/null
+++ b/chromium/net/disk_cache/flash/flash_entry_impl.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/task_runner_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/flash/flash_entry_impl.h"
+#include "net/disk_cache/flash/internal_entry.h"
+
+namespace disk_cache {
+
+FlashEntryImpl::FlashEntryImpl(const std::string& key,
+ LogStore* store,
+ base::MessageLoopProxy* cache_thread)
+ : init_(false),
+ key_(key),
+ new_internal_entry_(new InternalEntry(key, store)),
+ cache_thread_(cache_thread) {
+ memset(stream_sizes_, 0, sizeof(stream_sizes_));
+}
+
+FlashEntryImpl::FlashEntryImpl(int32 id,
+ LogStore* store,
+ base::MessageLoopProxy* cache_thread)
+ : init_(false),
+ old_internal_entry_(new InternalEntry(id, store)),
+ cache_thread_(cache_thread) {
+}
+
+int FlashEntryImpl::Init(const CompletionCallback& callback) {
+ if (new_internal_entry_.get()) {
+ DCHECK(callback.is_null());
+ init_ = true;
+ return net::OK;
+ }
+ DCHECK(!callback.is_null() && old_internal_entry_.get());
+ callback_ = callback;
+ PostTaskAndReplyWithResult(cache_thread_.get(),
+ FROM_HERE,
+ Bind(&InternalEntry::Init, old_internal_entry_),
+ Bind(&FlashEntryImpl::OnInitComplete, this));
+ return net::ERR_IO_PENDING;
+}
+
+void FlashEntryImpl::Doom() {
+ DCHECK(init_);
+ NOTREACHED();
+}
+
+void FlashEntryImpl::Close() {
+ DCHECK(init_);
+ Release();
+}
+
+std::string FlashEntryImpl::GetKey() const {
+ DCHECK(init_);
+ return key_;
+}
+
+base::Time FlashEntryImpl::GetLastUsed() const {
+ DCHECK(init_);
+ NOTREACHED();
+ return base::Time::Now();
+}
+
+base::Time FlashEntryImpl::GetLastModified() const {
+ DCHECK(init_);
+ NOTREACHED();
+ return base::Time::Now();
+}
+
+int32 FlashEntryImpl::GetDataSize(int index) const {
+ DCHECK(init_);
+ return new_internal_entry_->GetDataSize(index);
+}
+
+int FlashEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(init_);
+ return new_internal_entry_->ReadData(index, offset, buf, buf_len, callback);
+}
+
+int FlashEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ DCHECK(init_);
+ return new_internal_entry_->WriteData(index, offset, buf, buf_len, callback);
+}
+
+int FlashEntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(init_);
+ NOTREACHED();
+ return net::ERR_FAILED;
+}
+
+int FlashEntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(init_);
+ NOTREACHED();
+ return net::ERR_FAILED;
+}
+
+int FlashEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ DCHECK(init_);
+ NOTREACHED();
+ return net::ERR_FAILED;
+}
+
+bool FlashEntryImpl::CouldBeSparse() const {
+ DCHECK(init_);
+ NOTREACHED();
+ return false;
+}
+
+void FlashEntryImpl::CancelSparseIO() {
+ DCHECK(init_);
+ NOTREACHED();
+}
+
+int FlashEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
+ DCHECK(init_);
+ NOTREACHED();
+ return net::ERR_FAILED;
+}
+
+void FlashEntryImpl::OnInitComplete(
+ scoped_ptr<KeyAndStreamSizes> key_and_stream_sizes) {
+ DCHECK(!callback_.is_null());
+ if (!key_and_stream_sizes) {
+ callback_.Run(net::ERR_FAILED);
+ } else {
+ key_ = key_and_stream_sizes->key;
+ memcpy(stream_sizes_, key_and_stream_sizes->stream_sizes,
+ sizeof(stream_sizes_));
+ init_ = true;
+ callback_.Run(net::OK);
+ }
+}
+
+FlashEntryImpl::~FlashEntryImpl() {
+ cache_thread_->PostTask(FROM_HERE,
+ Bind(&InternalEntry::Close, new_internal_entry_));
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/flash_entry_impl.h b/chromium/net/disk_cache/flash/flash_entry_impl.h
new file mode 100644
index 00000000000..32f489f899d
--- /dev/null
+++ b/chromium/net/disk_cache/flash/flash_entry_impl.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_FLASH_ENTRY_IMPL_H_
+
+#include <string>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/flash/internal_entry.h"
+
+namespace base {
+
+class MessageLoopProxy;
+
+} // namespace base
+
+namespace disk_cache {
+
+class InternalEntry;
+class IOBuffer;
+class LogStore;
+
+// We use split objects to minimize the context switches between the main thread
+// and the cache thread in the most common case of creating a new entry.
+//
+// All calls on a new entry are served synchronously. When an object is
+// destroyed (via the final Close() call), a message is posted to the cache
+// thread to save the object to storage.
+//
+// When an entry is not new, every asynchronous call is posted to the cache
+// thread, just as before; synchronous calls like GetKey() and GetDataSize() are
+// served from the main thread.
+class NET_EXPORT_PRIVATE FlashEntryImpl
+ : public Entry,
+ public base::RefCountedThreadSafe<FlashEntryImpl> {
+ friend class base::RefCountedThreadSafe<FlashEntryImpl>;
+ public:
+ FlashEntryImpl(const std::string& key,
+ LogStore* store,
+ base::MessageLoopProxy* cache_thread);
+ FlashEntryImpl(int32 id,
+ LogStore* store,
+ base::MessageLoopProxy* cache_thread);
+
+ int Init(const CompletionCallback& callback);
+
+ // disk_cache::Entry interface.
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ void OnInitComplete(scoped_ptr<KeyAndStreamSizes> key_and_stream_sizes);
+ virtual ~FlashEntryImpl();
+
+ bool init_;
+ std::string key_;
+ int stream_sizes_[kFlashLogStoreEntryNumStreams];
+
+ // Used if |this| is a newly created entry.
+ scoped_refptr<InternalEntry> new_internal_entry_;
+
+ // Used if |this| is an existing entry.
+ scoped_refptr<InternalEntry> old_internal_entry_;
+
+ // Copy of the callback for asynchronous calls on |old_internal_entry_|.
+ CompletionCallback callback_;
+
+ scoped_refptr<base::MessageLoopProxy> cache_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(FlashEntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_ENTRY_IMPL_H_
diff --git a/chromium/net/disk_cache/flash/format.h b/chromium/net/disk_cache/flash/format.h
new file mode 100644
index 00000000000..872d96b34a2
--- /dev/null
+++ b/chromium/net/disk_cache/flash/format.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_FORMAT_H_
+#define NET_DISK_CACHE_FLASH_FORMAT_H_
+
+namespace disk_cache {
+
+// Storage constants.
+const int32 kFlashPageSize = 8 * 1024;
+const int32 kFlashBlockSize = 512 * kFlashPageSize;
+
+// Segment constants.
+const int32 kFlashSegmentSize = 4 * 1024 * 1024;
+const int32 kFlashSmallEntrySize = 4 * 1024;
+const size_t kFlashMaxEntryCount = kFlashSegmentSize / kFlashSmallEntrySize - 1;
+
+// A segment summary consists of a fixed region at the end of the segment,
+// containing a counter specifying the number of saved offsets, followed by
+// the offsets themselves.
+const int32 kFlashSummarySize = (1 + kFlashMaxEntryCount) * sizeof(int32);
+const int32 kFlashSegmentFreeSpace = kFlashSegmentSize - kFlashSummarySize;
+
+// An entry consists of a fixed number of streams.
+const int32 kFlashLogStoreEntryNumStreams = 4;
+const int32 kFlashLogStoreEntryHeaderSize =
+ kFlashLogStoreEntryNumStreams * sizeof(int32);
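+
+// With the values above, a 4 MB segment can hold at most 1023 small entries
+// (4 MB / 4 KB - 1), the summary occupies 1024 * sizeof(int32) = 4096 bytes,
+// and kFlashSegmentFreeSpace is therefore 4 MB minus 4 KB.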
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_FORMAT_H_
diff --git a/chromium/net/disk_cache/flash/internal_entry.cc b/chromium/net/disk_cache/flash/internal_entry.cc
new file mode 100644
index 00000000000..c6bae8926aa
--- /dev/null
+++ b/chromium/net/disk_cache/flash/internal_entry.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/flash/internal_entry.h"
+
+#include "base/memory/ref_counted.h"
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/flash/log_store.h"
+#include "net/disk_cache/flash/log_store_entry.h"
+
+using net::IOBuffer;
+using net::StringIOBuffer;
+using net::CompletionCallback;
+
+namespace disk_cache {
+
+KeyAndStreamSizes::KeyAndStreamSizes() {
+}
+
+InternalEntry::InternalEntry(const std::string& key, LogStore* store)
+ : store_(store),
+ entry_(new LogStoreEntry(store_)) {
+ entry_->Init();
+ WriteKey(entry_.get(), key);
+}
+
+InternalEntry::InternalEntry(int32 id, LogStore* store)
+ : store_(store),
+ entry_(new LogStoreEntry(store_, id)) {
+}
+
+InternalEntry::~InternalEntry() {
+}
+
+scoped_ptr<KeyAndStreamSizes> InternalEntry::Init() {
+ scoped_ptr<KeyAndStreamSizes> null;
+ if (entry_->IsNew())
+ return null.Pass();
+ if (!entry_->Init())
+ return null.Pass();
+
+ scoped_ptr<KeyAndStreamSizes> rv(new KeyAndStreamSizes);
+ if (!ReadKey(entry_.get(), &rv->key))
+ return null.Pass();
+ for (int i = 0; i < kFlashLogStoreEntryNumStreams; ++i)
+ rv->stream_sizes[i] = entry_->GetDataSize(i+1);
+ return rv.Pass();
+}
+
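+// Stream 0 of the underlying LogStoreEntry holds the key (see WriteKey() and
+// ReadKey() below), so a caller-visible stream index maps to internal stream
+// index + 1; hence the pre-increment of |index| in the methods below.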
+int32 InternalEntry::GetDataSize(int index) const {
+ return entry_->GetDataSize(++index);
+}
+
+int InternalEntry::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ return entry_->ReadData(++index, offset, buf, buf_len);
+}
+
+int InternalEntry::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ return entry_->WriteData(++index, offset, buf, buf_len);
+}
+
+void InternalEntry::Close() {
+ entry_->Close();
+}
+
+bool InternalEntry::WriteKey(LogStoreEntry* entry, const std::string& key) {
+ int key_size = static_cast<int>(key.size());
+ scoped_refptr<IOBuffer> key_buf(new StringIOBuffer(key));
+ return entry->WriteData(0, 0, key_buf.get(), key_size) == key_size;
+}
+
+bool InternalEntry::ReadKey(LogStoreEntry* entry, std::string* key) {
+ int key_size = entry->GetDataSize(0);
+ scoped_refptr<net::IOBuffer> key_buf(new net::IOBuffer(key_size));
+ if (entry->ReadData(0, 0, key_buf.get(), key_size) != key_size)
+ return false;
+ key->assign(key_buf->data(), key_size);
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/internal_entry.h b/chromium/net/disk_cache/flash/internal_entry.h
new file mode 100644
index 00000000000..eeb2793f4c1
--- /dev/null
+++ b/chromium/net/disk_cache/flash/internal_entry.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_INTERNAL_ENTRY_H_
+#define NET_DISK_CACHE_FLASH_INTERNAL_ENTRY_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/completion_callback.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/flash/format.h"
+
+namespace net {
+
+class IOBuffer;
+
+} // namespace net
+
+namespace disk_cache {
+
+struct KeyAndStreamSizes {
+ KeyAndStreamSizes();
+ std::string key;
+ int stream_sizes[kFlashLogStoreEntryNumStreams];
+};
+
+class LogStore;
+class LogStoreEntry;
+
+// Actual entry implementation that does all the work of reading, writing and
+// storing data.
+class NET_EXPORT_PRIVATE InternalEntry
+ : public base::RefCountedThreadSafe<InternalEntry> {
+ friend class base::RefCountedThreadSafe<InternalEntry>;
+ public:
+ InternalEntry(const std::string& key, LogStore* store);
+ InternalEntry(int32 id, LogStore* store);
+
+ scoped_ptr<KeyAndStreamSizes> Init();
+ int32 GetDataSize(int index) const;
+ int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ const net::CompletionCallback& callback);
+ int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ const net::CompletionCallback& callback);
+ void Close();
+
+ private:
+ bool WriteKey(LogStoreEntry* entry, const std::string& key);
+ bool ReadKey(LogStoreEntry* entry, std::string* key);
+ ~InternalEntry();
+
+ LogStore* store_;
+ scoped_ptr<LogStoreEntry> entry_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternalEntry);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_INTERNAL_ENTRY_H_
diff --git a/chromium/net/disk_cache/flash/log_store.cc b/chromium/net/disk_cache/flash/log_store.cc
new file mode 100644
index 00000000000..a2a82827027
--- /dev/null
+++ b/chromium/net/disk_cache/flash/log_store.cc
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/stl_util.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_store.h"
+#include "net/disk_cache/flash/segment.h"
+#include "net/disk_cache/flash/storage.h"
+
+namespace disk_cache {
+
+LogStore::LogStore(const base::FilePath& path, int32 size)
+ : storage_(path, size),
+ num_segments_(size / kFlashSegmentSize),
+ open_segments_(num_segments_),
+ write_index_(0),
+ current_entry_id_(-1),
+ current_entry_num_bytes_left_to_write_(0),
+ init_(false),
+ closed_(false) {
+ DCHECK(size % kFlashSegmentSize == 0);
+}
+
+LogStore::~LogStore() {
+ DCHECK(!init_ || closed_);
+ STLDeleteElements(&open_segments_);
+}
+
+bool LogStore::Init() {
+ DCHECK(!init_);
+ if (!storage_.Init())
+ return false;
+
+ // TODO(agayev): Once we start persisting segment metadata to disk, we will
+ // start from where we left off during the last shutdown.
+ scoped_ptr<Segment> segment(new Segment(write_index_, false, &storage_));
+ if (!segment->Init())
+ return false;
+
+ segment->AddUser();
+ open_segments_[write_index_] = segment.release();
+ init_ = true;
+ return true;
+}
+
+bool LogStore::Close() {
+ DCHECK(init_ && !closed_);
+ open_segments_[write_index_]->ReleaseUser();
+ if (!open_segments_[write_index_]->Close())
+ return false;
+ closed_ = true;
+ return true;
+ // TODO(agayev): persist metadata to disk.
+}
+
+bool LogStore::CreateEntry(int32 size, int32* id) {
+ DCHECK(init_ && !closed_);
+ DCHECK(current_entry_id_ == -1 && size <= disk_cache::kFlashSegmentFreeSpace);
+
+ // TODO(agayev): Avoid large entries from leaving the segments almost empty.
+ if (!open_segments_[write_index_]->CanHold(size)) {
+ if (!open_segments_[write_index_]->Close())
+ return false;
+
+ open_segments_[write_index_]->ReleaseUser();
+ if (open_segments_[write_index_]->HasNoUsers()) {
+ delete open_segments_[write_index_];
+ open_segments_[write_index_] = NULL;
+ }
+
+ write_index_ = GetNextSegmentIndex();
+ scoped_ptr<Segment> segment(new Segment(write_index_, false, &storage_));
+ if (!segment->Init())
+ return false;
+
+ segment->AddUser();
+ open_segments_[write_index_] = segment.release();
+ }
+
+ *id = open_segments_[write_index_]->write_offset();
+ open_segments_[write_index_]->StoreOffset(*id);
+ current_entry_id_ = *id;
+ current_entry_num_bytes_left_to_write_ = size;
+ open_entries_.insert(current_entry_id_);
+ return true;
+}
+
+void LogStore::DeleteEntry(int32 id, int32 size) {
+ DCHECK(init_ && !closed_);
+ DCHECK(open_entries_.find(id) == open_entries_.end());
+ // TODO(agayev): Increment the number of dead bytes in the segment metadata
+ // for the segment identified by |index|.
+}
+
+bool LogStore::WriteData(const void* buffer, int32 size) {
+ DCHECK(init_ && !closed_);
+ DCHECK(current_entry_id_ != -1 &&
+ size <= current_entry_num_bytes_left_to_write_);
+ if (open_segments_[write_index_]->WriteData(buffer, size)) {
+ current_entry_num_bytes_left_to_write_ -= size;
+ return true;
+ }
+ return false;
+}
+
+bool LogStore::OpenEntry(int32 id) {
+ DCHECK(init_ && !closed_);
+ if (open_entries_.find(id) != open_entries_.end())
+ return false;
+
+ // Segment is already open.
+ int32 index = id / disk_cache::kFlashSegmentSize;
+ if (open_segments_[index]) {
+ if (!open_segments_[index]->HaveOffset(id))
+ return false;
+ open_segments_[index]->AddUser();
+ open_entries_.insert(id);
+ return true;
+ }
+
+ // Segment is not open.
+ scoped_ptr<Segment> segment(new Segment(index, true, &storage_));
+ if (!segment->Init() || !segment->HaveOffset(id))
+ return false;
+
+ segment->AddUser();
+ open_segments_[index] = segment.release();
+ open_entries_.insert(id);
+ return true;
+}
+
+bool LogStore::ReadData(int32 id, void* buffer, int32 size,
+ int32 offset) const {
+ DCHECK(init_ && !closed_);
+ DCHECK(open_entries_.find(id) != open_entries_.end());
+
+ int32 index = id / disk_cache::kFlashSegmentSize;
+ DCHECK(open_segments_[index] && open_segments_[index]->HaveOffset(id));
+ return open_segments_[index]->ReadData(buffer, size, id + offset);
+}
+
+void LogStore::CloseEntry(int32 id) {
+ DCHECK(init_ && !closed_);
+ std::set<int32>::iterator entry_iter = open_entries_.find(id);
+ DCHECK(entry_iter != open_entries_.end());
+
+ if (current_entry_id_ != -1) {
+ DCHECK(id == current_entry_id_ && !current_entry_num_bytes_left_to_write_);
+ open_entries_.erase(entry_iter);
+ current_entry_id_ = -1;
+ return;
+ }
+
+ int32 index = id / disk_cache::kFlashSegmentSize;
+ DCHECK(open_segments_[index]);
+ open_entries_.erase(entry_iter);
+
+ open_segments_[index]->ReleaseUser();
+ if (open_segments_[index]->HasNoUsers()) {
+ delete open_segments_[index];
+ open_segments_[index] = NULL;
+ }
+}
+
+int32 LogStore::GetNextSegmentIndex() {
+ DCHECK(init_ && !closed_);
+ int32 next_index = (write_index_ + 1) % num_segments_;
+
+ while (InUse(next_index)) {
+ next_index = (next_index + 1) % num_segments_;
+ DCHECK_NE(next_index, write_index_);
+ }
+ return next_index;
+}
+
+bool LogStore::InUse(int32 index) const {
+ DCHECK(init_ && !closed_);
+ DCHECK(index >= 0 && index < num_segments_);
+ return open_segments_[index] != NULL;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/log_store.h b/chromium/net/disk_cache/flash/log_store.h
new file mode 100644
index 00000000000..e53b83e1625
--- /dev/null
+++ b/chromium/net/disk_cache/flash/log_store.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_LOG_STORE_H_
+#define NET_DISK_CACHE_FLASH_LOG_STORE_H_
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/flash/storage.h"
+
+namespace disk_cache {
+
+class Segment;
+
+// This class implements a general-purpose store for storing and retrieving
+// entries consisting of arbitrary binary data. The store has log semantics,
+// i.e. it's not possible to overwrite data in place. In order to update an
+// entry, a new version must be written. Only one entry can be written to at
+// any given time, while concurrent reading of multiple entries is supported.
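+//
+// A minimal usage sketch (illustrative only; |path|, |data| and kSize are
+// assumptions, not part of this class):
+//   LogStore store(path, 8 * kFlashSegmentSize);
+//   if (store.Init()) {
+//     int32 id;
+//     if (store.CreateEntry(kSize, &id) && store.WriteData(data, kSize))
+//       store.CloseEntry(id);
+//     store.Close();
+//   }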
+class NET_EXPORT_PRIVATE LogStore {
+ public:
+ LogStore(const base::FilePath& path, int32 size);
+ ~LogStore();
+
+ // Performs initialization. Must be the first function called and further
+ // calls should be made only if it is successful.
+ bool Init();
+
+ // Closes the store. Should be the last function called before destruction.
+ bool Close();
+
+ // Creates an entry of |size| bytes. The id of the created entry is stored in
+ // |entry_id|.
+ bool CreateEntry(int32 size, int32* entry_id);
+
+ // Deletes |entry_id|; the client should keep track of |size| and provide it
+ // here. Only inactive (i.e. not currently open or being created) entries can
+ // be deleted.
+ void DeleteEntry(int32 entry_id, int32 size);
+
+ // Appends data to the end of the last created entry.
+ bool WriteData(const void* buffer, int32 size);
+
+ // Opens an entry with id |entry_id|.
+ bool OpenEntry(int32 entry_id);
+
+  // Reads |size| bytes from the entry identified by |entry_id| into |buffer|,
+  // starting at |offset| relative to the entry's content.
+ bool ReadData(int32 entry_id, void* buffer, int32 size, int32 offset) const;
+
+ // Closes an entry that was either opened with OpenEntry or created with
+ // CreateEntry.
+ void CloseEntry(int32 id);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest, LogStoreReadFromClosedSegment);
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest, LogStoreSegmentSelectionIsFifo);
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest, LogStoreInUseSegmentIsSkipped);
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest, LogStoreReadFromCurrentAfterClose);
+
+ int32 GetNextSegmentIndex();
+ bool InUse(int32 segment_index) const;
+
+ Storage storage_;
+
+ int32 num_segments_;
+
+ // Currently open segments, either for reading or writing. There can only be
+ // one segment open for writing, and multiple open for reading.
+ std::vector<Segment*> open_segments_;
+
+  // The index of the segment currently being written to. It is an index into
+  // the |open_segments_| vector.
+ int32 write_index_;
+
+  // Ids of entries currently open, either CreateEntry'ed or OpenEntry'ed.
+ std::set<int32> open_entries_;
+
+  // Id of the entry currently being written to, or -1 if there is none.
+ int32 current_entry_id_;
+
+ // Number of bytes left to be written to the entry identified by
+ // |current_entry_id_|. Its value makes sense iff |current_entry_id_| is not
+ // -1.
+ int32 current_entry_num_bytes_left_to_write_;
+
+ bool init_; // Init was called.
+ bool closed_; // Close was called.
+
+ DISALLOW_COPY_AND_ASSIGN(LogStore);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_LOG_STORE_H_
diff --git a/chromium/net/disk_cache/flash/log_store_entry.cc b/chromium/net/disk_cache/flash/log_store_entry.cc
new file mode 100644
index 00000000000..1e26ec54461
--- /dev/null
+++ b/chromium/net/disk_cache/flash/log_store_entry.cc
@@ -0,0 +1,171 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_store.h"
+#include "net/disk_cache/flash/log_store_entry.h"
+
+namespace disk_cache {
+
+LogStoreEntry::LogStoreEntry(LogStore* store)
+ : store_(store), id_(-1), init_(false), closed_(false), deleted_(false) {
+ DCHECK(store);
+}
+
+LogStoreEntry::LogStoreEntry(LogStore* store, int32 id)
+ : store_(store), id_(id), init_(false), closed_(false), deleted_(false) {
+ DCHECK(store);
+}
+
+LogStoreEntry::~LogStoreEntry() {
+ DCHECK(!init_ || closed_);
+}
+
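+// On-store layout of a saved entry (inferred from Save() below): a header of
+// kFlashLogStoreEntryNumStreams int32 stream sizes, followed by the stream
+// contents laid out back to back. Init() reads that header to compute each
+// stream's offset and size within the entry.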
+bool LogStoreEntry::Init() {
+ DCHECK(!init_);
+ if (IsNew()) {
+ init_ = true;
+ return true;
+ }
+
+ int32 stream_sizes[kFlashLogStoreEntryNumStreams];
+ COMPILE_ASSERT(sizeof(stream_sizes) == kFlashLogStoreEntryHeaderSize,
+ invalid_log_store_entry_header_size);
+
+ if (!store_->OpenEntry(id_) ||
+ !store_->ReadData(id_, stream_sizes, kFlashLogStoreEntryHeaderSize, 0)) {
+ return false;
+ }
+ for (int i = 0, offset = kFlashLogStoreEntryHeaderSize;
+ i < kFlashLogStoreEntryNumStreams; ++i) {
+ streams_[i].offset = offset;
+ streams_[i].size = stream_sizes[i];
+ offset += stream_sizes[i];
+ }
+ init_ = true;
+ return true;
+}
+
+bool LogStoreEntry::Close() {
+ DCHECK(init_ && !closed_);
+
+ if (IsNew()) {
+ closed_ = deleted_ ? true : Save();
+ } else {
+ store_->CloseEntry(id_);
+ if (deleted_)
+ store_->DeleteEntry(id_, Size());
+ closed_ = true;
+ }
+ return closed_;
+}
+
+int32 LogStoreEntry::id() const {
+ DCHECK(init_);
+ return id_;
+}
+
+int32 LogStoreEntry::GetDataSize(int index) const {
+ DCHECK(init_);
+ return InvalidStream(index) ? 0 : streams_[index].size;
+}
+
+int LogStoreEntry::ReadData(int index, int offset, net::IOBuffer* buf,
+ int buf_len) {
+ DCHECK(init_);
+ if (InvalidStream(index))
+ return net::ERR_INVALID_ARGUMENT;
+
+ int stream_size = streams_[index].size;
+ if (offset >= stream_size || offset < 0 || buf_len == 0)
+ return 0;
+ if (offset + buf_len > stream_size)
+ buf_len = stream_size - offset;
+
+ if (!IsNew()) {
+ offset += streams_[index].offset;
+ if (store_->ReadData(id_, buf->data(), buf_len, offset))
+ return buf_len;
+ return net::ERR_FAILED;
+ }
+ memcpy(buf->data(), &streams_[index].write_buffer[offset], buf_len);
+ return buf_len;
+}
+
+int LogStoreEntry::WriteData(int index, int offset, net::IOBuffer* buf,
+ int buf_len) {
+ DCHECK(init_ && !closed_);
+ if (InvalidStream(index))
+ return net::ERR_INVALID_ARGUMENT;
+
+ DCHECK(offset >= 0 && buf_len >= 0);
+ Stream& stream = streams_[index];
+ size_t new_size = static_cast<size_t>(offset + buf_len);
+ if (new_size) {
+    // TODO(agayev): Currently, only append and overwrite are supported. Add
+ // support for arbitrary writes.
+ DCHECK(!offset || offset == stream.size);
+ if (stream.write_buffer.size() < new_size)
+ stream.write_buffer.resize(new_size);
+ memcpy(&streams_[index].write_buffer[offset], buf->data(), buf_len);
+ }
+ stream.size = new_size;
+ return buf_len;
+}
+
+void LogStoreEntry::Delete() {
+ DCHECK(init_ && !closed_);
+ deleted_ = true;
+}
+
+bool LogStoreEntry::IsNew() const {
+ return id_ == -1;
+}
+
+bool LogStoreEntry::InvalidStream(int stream_index) const {
+ return stream_index < 0 || stream_index >= kFlashLogStoreEntryNumStreams;
+}
+
+int32 LogStoreEntry::Size() const {
+ DCHECK(init_);
+ int32 size = kFlashLogStoreEntryHeaderSize;
+ for (int i = 0; i < kFlashLogStoreEntryNumStreams; ++i)
+ size += streams_[i].size;
+ DCHECK(size > 0 && size <= kFlashSegmentFreeSpace);
+ return size;
+}
+
+bool LogStoreEntry::Save() {
+ DCHECK(init_ && !closed_ && !deleted_ && IsNew());
+ int32 stream_sizes[kFlashLogStoreEntryNumStreams];
+ COMPILE_ASSERT(sizeof(stream_sizes) == kFlashLogStoreEntryHeaderSize,
+ invalid_log_store_entry_header_size);
+
+ for (int i = 0; i < kFlashLogStoreEntryNumStreams; ++i)
+ stream_sizes[i] = streams_[i].size;
+
+ if (!store_->CreateEntry(Size(), &id_))
+ return false;
+ if (!store_->WriteData(stream_sizes, kFlashLogStoreEntryHeaderSize))
+ return false;
+ for (int i = 0; i < kFlashLogStoreEntryNumStreams; ++i) {
+ if (streams_[i].size > 0 &&
+ !store_->WriteData(&streams_[i].write_buffer[0], streams_[i].size)) {
+ return false;
+ }
+ }
+ store_->CloseEntry(id_);
+ return true;
+}
+
+LogStoreEntry::Stream::Stream() : offset(0), size(0) {
+}
+
+LogStoreEntry::Stream::~Stream() {
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/log_store_entry.h b/chromium/net/disk_cache/flash/log_store_entry.h
new file mode 100644
index 00000000000..579194913af
--- /dev/null
+++ b/chromium/net/disk_cache/flash/log_store_entry.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_LOG_STORE_ENTRY_H_
+#define NET_DISK_CACHE_FLASH_LOG_STORE_ENTRY_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/flash/format.h"
+
+namespace net {
+class IOBuffer;
+};
+
+namespace disk_cache {
+
+class LogStore;
+
+class NET_EXPORT_PRIVATE LogStoreEntry {
+ public:
+ explicit LogStoreEntry(LogStore* store);
+ LogStoreEntry(LogStore* store, int32 id);
+ ~LogStoreEntry();
+
+ bool Init();
+ bool Close();
+
+ int32 id() const;
+ bool IsNew() const;
+ int32 GetDataSize(int index) const;
+
+ int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len);
+ int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len);
+ void Delete();
+
+ private:
+ struct Stream {
+ Stream();
+ ~Stream();
+ int offset;
+ int size;
+ std::vector<char> write_buffer;
+ };
+
+ bool InvalidStream(int stream_index) const;
+ int32 Size() const;
+ bool Save();
+
+ LogStore* store_;
+ int32 id_;
+ Stream streams_[kFlashLogStoreEntryNumStreams];
+ bool init_;
+ bool closed_;
+ bool deleted_;
+
+ DISALLOW_COPY_AND_ASSIGN(LogStoreEntry);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_LOG_STORE_ENTRY_H_
diff --git a/chromium/net/disk_cache/flash/log_store_entry_unittest.cc b/chromium/net/disk_cache/flash/log_store_entry_unittest.cc
new file mode 100644
index 00000000000..100c9a83ea0
--- /dev/null
+++ b/chromium/net/disk_cache/flash/log_store_entry_unittest.cc
@@ -0,0 +1,69 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "net/base/io_buffer.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/flash/flash_cache_test_base.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_store.h"
+#include "net/disk_cache/flash/log_store_entry.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using disk_cache::LogStoreEntry;
+
+// Tests the behavior of a LogStoreEntry with empty streams.
+TEST_F(FlashCacheTest, LogStoreEntryEmpty) {
+ disk_cache::LogStore log_store(path_, kStorageSize);
+ ASSERT_TRUE(log_store.Init());
+
+ scoped_ptr<LogStoreEntry> entry(new LogStoreEntry(&log_store));
+ EXPECT_TRUE(entry->Init());
+ EXPECT_TRUE(entry->Close());
+
+ entry.reset(new LogStoreEntry(&log_store, entry->id()));
+ EXPECT_TRUE(entry->Init());
+
+ for (int i = 0; i < disk_cache::kFlashLogStoreEntryNumStreams; ++i) {
+ const int kSize = 1024;
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
+ EXPECT_EQ(0, entry->GetDataSize(i));
+ EXPECT_EQ(0, entry->ReadData(i, 0, buf.get(), kSize));
+ }
+ EXPECT_TRUE(entry->Close());
+ ASSERT_TRUE(log_store.Close());
+}
+
+TEST_F(FlashCacheTest, LogStoreEntryWriteRead) {
+ disk_cache::LogStore log_store(path_, kStorageSize);
+ ASSERT_TRUE(log_store.Init());
+
+ scoped_ptr<LogStoreEntry> entry(new LogStoreEntry(&log_store));
+ EXPECT_TRUE(entry->Init());
+
+ int sizes[disk_cache::kFlashLogStoreEntryNumStreams] = {333, 444, 555, 666};
+ scoped_refptr<net::IOBuffer> buffers[
+ disk_cache::kFlashLogStoreEntryNumStreams];
+
+ for (int i = 0; i < disk_cache::kFlashLogStoreEntryNumStreams; ++i) {
+ buffers[i] = new net::IOBuffer(sizes[i]);
+ CacheTestFillBuffer(buffers[i]->data(), sizes[i], false);
+ EXPECT_EQ(sizes[i], entry->WriteData(i, 0, buffers[i].get(), sizes[i]));
+ }
+ EXPECT_TRUE(entry->Close());
+
+ int32 id = entry->id();
+ entry.reset(new LogStoreEntry(&log_store, id));
+ EXPECT_TRUE(entry->Init());
+
+ for (int i = 0; i < disk_cache::kFlashLogStoreEntryNumStreams; ++i) {
+ EXPECT_EQ(sizes[i], entry->GetDataSize(i));
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(sizes[i]));
+ EXPECT_EQ(sizes[i], entry->ReadData(i, 0, buffer.get(), sizes[i]));
+ EXPECT_EQ(0, memcmp(buffers[i]->data(), buffer->data(), sizes[i]));
+ }
+ EXPECT_TRUE(entry->Close());
+ EXPECT_EQ(id, entry->id());
+ ASSERT_TRUE(log_store.Close());
+}
diff --git a/chromium/net/disk_cache/flash/log_store_unittest.cc b/chromium/net/disk_cache/flash/log_store_unittest.cc
new file mode 100644
index 00000000000..2678316d499
--- /dev/null
+++ b/chromium/net/disk_cache/flash/log_store_unittest.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/flash/flash_cache_test_base.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_store.h"
+#include "net/disk_cache/flash/segment.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
+TEST_F(FlashCacheTest, LogStoreCreateEntry) {
+ LogStore log_store(path_, kStorageSize);
+ EXPECT_TRUE(log_store.Init());
+
+ const int32 kSize = 100;
+ const std::string buf(kSize, 0);
+
+ int32 id;
+ EXPECT_TRUE(log_store.CreateEntry(kSize, &id));
+ EXPECT_TRUE(log_store.WriteData(buf.data(), kSize/2));
+ EXPECT_TRUE(log_store.WriteData(buf.data(), kSize/2));
+ log_store.CloseEntry(id);
+
+ EXPECT_TRUE(log_store.Close());
+}
+
+// Also tests reading from the current segment.
+TEST_F(FlashCacheTest, LogStoreOpenEntry) {
+ LogStore log_store(path_, kStorageSize);
+ EXPECT_TRUE(log_store.Init());
+
+ const int32 kSize = 100;
+ const std::vector<char> expected(kSize, 'b');
+
+ int32 id;
+ EXPECT_TRUE(log_store.CreateEntry(kSize, &id));
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize));
+ log_store.CloseEntry(id);
+
+ EXPECT_TRUE(log_store.OpenEntry(id));
+ std::vector<char> actual(kSize, 0);
+ EXPECT_TRUE(log_store.ReadData(id, &actual[0], kSize, 0));
+ log_store.CloseEntry(id);
+
+ EXPECT_EQ(expected, actual);
+ EXPECT_TRUE(log_store.Close());
+}
+
+// Also tests that writing advances segments.
+TEST_F(FlashCacheTest, LogStoreReadFromClosedSegment) {
+ LogStore log_store(path_, kStorageSize);
+ EXPECT_TRUE(log_store.Init());
+
+ const int32 kSize = disk_cache::kFlashSegmentFreeSpace;
+ const std::vector<char> expected(kSize, 'a');
+
+ // First two entries go to segment 0.
+ int32 id1;
+ EXPECT_EQ(0, log_store.write_index_);
+ EXPECT_TRUE(log_store.CreateEntry(kSize/2, &id1));
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize/2));
+ log_store.CloseEntry(id1);
+
+ int32 id2;
+ EXPECT_EQ(0, log_store.write_index_);
+ EXPECT_TRUE(log_store.CreateEntry(kSize/2, &id2));
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize/2));
+ log_store.CloseEntry(id2);
+
+ // This entry goes to segment 1.
+ int32 id3;
+ EXPECT_TRUE(log_store.CreateEntry(kSize, &id3));
+ EXPECT_EQ(1, log_store.write_index_);
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize));
+ log_store.CloseEntry(id3);
+
+ // We read from segment 0.
+ EXPECT_TRUE(log_store.OpenEntry(id1));
+ std::vector<char> actual(kSize, 0);
+ EXPECT_TRUE(log_store.ReadData(id1, &actual[0], kSize, id1));
+ log_store.CloseEntry(id1);
+
+ EXPECT_EQ(expected, actual);
+ EXPECT_TRUE(log_store.Close());
+}
+
+TEST_F(FlashCacheTest, LogStoreReadFromCurrentAfterClose) {
+ LogStore log_store(path_, kStorageSize);
+ EXPECT_TRUE(log_store.Init());
+
+ const int32 kSize = disk_cache::kFlashSegmentFreeSpace;
+ const std::vector<char> expected(kSize, 'a');
+
+ int32 id1;
+ EXPECT_EQ(0, log_store.write_index_);
+ EXPECT_TRUE(log_store.CreateEntry(kSize/2, &id1));
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize/2));
+ log_store.CloseEntry(id1);
+
+  // Create a reference to the above entry.
+ EXPECT_TRUE(log_store.OpenEntry(id1));
+
+ // This entry fills the first segment.
+ int32 id2;
+ EXPECT_EQ(0, log_store.write_index_);
+ EXPECT_TRUE(log_store.CreateEntry(kSize/2, &id2));
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize/2));
+ log_store.CloseEntry(id2);
+
+ // Creating this entry forces closing of the first segment.
+ int32 id3;
+ EXPECT_TRUE(log_store.CreateEntry(kSize, &id3));
+ EXPECT_EQ(1, log_store.write_index_);
+ EXPECT_TRUE(log_store.WriteData(&expected[0], kSize));
+ log_store.CloseEntry(id3);
+
+ // Now attempt to read from the closed segment.
+ std::vector<char> actual(kSize, 0);
+ EXPECT_TRUE(log_store.ReadData(id1, &actual[0], kSize, id1));
+ log_store.CloseEntry(id1);
+
+ EXPECT_EQ(expected, actual);
+ EXPECT_TRUE(log_store.Close());
+}
+
+// TODO(agayev): Add a test that confirms that in-use segment is not selected as
+// the next write segment.
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/segment.cc b/chromium/net/disk_cache/flash/segment.cc
new file mode 100644
index 00000000000..3457497a22f
--- /dev/null
+++ b/chromium/net/disk_cache/flash/segment.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/segment.h"
+#include "net/disk_cache/flash/storage.h"
+
+namespace disk_cache {
+
+Segment::Segment(int32 index, bool read_only, Storage* storage)
+ : index_(index),
+ num_users_(0),
+ read_only_(read_only),
+ init_(false),
+ storage_(storage),
+ offset_(index * kFlashSegmentSize),
+ summary_offset_(offset_ + kFlashSegmentSize - kFlashSummarySize),
+ write_offset_(offset_) {
+ DCHECK(storage);
+ DCHECK(storage->size() % kFlashSegmentSize == 0);
+}
+
+Segment::~Segment() {
+ DCHECK(!init_ || read_only_);
+ if (num_users_ != 0)
+ LOG(WARNING) << "Users exist, but we don't care? " << num_users_;
+}
+
+bool Segment::HaveOffset(int32 offset) const {
+ DCHECK(init_);
+ return std::binary_search(offsets_.begin(), offsets_.end(), offset);
+}
+
+void Segment::AddUser() {
+ DCHECK(init_);
+ ++num_users_;
+}
+
+void Segment::ReleaseUser() {
+ DCHECK(init_);
+ --num_users_;
+}
+
+bool Segment::HasNoUsers() const {
+ DCHECK(init_);
+ return num_users_ == 0;
+}
+
+bool Segment::Init() {
+ DCHECK(!init_);
+
+ if (offset_ < 0 || offset_ + kFlashSegmentSize > storage_->size())
+ return false;
+
+ if (!read_only_) {
+ init_ = true;
+ return true;
+ }
+
+ int32 summary[kFlashMaxEntryCount + 1];
+ if (!storage_->Read(summary, kFlashSummarySize, summary_offset_))
+ return false;
+
+ size_t entry_count = summary[0];
+ DCHECK_LE(entry_count, kFlashMaxEntryCount);
+
+ std::vector<int32> tmp(summary + 1, summary + 1 + entry_count);
+ offsets_.swap(tmp);
+ init_ = true;
+ return true;
+}
+
+bool Segment::WriteData(const void* buffer, int32 size) {
+ DCHECK(init_ && !read_only_);
+ DCHECK(write_offset_ + size <= summary_offset_);
+ if (!storage_->Write(buffer, size, write_offset_))
+ return false;
+ write_offset_ += size;
+ return true;
+}
+
+void Segment::StoreOffset(int32 offset) {
+ DCHECK(init_ && !read_only_);
+ DCHECK(offsets_.size() < kFlashMaxEntryCount);
+ offsets_.push_back(offset);
+}
+
+bool Segment::ReadData(void* buffer, int32 size, int32 offset) const {
+ DCHECK(init_);
+ DCHECK(offset >= offset_ && offset + size <= offset_ + kFlashSegmentSize);
+ return storage_->Read(buffer, size, offset);
+}
+
+bool Segment::Close() {
+ DCHECK(init_);
+ if (read_only_)
+ return true;
+
+ DCHECK(offsets_.size() <= kFlashMaxEntryCount);
+
+ int32 summary[kFlashMaxEntryCount + 1];
+ memset(summary, 0, kFlashSummarySize);
+ summary[0] = offsets_.size();
+ std::copy(offsets_.begin(), offsets_.end(), summary + 1);
+ if (!storage_->Write(summary, kFlashSummarySize, summary_offset_))
+ return false;
+
+ read_only_ = true;
+ return true;
+}
+
+bool Segment::CanHold(int32 size) const {
+ DCHECK(init_);
+ return offsets_.size() < kFlashMaxEntryCount &&
+ write_offset_ + size <= summary_offset_;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/segment.h b/chromium/net/disk_cache/flash/segment.h
new file mode 100644
index 00000000000..97551e2531d
--- /dev/null
+++ b/chromium/net/disk_cache/flash/segment.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_SEGMENT_H_
+#define NET_DISK_CACHE_FLASH_SEGMENT_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+
+class Storage;
+
+// The underlying storage, represented by the Storage class, is divided into
+// fixed-size logical segments, each represented by this class. Since the
+// segment size is fixed, the storage size must be a multiple of it. The picture
+// below describes the relation between storage and segments:
+//
+// |-----------+-----------+-----+-------------+-----------|
+// | segment 0 | segment 1 | ... | segment n-1 | segment n |
+// |-----------+-----------+-----+-------------+-----------|
+//
+// |-------------------------------------------------------|
+// | storage |
+// |-------------------------------------------------------|
+//
+// A segment is constructed from its index within the storage, a flag
+// indicating whether it is read-only, and a pointer to the storage on which it
+// resides. It provides an API for reading and writing the entries residing on
+// it. Init() must be called right after constructing a segment, and other
+// functions should be called only if Init() succeeded. After a successful
+// initialization, non-mutating functions may be called at any time; mutating
+// functions may be called only if the segment is not read-only. Finally,
+// Close() must be called right before destruction. Calling Close() makes the
+// segment immutable, which means mutating functions cannot be called on the
+// object after that.
+//
+// Segment can only be used as a log, i.e. all writes are laid out sequentially
+// on a segment. As a result, WriteData() function does not take an offset.
+// Current write offset can be learned by calling write_offset().
+//
+// Once the entries are written to the Segment and Close() called on it and the
+// object destroyed, we should later be able to instantiate a read-only Segment
+// object and recreate all the entries that were previously written to it. To
+// achieve this, a tiny region of Segment is used for its metadata and Segment
+// provides two calls for interacting with metadata: StoreOffset() and
+// GetOffsets(). The former can be used to store an offset that was returned by
+// write_offset() and the latter can be used to retrieve all the offsets that
+// were stored in the Segment. Before attempting to write an entry, the client
+// should call CanHold() to make sure that there is enough space in the segment.
+//
+// ReadData can be called over any range that was previously written with
+// WriteData. Reading from an area that was not written will fail.
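+//
+// A minimal write-path sketch (illustrative only; |storage|, |data| and kSize
+// are assumptions, not part of this class):
+//   Segment segment(0, false, &storage);
+//   if (segment.Init()) {
+//     if (segment.CanHold(kSize)) {
+//       int32 offset = segment.write_offset();
+//       if (segment.WriteData(data, kSize))
+//         segment.StoreOffset(offset);
+//     }
+//     segment.Close();
+//   }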
+
+class NET_EXPORT_PRIVATE Segment {
+ public:
+ // |index| is the index of this segment on |storage|. If the storage size is
+ // X and the segment size is Y, where X >> Y and X % Y == 0, then the valid
+ // values for the index are integers within the range [0, X/Y). Thus, if
+ // |index| is given value Z, then it covers bytes on storage starting at the
+ // offset Z*Y and ending at the offset Z*Y+Y-1.
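+  // For instance, assuming a hypothetical 4 MB storage and a 1 MB segment
+  // size, the valid indices are 0 through 3, and index 2 covers bytes
+  // [2 MB, 3 MB - 1] of the storage.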
+ Segment(int32 index, bool read_only, Storage* storage);
+ ~Segment();
+
+ int32 index() const { return index_; }
+ int32 write_offset() const { return write_offset_; }
+
+ bool HaveOffset(int32 offset) const;
+ std::vector<int32> GetOffsets() const { return offsets_; }
+
+ // Manage the number of users of this segment.
+ void AddUser();
+ void ReleaseUser();
+ bool HasNoUsers() const;
+
+ // Performs segment initialization. Must be the first function called on the
+ // segment and further calls should be made only if it is successful.
+ bool Init();
+
+  // Writes |size| bytes of data from |buffer| to the segment; returns true on
+  // success and false on failure. Can block for a long time.
+ bool WriteData(const void* buffer, int32 size);
+
+  // Reads |size| bytes of data living at |offset| into |buffer|; returns true
+  // on success and false on failure.
+ bool ReadData(void* buffer, int32 size, int32 offset) const;
+
+ // Stores the offset in the metadata.
+ void StoreOffset(int32 offset);
+
+ // Closes the segment, returns true on success and false on failure. Closing
+ // a segment makes it immutable.
+ bool Close();
+
+ // Returns true if segment can accommodate an entry of |size| bytes.
+ bool CanHold(int32 size) const;
+
+ private:
+ int32 index_;
+ int32 num_users_;
+  bool read_only_;  // True if the segment cannot be written to.
+ bool init_; // Indicates whether segment was initialized.
+ Storage* storage_; // Storage on which the segment resides.
+ const int32 offset_; // Offset of the segment on |storage_|.
+ const int32 summary_offset_; // Offset of the segment summary.
+ int32 write_offset_; // Current write offset.
+ std::vector<int32> offsets_;
+
+ DISALLOW_COPY_AND_ASSIGN(Segment);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_SEGMENT_H_
diff --git a/chromium/net/disk_cache/flash/segment_unittest.cc b/chromium/net/disk_cache/flash/segment_unittest.cc
new file mode 100644
index 00000000000..3f61701cbe0
--- /dev/null
+++ b/chromium/net/disk_cache/flash/segment_unittest.cc
@@ -0,0 +1,152 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/flash/segment.h"
+#include "net/disk_cache/flash/storage.h"
+#include "net/disk_cache/flash/flash_cache_test_base.h"
+#include "net/disk_cache/flash/format.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+template<int SIZE>
+struct Entry {
+ enum { size = SIZE };
+
+ Entry() { CacheTestFillBuffer(data, size, false); }
+
+ bool operator==(const Entry& rhs) const {
+ return std::equal(data, data + size, rhs.data);
+ }
+
+ char data[size];
+};
+
+const int32 kSmallEntrySize = 100;
+const int32 kLargeEntrySize = disk_cache::kFlashSegmentSize / 4;
+
+typedef Entry<kSmallEntrySize> SmallEntry;
+typedef Entry<kLargeEntrySize> LargeEntry;
+
+const int32 kSegmentFreeSpace = disk_cache::kFlashSegmentSize -
+ disk_cache::kFlashSummarySize;
+
+} // namespace
+
+TEST_F(FlashCacheTest, SegmentUserTracking) {
+ disk_cache::Storage storage(path_, kStorageSize);
+ ASSERT_TRUE(storage.Init());
+
+ scoped_ptr<disk_cache::Segment> segment(
+ new disk_cache::Segment(0, false, &storage));
+ EXPECT_TRUE(segment->Init());
+
+ EXPECT_TRUE(segment->HasNoUsers());
+ segment->AddUser();
+ segment->AddUser();
+ EXPECT_FALSE(segment->HasNoUsers());
+
+ segment->ReleaseUser();
+ EXPECT_FALSE(segment->HasNoUsers());
+ segment->ReleaseUser();
+ EXPECT_TRUE(segment->HasNoUsers());
+
+ EXPECT_TRUE(segment->Close());
+}
+
+TEST_F(FlashCacheTest, SegmentCreateDestroy) {
+ disk_cache::Storage storage(path_, kStorageSize);
+ ASSERT_TRUE(storage.Init());
+
+ int32 index = 0;
+ scoped_ptr<disk_cache::Segment> segment(
+ new disk_cache::Segment(index, false, &storage));
+ EXPECT_TRUE(segment->Init());
+ EXPECT_TRUE(segment->Close());
+
+ index = kNumTestSegments - 1;
+ segment.reset(new disk_cache::Segment(index, false, &storage));
+ EXPECT_TRUE(segment->Init());
+ EXPECT_TRUE(segment->Close());
+
+ int32 invalid_index = kNumTestSegments;
+ segment.reset(new disk_cache::Segment(invalid_index, false, &storage));
+ EXPECT_FALSE(segment->Init());
+
+ invalid_index = -1;
+ segment.reset(new disk_cache::Segment(invalid_index, false, &storage));
+ EXPECT_FALSE(segment->Init());
+}
+
+TEST_F(FlashCacheTest, SegmentWriteDataReadData) {
+ disk_cache::Storage storage(path_, kStorageSize);
+ ASSERT_TRUE(storage.Init());
+
+ int32 index = rand() % kNumTestSegments;
+ scoped_ptr<disk_cache::Segment> segment(
+ new disk_cache::Segment(index, false, &storage));
+
+ EXPECT_TRUE(segment->Init());
+ SmallEntry entry1;
+ EXPECT_TRUE(segment->CanHold(entry1.size));
+ int32 offset = segment->write_offset();
+ EXPECT_TRUE(segment->WriteData(entry1.data, entry1.size));
+ segment->StoreOffset(offset);
+ EXPECT_TRUE(segment->Close());
+
+ segment.reset(new disk_cache::Segment(index, true, &storage));
+ EXPECT_TRUE(segment->Init());
+ SmallEntry entry2;
+ EXPECT_TRUE(segment->ReadData(entry2.data, entry2.size, offset));
+ EXPECT_EQ(entry1, entry2);
+ EXPECT_TRUE(segment->Close());
+}
+
+TEST_F(FlashCacheTest, SegmentFillWithSmallEntries) {
+ disk_cache::Storage storage(path_, kStorageSize);
+ ASSERT_TRUE(storage.Init());
+
+ int32 index = rand() % kNumTestSegments;
+ scoped_ptr<disk_cache::Segment> segment(
+ new disk_cache::Segment(index, false, &storage));
+
+ EXPECT_TRUE(segment->Init());
+ SmallEntry entry;
+ int32 num_bytes_written = 0;
+ while (segment->CanHold(entry.size)) {
+ int32 offset = segment->write_offset();
+ EXPECT_TRUE(segment->WriteData(entry.data, entry.size));
+ segment->StoreOffset(offset);
+ num_bytes_written += entry.size;
+ }
+ int32 space_left = kSegmentFreeSpace - num_bytes_written;
+ EXPECT_GE(space_left, entry.size);
+ EXPECT_EQ(segment->GetOffsets().size(), disk_cache::kFlashMaxEntryCount);
+ EXPECT_TRUE(segment->Close());
+}
+
+TEST_F(FlashCacheTest, SegmentFillWithLargeEntries) {
+ disk_cache::Storage storage(path_, kStorageSize);
+ ASSERT_TRUE(storage.Init());
+
+ int32 index = rand() % kNumTestSegments;
+ scoped_ptr<disk_cache::Segment> segment(
+ new disk_cache::Segment(index, false, &storage));
+
+ EXPECT_TRUE(segment->Init());
+ scoped_ptr<LargeEntry> entry(new LargeEntry);
+ int32 num_bytes_written = 0;
+ while (segment->CanHold(entry->size)) {
+ int32 offset = segment->write_offset();
+ EXPECT_TRUE(segment->WriteData(entry->data, entry->size));
+ segment->StoreOffset(offset);
+ num_bytes_written += entry->size;
+ }
+ int32 space_left = kSegmentFreeSpace - num_bytes_written;
+ EXPECT_LT(space_left, entry->size);
+ EXPECT_LT(segment->GetOffsets().size(), disk_cache::kFlashMaxEntryCount);
+ EXPECT_TRUE(segment->Close());
+}
diff --git a/chromium/net/disk_cache/flash/storage.cc b/chromium/net/disk_cache/flash/storage.cc
new file mode 100644
index 00000000000..c7136ef869b
--- /dev/null
+++ b/chromium/net/disk_cache/flash/storage.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/flash/storage.h"
+
+#include <fcntl.h>
+
+#include "base/logging.h"
+#include "base/platform_file.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/flash/format.h"
+
+namespace disk_cache {
+
+Storage::Storage(const base::FilePath& path,
+ int32 size)
+ : path_(path), size_(size) {
+ COMPILE_ASSERT(kFlashPageSize % 2 == 0, invalid_page_size);
+ COMPILE_ASSERT(kFlashBlockSize % kFlashPageSize == 0, invalid_block_size);
+ DCHECK(size_ % kFlashBlockSize == 0);
+}
+
+bool Storage::Init() {
+ int flags = base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_OPEN_ALWAYS;
+
+ file_ = base::CreatePlatformFile(path_, flags, NULL, NULL);
+ if (file_ == base::kInvalidPlatformFileValue)
+ return false;
+
+ // TODO(agayev): if file already exists, do some validation and return either
+ // true or false based on the result.
+
+#if defined(OS_LINUX)
+ fallocate(file_, 0, 0, size_);
+#endif
+
+ return true;
+}
+
+Storage::~Storage() {
+ base::ClosePlatformFile(file_);
+}
+
+bool Storage::Read(void* buffer, int32 size, int32 offset) {
+ DCHECK(offset >= 0 && offset + size <= size_);
+
+ int rv = base::ReadPlatformFile(file_, offset, static_cast<char*>(buffer),
+ size);
+ return rv == size;
+}
+
+bool Storage::Write(const void* buffer, int32 size, int32 offset) {
+ DCHECK(offset >= 0 && offset + size <= size_);
+
+ int rv = base::WritePlatformFile(file_, offset,
+ static_cast<const char*>(buffer), size);
+ return rv == size;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/flash/storage.h b/chromium/net/disk_cache/flash/storage.h
new file mode 100644
index 00000000000..38c4a4e488b
--- /dev/null
+++ b/chromium/net/disk_cache/flash/storage.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_STORAGE_H_
+#define NET_DISK_CACHE_FLASH_STORAGE_H_
+
+#include "base/basictypes.h"
+#include "base/platform_file.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+
+class NET_EXPORT_PRIVATE Storage {
+ public:
+ Storage(const base::FilePath& path, int32 size);
+ bool Init();
+ ~Storage();
+
+ int32 size() const { return size_; }
+
+ bool Read(void* buffer, int32 size, int32 offset);
+ bool Write(const void* buffer, int32 size, int32 offset);
+
+ private:
+ base::FilePath path_;
+ int32 size_;
+ base::PlatformFile file_;
+
+ DISALLOW_COPY_AND_ASSIGN(Storage);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_STORAGE_H_
diff --git a/chromium/net/disk_cache/flash/storage_unittest.cc b/chromium/net/disk_cache/flash/storage_unittest.cc
new file mode 100644
index 00000000000..e8a1e3f18c8
--- /dev/null
+++ b/chromium/net/disk_cache/flash/storage_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+#include "base/files/scoped_temp_dir.h"
+#include "net/base/io_buffer.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/flash/flash_cache_test_base.h"
+#include "net/disk_cache/flash/storage.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int32 kSizes[] = {512, 1024, 4096, 133, 1333, 13333};
+const int32 kOffsets[] = {0, 1, 3333, 125, 12443, 4431};
+
+} // namespace
+
+TEST_F(FlashCacheTest, StorageReadWrite) {
+ disk_cache::Storage storage(path_, kStorageSize);
+ EXPECT_TRUE(storage.Init());
+
+ for (size_t i = 0; i < arraysize(kOffsets); ++i) {
+ int32 size = kSizes[i];
+ int32 offset = kOffsets[i];
+
+ scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(size));
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(size));
+
+ CacheTestFillBuffer(write_buffer->data(), size, false);
+
+ bool rv = storage.Write(write_buffer->data(), size, offset);
+ EXPECT_TRUE(rv);
+
+ rv = storage.Read(read_buffer->data(), size, offset);
+ EXPECT_TRUE(rv);
+
+ EXPECT_EQ(0, memcmp(read_buffer->data(), write_buffer->data(), size));
+ }
+}
diff --git a/chromium/net/disk_cache/histogram_macros.h b/chromium/net/disk_cache/histogram_macros.h
new file mode 100644
index 00000000000..3d8011c27f6
--- /dev/null
+++ b/chromium/net/disk_cache/histogram_macros.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros to simplify histogram reporting from the disk
+// cache. The main issue is that we want to have separate histograms for each
+// type of cache (regular vs. media, etc.), without adding the complexity of
+// keeping track of a potentially large number of histogram objects that have to
+// survive the backend object that created them.
+
+#ifndef NET_DISK_CACHE_HISTOGRAM_MACROS_H_
+#define NET_DISK_CACHE_HISTOGRAM_MACROS_H_
+
+// -----------------------------------------------------------------------------
+
+// These histograms follow the definition of UMA_HISTOGRAM_XXX except that
+// whenever the name changes (the experiment group changes), the histogram
+// object is re-created.
+// Note: These macros are only run on one thread, so the declaration of
+// |counter| is made static (i.e., there will be no race for reinitialization).
+
+#define CACHE_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ do { \
+ static base::HistogramBase* counter(NULL); \
+ if (!counter || name != counter->histogram_name()) \
+ counter = base::Histogram::FactoryGet( \
+ name, min, max, bucket_count, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ counter->Add(sample); \
+ } while (0)
+
+#define CACHE_HISTOGRAM_COUNTS(name, sample) CACHE_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000000, 50)
+
+#define CACHE_HISTOGRAM_COUNTS_10000(name, sample) \
+ CACHE_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
+
+#define CACHE_HISTOGRAM_COUNTS_50000(name, sample) \
+ CACHE_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 50000000, 50)
+
+#define CACHE_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ do { \
+ static base::HistogramBase* counter(NULL); \
+ if (!counter || name != counter->histogram_name()) \
+ counter = base::Histogram::FactoryTimeGet( \
+ name, min, max, bucket_count, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ counter->AddTime(sample); \
+ } while (0)
+
+#define CACHE_HISTOGRAM_TIMES(name, sample) CACHE_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromSeconds(10), 50)
+
+#define CACHE_HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
+ static base::HistogramBase* counter(NULL); \
+ if (!counter || name != counter->histogram_name()) \
+ counter = base::LinearHistogram::FactoryGet( \
+ name, 1, boundary_value, boundary_value + 1, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ counter->Add(sample); \
+ } while (0)
+
+#define CACHE_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+ CACHE_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
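+// A minimal usage sketch (illustrative only; the histogram names and variables
+// below are made up):
+//   CACHE_HISTOGRAM_COUNTS("DiskCache.Example.BytesRead", bytes_read);
+//   CACHE_HISTOGRAM_TIMES("DiskCache.Example.OpenTime",
+//                         base::TimeTicks::Now() - start_time);
+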
+// -----------------------------------------------------------------------------
+
+// HISTOGRAM_HOURS will collect time related data with a granularity of hours
+// and normal values of a few months.
+#define CACHE_HISTOGRAM_HOURS CACHE_HISTOGRAM_COUNTS_10000
+
+// HISTOGRAM_AGE will collect time elapsed since |initial_time|, with a
+// granularity of hours and normal values of a few months.
+#define CACHE_HISTOGRAM_AGE(name, initial_time) \
+ CACHE_HISTOGRAM_COUNTS_10000(name, \
+ (base::Time::Now() - initial_time).InHours())
+
+// HISTOGRAM_AGE_MS will collect time elapsed since |initial_time|, with the
+// normal resolution of the UMA_HISTOGRAM_TIMES.
+#define CACHE_HISTOGRAM_AGE_MS(name, initial_time)\
+ CACHE_HISTOGRAM_TIMES(name, base::TimeTicks::Now() - initial_time)
+
+#define CACHE_HISTOGRAM_CACHE_ERROR(name, sample) \
+ CACHE_HISTOGRAM_ENUMERATION(name, sample, 50)
+
+#ifdef NET_DISK_CACHE_BACKEND_IMPL_CC_
+#define BACKEND_OBJ this
+#else
+#define BACKEND_OBJ backend_
+#endif
+
+// Generates a UMA histogram of the given type, building the proper name for
+// it (by asking backend_->HistogramName) and adding the provided sample.
+// For example, to generate a regular UMA_HISTOGRAM_COUNTS, this macro would
+// be used as:
+// CACHE_UMA(COUNTS, "MyName", 0, 20);
+// CACHE_UMA(COUNTS, "MyExperiment", 530, 55);
+// which roughly translates to:
+// UMA_HISTOGRAM_COUNTS("DiskCache.2.MyName", 20); // "2" is the CacheType.
+// UMA_HISTOGRAM_COUNTS("DiskCache.2.MyExperiment_530", 55);
+//
+#define CACHE_UMA(type, name, experiment, sample) {\
+ const std::string my_name = BACKEND_OBJ->HistogramName(name, experiment);\
+ switch (BACKEND_OBJ->cache_type()) {\
+ case net::DISK_CACHE:\
+ CACHE_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ case net::MEDIA_CACHE:\
+ CACHE_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ case net::APP_CACHE:\
+ CACHE_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ case net::SHADER_CACHE:\
+ CACHE_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ default:\
+ NOTREACHED();\
+ break;\
+ }\
+ }
+
+#endif // NET_DISK_CACHE_HISTOGRAM_MACROS_H_
diff --git a/chromium/net/disk_cache/in_flight_backend_io.cc b/chromium/net/disk_cache/in_flight_backend_io.cc
new file mode 100644
index 00000000000..3ed9e4dd191
--- /dev/null
+++ b/chromium/net/disk_cache/in_flight_backend_io.cc
@@ -0,0 +1,522 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/in_flight_backend_io.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/histogram_macros.h"
+
+namespace disk_cache {
+
+BackendIO::BackendIO(InFlightIO* controller, BackendImpl* backend,
+ const net::CompletionCallback& callback)
+ : BackgroundIO(controller),
+ backend_(backend),
+ callback_(callback),
+ operation_(OP_NONE),
+ entry_ptr_(NULL),
+ iter_ptr_(NULL),
+ iter_(NULL),
+ entry_(NULL),
+ index_(0),
+ offset_(0),
+ buf_len_(0),
+ truncate_(false),
+ offset64_(0),
+ start_(NULL) {
+ start_time_ = base::TimeTicks::Now();
+}
+
+// Runs on the background thread.
+void BackendIO::ExecuteOperation() {
+ if (IsEntryOperation())
+ return ExecuteEntryOperation();
+
+ ExecuteBackendOperation();
+}
+
+// Runs on the background thread.
+void BackendIO::OnIOComplete(int result) {
+ DCHECK(IsEntryOperation());
+ DCHECK_NE(result, net::ERR_IO_PENDING);
+ result_ = result;
+ NotifyController();
+}
+
+// Runs on the primary thread.
+void BackendIO::OnDone(bool cancel) {
+ if (IsEntryOperation()) {
+ CACHE_UMA(TIMES, "TotalIOTime", 0, ElapsedTime());
+ }
+
+ if (!ReturnsEntry())
+ return;
+
+ if (result() == net::OK) {
+ static_cast<EntryImpl*>(*entry_ptr_)->OnEntryCreated(backend_);
+ if (cancel)
+ (*entry_ptr_)->Close();
+ }
+}
+
+bool BackendIO::IsEntryOperation() {
+ return operation_ > OP_MAX_BACKEND;
+}
+
+// Runs on the background thread.
+void BackendIO::ReferenceEntry() {
+ entry_->AddRef();
+}
+
+void BackendIO::Init() {
+ operation_ = OP_INIT;
+}
+
+void BackendIO::OpenEntry(const std::string& key, Entry** entry) {
+ operation_ = OP_OPEN;
+ key_ = key;
+ entry_ptr_ = entry;
+}
+
+void BackendIO::CreateEntry(const std::string& key, Entry** entry) {
+ operation_ = OP_CREATE;
+ key_ = key;
+ entry_ptr_ = entry;
+}
+
+void BackendIO::DoomEntry(const std::string& key) {
+ operation_ = OP_DOOM;
+ key_ = key;
+}
+
+void BackendIO::DoomAllEntries() {
+ operation_ = OP_DOOM_ALL;
+}
+
+void BackendIO::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time) {
+ operation_ = OP_DOOM_BETWEEN;
+ initial_time_ = initial_time;
+ end_time_ = end_time;
+}
+
+void BackendIO::DoomEntriesSince(const base::Time initial_time) {
+ operation_ = OP_DOOM_SINCE;
+ initial_time_ = initial_time;
+}
+
+void BackendIO::OpenNextEntry(void** iter, Entry** next_entry) {
+ operation_ = OP_OPEN_NEXT;
+ iter_ptr_ = iter;
+ entry_ptr_ = next_entry;
+}
+
+void BackendIO::OpenPrevEntry(void** iter, Entry** prev_entry) {
+ operation_ = OP_OPEN_PREV;
+ iter_ptr_ = iter;
+ entry_ptr_ = prev_entry;
+}
+
+void BackendIO::EndEnumeration(void* iterator) {
+ operation_ = OP_END_ENUMERATION;
+ iter_ = iterator;
+}
+
+void BackendIO::OnExternalCacheHit(const std::string& key) {
+ operation_ = OP_ON_EXTERNAL_CACHE_HIT;
+ key_ = key;
+}
+
+void BackendIO::CloseEntryImpl(EntryImpl* entry) {
+ operation_ = OP_CLOSE_ENTRY;
+ entry_ = entry;
+}
+
+void BackendIO::DoomEntryImpl(EntryImpl* entry) {
+ operation_ = OP_DOOM_ENTRY;
+ entry_ = entry;
+}
+
+void BackendIO::FlushQueue() {
+ operation_ = OP_FLUSH_QUEUE;
+}
+
+void BackendIO::RunTask(const base::Closure& task) {
+ operation_ = OP_RUN_TASK;
+ task_ = task;
+}
+
+void BackendIO::ReadData(EntryImpl* entry, int index, int offset,
+ net::IOBuffer* buf, int buf_len) {
+ operation_ = OP_READ;
+ entry_ = entry;
+ index_ = index;
+ offset_ = offset;
+ buf_ = buf;
+ buf_len_ = buf_len;
+}
+
+void BackendIO::WriteData(EntryImpl* entry, int index, int offset,
+ net::IOBuffer* buf, int buf_len, bool truncate) {
+ operation_ = OP_WRITE;
+ entry_ = entry;
+ index_ = index;
+ offset_ = offset;
+ buf_ = buf;
+ buf_len_ = buf_len;
+ truncate_ = truncate;
+}
+
+void BackendIO::ReadSparseData(EntryImpl* entry, int64 offset,
+ net::IOBuffer* buf, int buf_len) {
+ operation_ = OP_READ_SPARSE;
+ entry_ = entry;
+ offset64_ = offset;
+ buf_ = buf;
+ buf_len_ = buf_len;
+}
+
+void BackendIO::WriteSparseData(EntryImpl* entry, int64 offset,
+ net::IOBuffer* buf, int buf_len) {
+ operation_ = OP_WRITE_SPARSE;
+ entry_ = entry;
+ offset64_ = offset;
+ buf_ = buf;
+ buf_len_ = buf_len;
+}
+
+void BackendIO::GetAvailableRange(EntryImpl* entry, int64 offset, int len,
+ int64* start) {
+ operation_ = OP_GET_RANGE;
+ entry_ = entry;
+ offset64_ = offset;
+ buf_len_ = len;
+ start_ = start;
+}
+
+void BackendIO::CancelSparseIO(EntryImpl* entry) {
+ operation_ = OP_CANCEL_IO;
+ entry_ = entry;
+}
+
+void BackendIO::ReadyForSparseIO(EntryImpl* entry) {
+ operation_ = OP_IS_READY;
+ entry_ = entry;
+}
+
+BackendIO::~BackendIO() {}
+
+bool BackendIO::ReturnsEntry() {
+ return (operation_ == OP_OPEN || operation_ == OP_CREATE ||
+ operation_ == OP_OPEN_NEXT || operation_ == OP_OPEN_PREV);
+}
+
+base::TimeDelta BackendIO::ElapsedTime() const {
+ return base::TimeTicks::Now() - start_time_;
+}
+
+// Runs on the background thread.
+void BackendIO::ExecuteBackendOperation() {
+ switch (operation_) {
+ case OP_INIT:
+ result_ = backend_->SyncInit();
+ break;
+ case OP_OPEN:
+ result_ = backend_->SyncOpenEntry(key_, entry_ptr_);
+ break;
+ case OP_CREATE:
+ result_ = backend_->SyncCreateEntry(key_, entry_ptr_);
+ break;
+ case OP_DOOM:
+ result_ = backend_->SyncDoomEntry(key_);
+ break;
+ case OP_DOOM_ALL:
+ result_ = backend_->SyncDoomAllEntries();
+ break;
+ case OP_DOOM_BETWEEN:
+ result_ = backend_->SyncDoomEntriesBetween(initial_time_, end_time_);
+ break;
+ case OP_DOOM_SINCE:
+ result_ = backend_->SyncDoomEntriesSince(initial_time_);
+ break;
+ case OP_OPEN_NEXT:
+ result_ = backend_->SyncOpenNextEntry(iter_ptr_, entry_ptr_);
+ break;
+ case OP_OPEN_PREV:
+ result_ = backend_->SyncOpenPrevEntry(iter_ptr_, entry_ptr_);
+ break;
+ case OP_END_ENUMERATION:
+ backend_->SyncEndEnumeration(iter_);
+ result_ = net::OK;
+ break;
+ case OP_ON_EXTERNAL_CACHE_HIT:
+ backend_->SyncOnExternalCacheHit(key_);
+ result_ = net::OK;
+ break;
+ case OP_CLOSE_ENTRY:
+ entry_->Release();
+ result_ = net::OK;
+ break;
+ case OP_DOOM_ENTRY:
+ entry_->DoomImpl();
+ result_ = net::OK;
+ break;
+ case OP_FLUSH_QUEUE:
+ result_ = net::OK;
+ break;
+ case OP_RUN_TASK:
+ task_.Run();
+ result_ = net::OK;
+ break;
+ default:
+ NOTREACHED() << "Invalid Operation";
+ result_ = net::ERR_UNEXPECTED;
+ }
+ DCHECK_NE(net::ERR_IO_PENDING, result_);
+ NotifyController();
+}
+
+// Runs on the background thread.
+void BackendIO::ExecuteEntryOperation() {
+ switch (operation_) {
+ case OP_READ:
+ result_ =
+ entry_->ReadDataImpl(index_, offset_, buf_.get(), buf_len_,
+ base::Bind(&BackendIO::OnIOComplete, this));
+ break;
+ case OP_WRITE:
+ result_ =
+ entry_->WriteDataImpl(index_, offset_, buf_.get(), buf_len_,
+ base::Bind(&BackendIO::OnIOComplete, this),
+ truncate_);
+ break;
+ case OP_READ_SPARSE:
+ result_ = entry_->ReadSparseDataImpl(
+ offset64_, buf_.get(), buf_len_,
+ base::Bind(&BackendIO::OnIOComplete, this));
+ break;
+ case OP_WRITE_SPARSE:
+ result_ = entry_->WriteSparseDataImpl(
+ offset64_, buf_.get(), buf_len_,
+ base::Bind(&BackendIO::OnIOComplete, this));
+ break;
+ case OP_GET_RANGE:
+ result_ = entry_->GetAvailableRangeImpl(offset64_, buf_len_, start_);
+ break;
+ case OP_CANCEL_IO:
+ entry_->CancelSparseIOImpl();
+ result_ = net::OK;
+ break;
+ case OP_IS_READY:
+ result_ = entry_->ReadyForSparseIOImpl(
+ base::Bind(&BackendIO::OnIOComplete, this));
+ break;
+ default:
+ NOTREACHED() << "Invalid Operation";
+ result_ = net::ERR_UNEXPECTED;
+ }
+ buf_ = NULL;
+ if (result_ != net::ERR_IO_PENDING)
+ NotifyController();
+}
+
+InFlightBackendIO::InFlightBackendIO(BackendImpl* backend,
+ base::MessageLoopProxy* background_thread)
+ : backend_(backend),
+ background_thread_(background_thread),
+ ptr_factory_(this) {
+}
+
+InFlightBackendIO::~InFlightBackendIO() {
+}
+
+void InFlightBackendIO::Init(const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->Init();
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::OpenEntry(const std::string& key, Entry** entry,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->OpenEntry(key, entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::CreateEntry(const std::string& key, Entry** entry,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->CreateEntry(key, entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::DoomEntry(const std::string& key,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->DoomEntry(key);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::DoomAllEntries(
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->DoomAllEntries();
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->DoomEntriesBetween(initial_time, end_time);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::DoomEntriesSince(
+ const base::Time initial_time, const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->DoomEntriesSince(initial_time);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::OpenNextEntry(void** iter, Entry** next_entry,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->OpenNextEntry(iter, next_entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::OpenPrevEntry(void** iter, Entry** prev_entry,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->OpenPrevEntry(iter, prev_entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::EndEnumeration(void* iterator) {
+ scoped_refptr<BackendIO> operation(
+ new BackendIO(this, backend_, net::CompletionCallback()));
+ operation->EndEnumeration(iterator);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::OnExternalCacheHit(const std::string& key) {
+ scoped_refptr<BackendIO> operation(
+ new BackendIO(this, backend_, net::CompletionCallback()));
+ operation->OnExternalCacheHit(key);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::CloseEntryImpl(EntryImpl* entry) {
+ scoped_refptr<BackendIO> operation(
+ new BackendIO(this, backend_, net::CompletionCallback()));
+ operation->CloseEntryImpl(entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::DoomEntryImpl(EntryImpl* entry) {
+ scoped_refptr<BackendIO> operation(
+ new BackendIO(this, backend_, net::CompletionCallback()));
+ operation->DoomEntryImpl(entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::FlushQueue(const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->FlushQueue();
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::RunTask(
+ const base::Closure& task, const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->RunTask(task);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::ReadData(EntryImpl* entry, int index, int offset,
+ net::IOBuffer* buf, int buf_len,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->ReadData(entry, index, offset, buf, buf_len);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::WriteData(EntryImpl* entry, int index, int offset,
+ net::IOBuffer* buf, int buf_len,
+ bool truncate,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->WriteData(entry, index, offset, buf, buf_len, truncate);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::ReadSparseData(
+ EntryImpl* entry, int64 offset, net::IOBuffer* buf, int buf_len,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->ReadSparseData(entry, offset, buf, buf_len);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::WriteSparseData(
+ EntryImpl* entry, int64 offset, net::IOBuffer* buf, int buf_len,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->WriteSparseData(entry, offset, buf, buf_len);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::GetAvailableRange(
+ EntryImpl* entry, int64 offset, int len, int64* start,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->GetAvailableRange(entry, offset, len, start);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::CancelSparseIO(EntryImpl* entry) {
+ scoped_refptr<BackendIO> operation(
+ new BackendIO(this, backend_, net::CompletionCallback()));
+ operation->CancelSparseIO(entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::ReadyForSparseIO(
+ EntryImpl* entry, const net::CompletionCallback& callback) {
+ scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
+ operation->ReadyForSparseIO(entry);
+ PostOperation(operation.get());
+}
+
+void InFlightBackendIO::WaitForPendingIO() {
+ InFlightIO::WaitForPendingIO();
+}
+
+void InFlightBackendIO::OnOperationComplete(BackgroundIO* operation,
+ bool cancel) {
+ BackendIO* op = static_cast<BackendIO*>(operation);
+ op->OnDone(cancel);
+
+ if (!op->callback().is_null() && (!cancel || op->IsEntryOperation()))
+ op->callback().Run(op->result());
+}
+
+void InFlightBackendIO::PostOperation(BackendIO* operation) {
+ background_thread_->PostTask(FROM_HERE,
+ base::Bind(&BackendIO::ExecuteOperation, operation));
+ OnOperationPosted(operation);
+}
+
+base::WeakPtr<InFlightBackendIO> InFlightBackendIO::GetWeakPtr() {
+ return ptr_factory_.GetWeakPtr();
+}
+
+} // namespace
diff --git a/chromium/net/disk_cache/in_flight_backend_io.h b/chromium/net/disk_cache/in_flight_backend_io.h
new file mode 100644
index 00000000000..4ff081d07c5
--- /dev/null
+++ b/chromium/net/disk_cache/in_flight_backend_io.h
@@ -0,0 +1,223 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_IN_FLIGHT_BACKEND_IO_H_
+#define NET_DISK_CACHE_IN_FLIGHT_BACKEND_IO_H_
+
+#include <list>
+#include <string>
+
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/time/time.h"
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+#include "net/disk_cache/in_flight_io.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class Entry;
+class EntryImpl;
+
+// This class represents a single asynchronous disk cache IO operation while it
+// is being bounced between threads.
+class BackendIO : public BackgroundIO {
+ public:
+ BackendIO(InFlightIO* controller, BackendImpl* backend,
+ const net::CompletionCallback& callback);
+
+ // Runs the actual operation on the background thread.
+ void ExecuteOperation();
+
+ // Callback implementation.
+ void OnIOComplete(int result);
+
+ // Called when we are finishing this operation. If |cancel| is true, the user
+ // callback will not be invoked.
+ void OnDone(bool cancel);
+
+ // Returns true if this operation is directed to an entry (vs. the backend).
+ bool IsEntryOperation();
+
+ net::CompletionCallback callback() const { return callback_; }
+
+ // Grabs an extra reference of entry_.
+ void ReferenceEntry();
+
+ // The operations we proxy:
+ void Init();
+ void OpenEntry(const std::string& key, Entry** entry);
+ void CreateEntry(const std::string& key, Entry** entry);
+ void DoomEntry(const std::string& key);
+ void DoomAllEntries();
+ void DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ void DoomEntriesSince(const base::Time initial_time);
+ void OpenNextEntry(void** iter, Entry** next_entry);
+ void OpenPrevEntry(void** iter, Entry** prev_entry);
+ void EndEnumeration(void* iterator);
+ void OnExternalCacheHit(const std::string& key);
+ void CloseEntryImpl(EntryImpl* entry);
+ void DoomEntryImpl(EntryImpl* entry);
+ void FlushQueue(); // Dummy operation.
+ void RunTask(const base::Closure& task);
+ void ReadData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf,
+ int buf_len);
+ void WriteData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf,
+ int buf_len, bool truncate);
+ void ReadSparseData(EntryImpl* entry, int64 offset, net::IOBuffer* buf,
+ int buf_len);
+ void WriteSparseData(EntryImpl* entry, int64 offset, net::IOBuffer* buf,
+ int buf_len);
+ void GetAvailableRange(EntryImpl* entry, int64 offset, int len, int64* start);
+ void CancelSparseIO(EntryImpl* entry);
+ void ReadyForSparseIO(EntryImpl* entry);
+
+ private:
+ // There are two types of operations to proxy: regular backend operations are
+ // executed sequentially (queued by the message loop). On the other hand,
+ // operations targeted to a given entry can be long lived and support multiple
+ // simultaneous users (multiple reads or writes to the same entry), and they
+ // are subject to throttling, so we keep an explicit queue.
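+ // In the Operation enum below, the values up to OP_MAX_BACKEND are the
+ // backend-wide operations and the values after it are the per-entry ones;
+ // OP_MAX_BACKEND itself is only a sentinel separating the two groups
+ // (presumably what IsEntryOperation() keys off).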
+ enum Operation {
+ OP_NONE = 0,
+ OP_INIT,
+ OP_OPEN,
+ OP_CREATE,
+ OP_DOOM,
+ OP_DOOM_ALL,
+ OP_DOOM_BETWEEN,
+ OP_DOOM_SINCE,
+ OP_OPEN_NEXT,
+ OP_OPEN_PREV,
+ OP_END_ENUMERATION,
+ OP_ON_EXTERNAL_CACHE_HIT,
+ OP_CLOSE_ENTRY,
+ OP_DOOM_ENTRY,
+ OP_FLUSH_QUEUE,
+ OP_RUN_TASK,
+ OP_MAX_BACKEND,
+ OP_READ,
+ OP_WRITE,
+ OP_READ_SPARSE,
+ OP_WRITE_SPARSE,
+ OP_GET_RANGE,
+ OP_CANCEL_IO,
+ OP_IS_READY
+ };
+
+ virtual ~BackendIO();
+
+ // Returns true if this operation returns an entry.
+ bool ReturnsEntry();
+
+ // Returns the time that has passed since the operation was created.
+ base::TimeDelta ElapsedTime() const;
+
+ void ExecuteBackendOperation();
+ void ExecuteEntryOperation();
+
+ BackendImpl* backend_;
+ net::CompletionCallback callback_;
+ Operation operation_;
+
+ // The arguments of all the operations we proxy:
+ std::string key_;
+ Entry** entry_ptr_;
+ base::Time initial_time_;
+ base::Time end_time_;
+ void** iter_ptr_;
+ void* iter_;
+ EntryImpl* entry_;
+ int index_;
+ int offset_;
+ scoped_refptr<net::IOBuffer> buf_;
+ int buf_len_;
+ bool truncate_;
+ int64 offset64_;
+ int64* start_;
+ base::TimeTicks start_time_;
+ base::Closure task_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackendIO);
+};
+
+// The specialized controller that keeps track of current operations.
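+// Each BackendImpl owns one of these and forwards its asynchronous calls
+// through it, so that the actual work runs on the cache thread.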
+class InFlightBackendIO : public InFlightIO {
+ public:
+ InFlightBackendIO(BackendImpl* backend,
+ base::MessageLoopProxy* background_thread);
+ virtual ~InFlightBackendIO();
+
+ // Proxied operations.
+ void Init(const net::CompletionCallback& callback);
+ void OpenEntry(const std::string& key, Entry** entry,
+ const net::CompletionCallback& callback);
+ void CreateEntry(const std::string& key, Entry** entry,
+ const net::CompletionCallback& callback);
+ void DoomEntry(const std::string& key,
+ const net::CompletionCallback& callback);
+ void DoomAllEntries(const net::CompletionCallback& callback);
+ void DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ const net::CompletionCallback& callback);
+ void DoomEntriesSince(const base::Time initial_time,
+ const net::CompletionCallback& callback);
+ void OpenNextEntry(void** iter, Entry** next_entry,
+ const net::CompletionCallback& callback);
+ void OpenPrevEntry(void** iter, Entry** prev_entry,
+ const net::CompletionCallback& callback);
+ void EndEnumeration(void* iterator);
+ void OnExternalCacheHit(const std::string& key);
+ void CloseEntryImpl(EntryImpl* entry);
+ void DoomEntryImpl(EntryImpl* entry);
+ void FlushQueue(const net::CompletionCallback& callback);
+ void RunTask(const base::Closure& task,
+ const net::CompletionCallback& callback);
+ void ReadData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf,
+ int buf_len, const net::CompletionCallback& callback);
+ void WriteData(
+ EntryImpl* entry, int index, int offset, net::IOBuffer* buf,
+ int buf_len, bool truncate, const net::CompletionCallback& callback);
+ void ReadSparseData(EntryImpl* entry, int64 offset, net::IOBuffer* buf,
+ int buf_len, const net::CompletionCallback& callback);
+ void WriteSparseData(EntryImpl* entry, int64 offset, net::IOBuffer* buf,
+ int buf_len, const net::CompletionCallback& callback);
+ void GetAvailableRange(EntryImpl* entry, int64 offset, int len, int64* start,
+ const net::CompletionCallback& callback);
+ void CancelSparseIO(EntryImpl* entry);
+ void ReadyForSparseIO(EntryImpl* entry,
+ const net::CompletionCallback& callback);
+
+ // Blocks until all operations are cancelled or completed.
+ void WaitForPendingIO();
+
+ scoped_refptr<base::MessageLoopProxy> background_thread() {
+ return background_thread_;
+ }
+
+ // Returns true if the current thread is the background thread.
+ bool BackgroundIsCurrentThread() {
+ return background_thread_->BelongsToCurrentThread();
+ }
+
+ base::WeakPtr<InFlightBackendIO> GetWeakPtr();
+
+ protected:
+ virtual void OnOperationComplete(BackgroundIO* operation,
+ bool cancel) OVERRIDE;
+
+ private:
+ void PostOperation(BackendIO* operation);
+
+ BackendImpl* backend_;
+ scoped_refptr<base::MessageLoopProxy> background_thread_;
+ base::WeakPtrFactory<InFlightBackendIO> ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(InFlightBackendIO);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_IN_FLIGHT_BACKEND_IO_H_
diff --git a/chromium/net/disk_cache/in_flight_io.cc b/chromium/net/disk_cache/in_flight_io.cc
new file mode 100644
index 00000000000..467814f22f1
--- /dev/null
+++ b/chromium/net/disk_cache/in_flight_io.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/in_flight_io.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace disk_cache {
+
+BackgroundIO::BackgroundIO(InFlightIO* controller)
+ : result_(-1), io_completed_(true, false), controller_(controller) {
+}
+
+// Runs on the primary thread.
+void BackgroundIO::OnIOSignalled() {
+ if (controller_)
+ controller_->InvokeCallback(this, false);
+}
+
+void BackgroundIO::Cancel() {
+ // controller_ may be in use from the background thread at this time.
+ base::AutoLock lock(controller_lock_);
+ DCHECK(controller_);
+ controller_ = NULL;
+}
+
+BackgroundIO::~BackgroundIO() {
+}
+
+// ---------------------------------------------------------------------------
+
+InFlightIO::InFlightIO()
+ : callback_thread_(base::MessageLoopProxy::current()),
+ running_(false), single_thread_(false) {
+}
+
+InFlightIO::~InFlightIO() {
+}
+
+// Runs on the background thread.
+void BackgroundIO::NotifyController() {
+ base::AutoLock lock(controller_lock_);
+ if (controller_)
+ controller_->OnIOComplete(this);
+}
+
+void InFlightIO::WaitForPendingIO() {
+ while (!io_list_.empty()) {
+ // Block the current thread until all pending IO completes.
+ IOList::iterator it = io_list_.begin();
+ InvokeCallback(it->get(), true);
+ }
+}
+
+void InFlightIO::DropPendingIO() {
+ while (!io_list_.empty()) {
+ IOList::iterator it = io_list_.begin();
+ BackgroundIO* operation = it->get();
+ operation->Cancel();
+ DCHECK(io_list_.find(operation) != io_list_.end());
+ io_list_.erase(make_scoped_refptr(operation));
+ }
+}
+
+// Runs on a background thread.
+void InFlightIO::OnIOComplete(BackgroundIO* operation) {
+#ifndef NDEBUG
+ if (callback_thread_->BelongsToCurrentThread()) {
+ DCHECK(single_thread_ || !running_);
+ single_thread_ = true;
+ }
+#endif
+
+ callback_thread_->PostTask(FROM_HERE,
+ base::Bind(&BackgroundIO::OnIOSignalled,
+ operation));
+ operation->io_completed()->Signal();
+}
+
+// Runs on the primary thread.
+void InFlightIO::InvokeCallback(BackgroundIO* operation, bool cancel_task) {
+ {
+ // http://crbug.com/74623
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ operation->io_completed()->Wait();
+ }
+ running_ = true;
+
+ if (cancel_task)
+ operation->Cancel();
+
+ // Make sure that we remove the operation from the list before invoking the
+ // callback (so that a subsequent cancel does not invoke the callback again).
+ DCHECK(io_list_.find(operation) != io_list_.end());
+ DCHECK(!operation->HasOneRef());
+ io_list_.erase(make_scoped_refptr(operation));
+ OnOperationComplete(operation, cancel_task);
+}
+
+// Runs on the primary thread.
+void InFlightIO::OnOperationPosted(BackgroundIO* operation) {
+ DCHECK(callback_thread_->BelongsToCurrentThread());
+ io_list_.insert(make_scoped_refptr(operation));
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/in_flight_io.h b/chromium/net/disk_cache/in_flight_io.h
new file mode 100644
index 00000000000..2a3330445ba
--- /dev/null
+++ b/chromium/net/disk_cache/in_flight_io.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_IN_FLIGHT_IO_H_
+#define NET_DISK_CACHE_IN_FLIGHT_IO_H_
+
+#include <set>
+
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace disk_cache {
+
+class InFlightIO;
+
+// This class represents a single asynchronous IO operation while it is being
+// bounced between threads.
+class BackgroundIO : public base::RefCountedThreadSafe<BackgroundIO> {
+ public:
+ // Other than the actual parameters for the IO operation (including the
+ // |callback| that must be notified at the end), we need the controller that
+ // is keeping track of all operations. When done, we notify the controller
+ // (we do NOT invoke the callback), in the worker thread that completed the
+ // operation.
+ explicit BackgroundIO(InFlightIO* controller);
+
+ // This method signals the controller that this operation is finished, in the
+ // original thread. In practice, this is a RunnableMethod that allows
+ // cancellation.
+ void OnIOSignalled();
+
+ // Allows the cancellation of the task that notifies the controller (step
+ // number 8 in the diagram below). In practice, if the controller waits for
+ // the operation to finish, it doesn't have to wait for the final task to be
+ // processed by the message loop, so calling this method prevents its
+ // delivery. Note that this method is not intended to cancel the actual IO
+ // operation or to prevent the first notification (OnIOComplete) from taking
+ // place.
+ void Cancel();
+
+ int result() { return result_; }
+
+ base::WaitableEvent* io_completed() {
+ return &io_completed_;
+ }
+
+ protected:
+ virtual ~BackgroundIO();
+
+ // Notifies the controller about the end of the operation, from the background
+ // thread.
+ void NotifyController();
+
+ int result_; // Final operation result.
+
+ private:
+ friend class base::RefCountedThreadSafe<BackgroundIO>;
+
+ // An event to signal when the operation completes.
+ base::WaitableEvent io_completed_;
+ InFlightIO* controller_; // The controller that tracks all operations.
+ base::Lock controller_lock_; // A lock protecting clearing of controller_.
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundIO);
+};
+
+// This class keeps track of asynchronous IO operations. A single instance
+// of this class is meant to be used to start an asynchronous operation (using
+// PostXX, exposed by a derived class). This class will post the operation to a
+// worker thread, handle the notification when the operation finishes and
+// perform the callback on the same thread that was used to start the operation.
+//
+// The regular sequence of calls is:
+// Thread_1 Worker_thread
+// 1. DerivedInFlightIO::PostXX()
+// 2. -> PostTask ->
+// 3. InFlightIO::OnOperationPosted()
+// 4. DerivedBackgroundIO::XX()
+// 5. IO operation completes
+// 6. InFlightIO::OnIOComplete()
+// 7. <- PostTask <-
+// 8. BackgroundIO::OnIOSignalled()
+// 9. InFlightIO::InvokeCallback()
+// 10. DerivedInFlightIO::OnOperationComplete()
+// 11. invoke callback
+//
+// Shutdown is a special case that is handled through WaitForPendingIO() instead
+// of just waiting for step 7.
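+//
+// A minimal sketch of a derived pair, with hypothetical names (MyIO,
+// MyInFlightIO, DoTheWork and |worker_| are illustrative only; BackendIO and
+// InFlightBackendIO in in_flight_backend_io.h are the real derived classes):
+//
+//   class MyIO : public BackgroundIO {
+//    public:
+//     explicit MyIO(InFlightIO* controller) : BackgroundIO(controller) {}
+//     void Execute() {            // Runs on the worker thread (step 4).
+//       result_ = DoTheWork();
+//       NotifyController();       // Step 6: hands control back to InFlightIO.
+//     }
+//   };
+//
+//   class MyInFlightIO : public InFlightIO {
+//    public:
+//     void PostWork(const net::CompletionCallback& callback) {
+//       scoped_refptr<MyIO> operation(new MyIO(this));
+//       worker_->PostTask(FROM_HERE, base::Bind(&MyIO::Execute, operation));
+//       OnOperationPosted(operation.get());  // Step 3.
+//     }
+//    protected:
+//     virtual void OnOperationComplete(BackgroundIO* op,
+//                                      bool cancel) OVERRIDE {
+//       // Step 10: run the callback captured by PostWork() unless |cancel|.
+//     }
+//   };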
+class InFlightIO {
+ public:
+ InFlightIO();
+ virtual ~InFlightIO();
+
+ // Blocks the current thread until all IO operations tracked by this object
+ // complete.
+ void WaitForPendingIO();
+
+ // Drops current pending operations without waiting for them to complete.
+ void DropPendingIO();
+
+ // Called on a background thread when |operation| completes.
+ void OnIOComplete(BackgroundIO* operation);
+
+ // Invokes the user's completion callback at the end of the IO operation.
+ // |cancel_task| is true if the actual task posted to the thread is still
+ // queued (because we are inside WaitForPendingIO), and false if said task is
+ // the one performing the call.
+ void InvokeCallback(BackgroundIO* operation, bool cancel_task);
+
+ protected:
+ // This method is called to signal the completion of the |operation|. |cancel|
+ // is true if the operation is being cancelled. This method is called on the
+ // thread that created this object.
+ virtual void OnOperationComplete(BackgroundIO* operation, bool cancel) = 0;
+
+ // Signals this object that the derived class just posted the |operation| to
+ // be executed on a background thread. This method must be called on the same
+ // thread used to create this object.
+ void OnOperationPosted(BackgroundIO* operation);
+
+ private:
+ typedef std::set<scoped_refptr<BackgroundIO> > IOList;
+
+ IOList io_list_; // List of pending, in-flight io operations.
+ scoped_refptr<base::MessageLoopProxy> callback_thread_;
+
+ bool running_; // True after the first posted operation completes.
+ bool single_thread_; // True if we only have one thread.
+
+ DISALLOW_COPY_AND_ASSIGN(InFlightIO);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_IN_FLIGHT_IO_H_
diff --git a/chromium/net/disk_cache/mapped_file.cc b/chromium/net/disk_cache/mapped_file.cc
new file mode 100644
index 00000000000..f17a1004a90
--- /dev/null
+++ b/chromium/net/disk_cache/mapped_file.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+void* MappedFile::Init(const base::FilePath& name, size_t size) {
+ DCHECK(!init_);
+ if (init_ || !File::Init(name))
+ return NULL;
+
+ buffer_ = NULL;
+ init_ = true;
+ section_ = CreateFileMapping(platform_file(), NULL, PAGE_READWRITE, 0,
+ static_cast<DWORD>(size), NULL);
+ if (!section_)
+ return NULL;
+
+ buffer_ = MapViewOfFile(section_, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, size);
+ DCHECK(buffer_);
+ view_size_ = size;
+
+ // Make sure we detect hardware failures reading the headers.
+ size_t temp_len = size ? size : 4096;
+ scoped_ptr<char[]> temp(new char[temp_len]);
+ if (!Read(temp.get(), temp_len, 0))
+ return NULL;
+
+ return buffer_;
+}
+
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_) {
+ BOOL ret = UnmapViewOfFile(buffer_);
+ DCHECK(ret);
+ }
+
+ if (section_)
+ CloseHandle(section_);
+}
+
+bool MappedFile::Load(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Read(block->buffer(), block->size(), offset);
+}
+
+bool MappedFile::Store(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Write(block->buffer(), block->size(), offset);
+}
+
+void MappedFile::Flush() {
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mapped_file.h b/chromium/net/disk_cache/mapped_file.h
new file mode 100644
index 00000000000..4649b90d1c9
--- /dev/null
+++ b/chromium/net/disk_cache/mapped_file.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MAPPED_FILE_H_
+#define NET_DISK_CACHE_MAPPED_FILE_H_
+
+#include "net/base/net_export.h"
+#include "net/disk_cache/file.h"
+#include "net/disk_cache/file_block.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace disk_cache {
+
+// This class implements a memory mapped file used to access block-files. The
+// idea is that the header and bitmap will be memory mapped all the time, and
+// the actual data for the blocks will be accessed asynchronously (most of the
+// time).
+class NET_EXPORT_PRIVATE MappedFile : public File {
+ public:
+ MappedFile() : File(true), init_(false) {}
+
+ // Performs object initialization. name is the file to use, and size is the
+ // amount of data to memory map from the file. If size is 0, the whole file
+ // will be mapped in memory.
+ void* Init(const base::FilePath& name, size_t size);
+
+ void* buffer() const {
+ return buffer_;
+ }
+
+ // Loads or stores a given block from the backing file (synchronously).
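+ // The block data lives in the file past the memory-mapped region, so the
+ // implementations address it at block->offset() + view_size_ (see the
+ // platform-specific mapped_file_*.cc files).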
+ bool Load(const FileBlock* block);
+ bool Store(const FileBlock* block);
+
+ // Flush the memory-mapped section to disk (synchronously).
+ void Flush();
+
+ private:
+ virtual ~MappedFile();
+
+ bool init_;
+#if defined(OS_WIN)
+ HANDLE section_;
+#endif
+ void* buffer_; // Address of the memory mapped buffer.
+ size_t view_size_; // Size of the memory pointed by buffer_.
+#if defined(POSIX_AVOID_MMAP)
+ void* snapshot_; // Copy of the buffer taken when it was last flushed.
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(MappedFile);
+};
+
+// Helper class for calling Flush() on exit from the current scope.
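+// For example (sketch; |file| is assumed to be a scoped_refptr<MappedFile>):
+//   {
+//     ScopedFlush flush(file.get());
+//     // ... modify the data behind file->buffer() ...
+//   }  // file->Flush() runs here.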
+class ScopedFlush {
+ public:
+ explicit ScopedFlush(MappedFile* file) : file_(file) {}
+ ~ScopedFlush() {
+ file_->Flush();
+ }
+ private:
+ MappedFile* file_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MAPPED_FILE_H_
diff --git a/chromium/net/disk_cache/mapped_file_avoid_mmap_posix.cc b/chromium/net/disk_cache/mapped_file_avoid_mmap_posix.cc
new file mode 100644
index 00000000000..cd514a366e2
--- /dev/null
+++ b/chromium/net/disk_cache/mapped_file_avoid_mmap_posix.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include <stdlib.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+
+namespace disk_cache {
+
+void* MappedFile::Init(const base::FilePath& name, size_t size) {
+ DCHECK(!init_);
+ if (init_ || !File::Init(name))
+ return NULL;
+
+ if (!size)
+ size = GetLength();
+
+ buffer_ = malloc(size);
+ snapshot_ = malloc(size);
+ if (buffer_ && snapshot_ && Read(buffer_, size, 0)) {
+ memcpy(snapshot_, buffer_, size);
+ } else {
+ free(buffer_);
+ free(snapshot_);
+ buffer_ = snapshot_ = 0;
+ }
+
+ init_ = true;
+ view_size_ = size;
+ return buffer_;
+}
+
+bool MappedFile::Load(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Read(block->buffer(), block->size(), offset);
+}
+
+bool MappedFile::Store(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Write(block->buffer(), block->size(), offset);
+}
+
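+// Emulates the write-back behaviour of a real memory map: the buffer is
+// compared against the last flushed snapshot in 4 KB blocks, and only the
+// blocks that changed are written back to the file.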
+void MappedFile::Flush() {
+ DCHECK(buffer_);
+ DCHECK(snapshot_);
+ const char* buffer_ptr = static_cast<const char*>(buffer_);
+ char* snapshot_ptr = static_cast<char*>(snapshot_);
+ const size_t block_size = 4096;
+ for (size_t offset = 0; offset < view_size_; offset += block_size) {
+ size_t size = std::min(view_size_ - offset, block_size);
+ if (memcmp(snapshot_ptr + offset, buffer_ptr + offset, size)) {
+ memcpy(snapshot_ptr + offset, buffer_ptr + offset, size);
+ Write(snapshot_ptr + offset, size, offset);
+ }
+ }
+}
+
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_ && snapshot_) {
+ Flush();
+ }
+ free(buffer_);
+ free(snapshot_);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mapped_file_posix.cc b/chromium/net/disk_cache/mapped_file_posix.cc
new file mode 100644
index 00000000000..2146245d4aa
--- /dev/null
+++ b/chromium/net/disk_cache/mapped_file_posix.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+void* MappedFile::Init(const base::FilePath& name, size_t size) {
+ DCHECK(!init_);
+ if (init_ || !File::Init(name))
+ return NULL;
+
+ size_t temp_len = size ? size : 4096;
+ if (!size)
+ size = GetLength();
+
+ buffer_ = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ platform_file(), 0);
+ init_ = true;
+ view_size_ = size;
+ DCHECK(reinterpret_cast<intptr_t>(buffer_) != -1);
+ if (reinterpret_cast<intptr_t>(buffer_) == -1)
+ buffer_ = 0;
+
+ // Make sure we detect hardware failures reading the headers.
+ scoped_ptr<char[]> temp(new char[temp_len]);
+ if (!Read(temp.get(), temp_len, 0))
+ return NULL;
+
+ return buffer_;
+}
+
+bool MappedFile::Load(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Read(block->buffer(), block->size(), offset);
+}
+
+bool MappedFile::Store(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Write(block->buffer(), block->size(), offset);
+}
+
+void MappedFile::Flush() {
+}
+
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_) {
+ int ret = munmap(buffer_, view_size_);
+ DCHECK_EQ(0, ret);
+ }
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mapped_file_unittest.cc b/chromium/net/disk_cache/mapped_file_unittest.cc
new file mode 100644
index 00000000000..8798db0170c
--- /dev/null
+++ b/chromium/net/disk_cache/mapped_file_unittest.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/files/file_path.h"
+#include "base/strings/string_util.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mapped_file.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Implementation of FileIOCallback for the tests.
+class FileCallbackTest: public disk_cache::FileIOCallback {
+ public:
+ FileCallbackTest(int id, MessageLoopHelper* helper, int* max_id)
+ : id_(id),
+ helper_(helper),
+ max_id_(max_id) {
+ }
+ virtual ~FileCallbackTest() {}
+
+ virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
+
+ private:
+ int id_;
+ MessageLoopHelper* helper_;
+ int* max_id_;
+};
+
+void FileCallbackTest::OnFileIOComplete(int bytes_copied) {
+ if (id_ > *max_id_) {
+ NOTREACHED();
+ helper_->set_callback_reused_error(true);
+ }
+
+ helper_->CallbackWasCalled();
+}
+
+} // namespace
+
+TEST_F(DiskCacheTest, MappedFile_SyncIO) {
+ base::FilePath filename = cache_path_.AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ char buffer1[20];
+ char buffer2[20];
+ CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+ base::strlcpy(buffer1, "the data", arraysize(buffer1));
+ EXPECT_TRUE(file->Write(buffer1, sizeof(buffer1), 8192));
+ EXPECT_TRUE(file->Read(buffer2, sizeof(buffer2), 8192));
+ EXPECT_STREQ(buffer1, buffer2);
+}
+
+TEST_F(DiskCacheTest, MappedFile_AsyncIO) {
+ base::FilePath filename = cache_path_.AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ int max_id = 0;
+ MessageLoopHelper helper;
+ FileCallbackTest callback(1, &helper, &max_id);
+
+ char buffer1[20];
+ char buffer2[20];
+ CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+ base::strlcpy(buffer1, "the data", arraysize(buffer1));
+ bool completed;
+ EXPECT_TRUE(file->Write(buffer1, sizeof(buffer1), 1024 * 1024, &callback,
+ &completed));
+ int expected = completed ? 0 : 1;
+
+ max_id = 1;
+ helper.WaitUntilCacheIoFinished(expected);
+
+ EXPECT_TRUE(file->Read(buffer2, sizeof(buffer2), 1024 * 1024, &callback,
+ &completed));
+ if (!completed)
+ expected++;
+
+ helper.WaitUntilCacheIoFinished(expected);
+
+ EXPECT_EQ(expected, helper.callbacks_called());
+ EXPECT_FALSE(helper.callback_reused_error());
+ EXPECT_STREQ(buffer1, buffer2);
+}
diff --git a/chromium/net/disk_cache/mapped_file_win.cc b/chromium/net/disk_cache/mapped_file_win.cc
new file mode 100644
index 00000000000..f17a1004a90
--- /dev/null
+++ b/chromium/net/disk_cache/mapped_file_win.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+void* MappedFile::Init(const base::FilePath& name, size_t size) {
+ DCHECK(!init_);
+ if (init_ || !File::Init(name))
+ return NULL;
+
+ buffer_ = NULL;
+ init_ = true;
+ section_ = CreateFileMapping(platform_file(), NULL, PAGE_READWRITE, 0,
+ static_cast<DWORD>(size), NULL);
+ if (!section_)
+ return NULL;
+
+ buffer_ = MapViewOfFile(section_, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, size);
+ DCHECK(buffer_);
+ view_size_ = size;
+
+ // Make sure we detect hardware failures reading the headers.
+ size_t temp_len = size ? size : 4096;
+ scoped_ptr<char[]> temp(new char[temp_len]);
+ if (!Read(temp.get(), temp_len, 0))
+ return NULL;
+
+ return buffer_;
+}
+
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_) {
+ BOOL ret = UnmapViewOfFile(buffer_);
+ DCHECK(ret);
+ }
+
+ if (section_)
+ CloseHandle(section_);
+}
+
+bool MappedFile::Load(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Read(block->buffer(), block->size(), offset);
+}
+
+bool MappedFile::Store(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Write(block->buffer(), block->size(), offset);
+}
+
+void MappedFile::Flush() {
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mem_backend_impl.cc b/chromium/net/disk_cache/mem_backend_impl.cc
new file mode 100644
index 00000000000..a6f1bf13bed
--- /dev/null
+++ b/chromium/net/disk_cache/mem_backend_impl.cc
@@ -0,0 +1,337 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mem_backend_impl.h"
+
+#include "base/logging.h"
+#include "base/sys_info.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/mem_entry_impl.h"
+
+using base::Time;
+
+namespace {
+
+const int kDefaultCacheSize = 10 * 1024 * 1024;
+const int kCleanUpMargin = 1024 * 1024;
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
+ : max_size_(0), current_size_(0), net_log_(net_log) {}
+
+MemBackendImpl::~MemBackendImpl() {
+ EntryMap::iterator it = entries_.begin();
+ while (it != entries_.end()) {
+ it->second->Doom();
+ it = entries_.begin();
+ }
+ DCHECK(!current_size_);
+}
+
+// Static.
+scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
+ net::NetLog* net_log) {
+ scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
+ cache->SetMaxSize(max_bytes);
+ if (cache->Init())
+ return cache.PassAs<Backend>();
+
+ LOG(ERROR) << "Unable to create cache";
+ return scoped_ptr<Backend>();
+}
+
+bool MemBackendImpl::Init() {
+ if (max_size_)
+ return true;
+
+ int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
+
+ if (total_memory <= 0) {
+ max_size_ = kDefaultCacheSize;
+ return true;
+ }
+
+ // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
+ // reached on systems with more than 2.5 GB of RAM.
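+ // For example, with 1 GB of physical memory the 2% share is roughly 20 MB
+ // and is used as-is; with 4 GB the share (~80 MB) exceeds the 50 MB cap
+ // (kDefaultCacheSize * 5), so the cap is used instead.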
+ total_memory = total_memory * 2 / 100;
+ if (total_memory > kDefaultCacheSize * 5)
+ max_size_ = kDefaultCacheSize * 5;
+ else
+ max_size_ = static_cast<int32>(total_memory);
+
+ return true;
+}
+
+bool MemBackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ max_size_ = max_bytes;
+ return true;
+}
+
+void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
+ // Only parent entries can be passed into this method.
+ DCHECK(entry->type() == MemEntryImpl::kParentEntry);
+
+ rankings_.Remove(entry);
+ EntryMap::iterator it = entries_.find(entry->GetKey());
+ if (it != entries_.end())
+ entries_.erase(it);
+ else
+ NOTREACHED();
+
+ entry->InternalDoom();
+}
+
+void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
+ rankings_.UpdateRank(node);
+}
+
+void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (old_size >= new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+}
+
+int MemBackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
+ rankings_.Insert(entry);
+}
+
+void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
+ rankings_.Remove(entry);
+}
+
+net::CacheType MemBackendImpl::GetCacheType() const {
+ return net::MEMORY_CACHE;
+}
+
+int32 MemBackendImpl::GetEntryCount() const {
+ return static_cast<int32>(entries_.size());
+}
+
+int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ if (OpenEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ if (CreateEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntry(const std::string& key,
+ const CompletionCallback& callback) {
+ if (DoomEntry(key))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
+ if (DoomAllEntries())
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ const CompletionCallback& callback) {
+ if (DoomEntriesBetween(initial_time, end_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
+ const CompletionCallback& callback) {
+ if (DoomEntriesSince(initial_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) {
+ if (OpenNextEntry(iter, next_entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+void MemBackendImpl::EndEnumeration(void** iter) {
+ *iter = NULL;
+}
+
+void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end()) {
+ UpdateRank(it->second);
+ }
+}
+
+bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it == entries_.end())
+ return false;
+
+ it->second->Open();
+
+ *entry = it->second;
+ return true;
+}
+
+bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end())
+ return false;
+
+ MemEntryImpl* cache_entry = new MemEntryImpl(this);
+ if (!cache_entry->CreateEntry(key, net_log_)) {
+ delete entry;
+ return false;
+ }
+
+ rankings_.Insert(cache_entry);
+ entries_[key] = cache_entry;
+
+ *entry = cache_entry;
+ return true;
+}
+
+bool MemBackendImpl::DoomEntry(const std::string& key) {
+ Entry* entry;
+ if (!OpenEntry(key, &entry))
+ return false;
+
+ entry->Doom();
+ entry->Close();
+ return true;
+}
+
+bool MemBackendImpl::DoomAllEntries() {
+ TrimCache(true);
+ return true;
+}
+
+bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
+ const Time end_time) {
+ if (end_time.is_null())
+ return DoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ MemEntryImpl* node = rankings_.GetNext(NULL);
+ // Last valid entry before |node|.
+ // Note that entries after |node| may become invalid while |node| is being
+ // doomed, when they are its child entries. It is guaranteed that a parent
+ // node appears before its children in the ranking list (see
+ // InternalReadSparseData and InternalWriteSparseData).
+ MemEntryImpl* last_valid = NULL;
+
+ // rankings_ is ordered by last-used time; this descends through the cache,
+ // dooming items last used before |end_time|, and stops once it reaches an
+ // item last used before |initial_time|.
+ while (node) {
+ if (node->GetLastUsed() < initial_time)
+ break;
+
+ if (node->GetLastUsed() < end_time)
+ node->Doom();
+ else
+ last_valid = node;
+ node = rankings_.GetNext(last_valid);
+ }
+
+ return true;
+}
+
+bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
+ for (;;) {
+ // Get the entry in the front.
+ Entry* entry = rankings_.GetNext(NULL);
+
+ // Break the loop when there are no more entries or the entry is too old.
+ if (!entry || entry->GetLastUsed() < initial_time)
+ return true;
+ entry->Doom();
+ }
+}
+
+bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
+ MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
+ MemEntryImpl* node = rankings_.GetNext(current);
+ // We should never return a child entry so iterate until we hit a parent
+ // entry.
+ while (node && node->type() != MemEntryImpl::kParentEntry) {
+ node = rankings_.GetNext(node);
+ }
+ *next_entry = node;
+ *iter = node;
+
+ if (node)
+ node->Open();
+
+ return NULL != node;
+}
+
+void MemBackendImpl::TrimCache(bool empty) {
+ MemEntryImpl* next = rankings_.GetPrev(NULL);
+ if (!next)
+ return;
+
+ int target_size = empty ? 0 : LowWaterAdjust(max_size_);
+ while (current_size_ > target_size && next) {
+ MemEntryImpl* node = next;
+ next = rankings_.GetPrev(next);
+ if (!node->InUse() || empty) {
+ node->Doom();
+ }
+ }
+
+ return;
+}
+
+void MemBackendImpl::AddStorageSize(int32 bytes) {
+ current_size_ += bytes;
+ DCHECK_GE(current_size_, 0);
+
+ if (current_size_ > max_size_)
+ TrimCache(false);
+}
+
+void MemBackendImpl::SubstractStorageSize(int32 bytes) {
+ current_size_ -= bytes;
+ DCHECK_GE(current_size_, 0);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mem_backend_impl.h b/chromium/net/disk_cache/mem_backend_impl.h
new file mode 100644
index 00000000000..8da39cc7f26
--- /dev/null
+++ b/chromium/net/disk_cache/mem_backend_impl.h
@@ -0,0 +1,120 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
+#define NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
+
+#include "base/compiler_specific.h"
+#include "base/containers/hash_tables.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/mem_rankings.h"
+
+namespace net {
+class NetLog;
+} // namespace net
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class implements the Backend interface. An object of this class handles
+// the operations of the cache without writing to disk.
+class NET_EXPORT_PRIVATE MemBackendImpl : public Backend {
+ public:
+ explicit MemBackendImpl(net::NetLog* net_log);
+ virtual ~MemBackendImpl();
+
+ // Returns an instance of a Backend implemented only in memory. The returned
+ // object should be deleted when not needed anymore. max_bytes is the maximum
+ // size the cache can grow to. If zero is passed in as max_bytes, the cache
+ // will determine the value to use based on the available memory. The returned
+ // pointer can be NULL if a fatal error is found.
+ static scoped_ptr<Backend> CreateBackend(int max_bytes, net::NetLog* net_log);
+
+ // Performs general initialization for this current instance of the cache.
+ bool Init();
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Permanently deletes an entry.
+ void InternalDoomEntry(MemEntryImpl* entry);
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(MemEntryImpl* node);
+
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32 old_size, int32 new_size);
+
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
+
+ // Insert a MemEntryImpl into the ranking list. This method is only called
+ // from MemEntryImpl to insert child entries. The reference can be removed
+ // by calling RemoveFromRankingList(|entry|).
+ void InsertIntoRankingList(MemEntryImpl* entry);
+
+ // Remove |entry| from ranking list. This method is only called from
+ // MemEntryImpl to remove a child entry from the ranking list.
+ void RemoveFromRankingList(MemEntryImpl* entry);
+
+ // Backend interface.
+ virtual net::CacheType GetCacheType() const OVERRIDE;
+ virtual int32 GetEntryCount() const OVERRIDE;
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual void EndEnumeration(void** iter) OVERRIDE;
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) OVERRIDE {}
+ virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
+
+ private:
+ typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
+
+ // Old Backend interface.
+ bool OpenEntry(const std::string& key, Entry** entry);
+ bool CreateEntry(const std::string& key, Entry** entry);
+ bool DoomEntry(const std::string& key);
+ bool DoomAllEntries();
+ bool DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ bool DoomEntriesSince(const base::Time initial_time);
+ bool OpenNextEntry(void** iter, Entry** next_entry);
+
+ // Deletes entries from the cache until the current size is below the limit.
+ // If empty is true, the whole cache will be trimmed, regardless of being in
+ // use.
+ void TrimCache(bool empty);
+
+ // Handles the used storage count.
+ void AddStorageSize(int32 bytes);
+ void SubstractStorageSize(int32 bytes);
+
+ EntryMap entries_;
+ MemRankings rankings_; // Rankings to be able to trim the cache.
+ int32 max_size_; // Maximum data size for this instance.
+ int32 current_size_;
+
+ net::NetLog* net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemBackendImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
diff --git a/chromium/net/disk_cache/mem_entry_impl.cc b/chromium/net/disk_cache/mem_entry_impl.cc
new file mode 100644
index 00000000000..7d0095898b4
--- /dev/null
+++ b/chromium/net/disk_cache/mem_entry_impl.cc
@@ -0,0 +1,631 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mem_entry_impl.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/mem_backend_impl.h"
+#include "net/disk_cache/net_log_parameters.h"
+
+using base::Time;
+
+namespace {
+
+const int kSparseData = 1;
+
+// Maximum size of a sparse entry is 2 to the power of this number.
+const int kMaxSparseEntryBits = 12;
+
+// Sparse entry has maximum size of 4KB.
+const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;
+
+// Convert global offset to child index.
+inline int ToChildIndex(int64 offset) {
+ return static_cast<int>(offset >> kMaxSparseEntryBits);
+}
+
+// Convert global offset to offset in child entry.
+inline int ToChildOffset(int64 offset) {
+ return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
+}
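+
+// For example, with 4 KB children a global offset of 10000 falls in child 2
+// (10000 >> 12) at child offset 1808 (10000 & 4095).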
+
+// Returns a name for a child entry given the base_name of the parent and the
+// child_id. This name is only used for logging purposes.
+// If the entry is called entry_name, child entries will be named something
+// like Range_entry_name:YYY where YYY is the number of the particular child.
+std::string GenerateChildName(const std::string& base_name, int child_id) {
+ return base::StringPrintf("Range_%s:%i", base_name.c_str(), child_id);
+}
+
+// Returns NetLog parameters for the creation of a child MemEntryImpl. Separate
+// function needed because child entries don't support GetKey().
+base::Value* NetLogChildEntryCreationCallback(
+ const disk_cache::MemEntryImpl* parent,
+ int child_id,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ dict->SetString("key", GenerateChildName(parent->GetKey(), child_id));
+ dict->SetBoolean("created", true);
+ return dict;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
+ doomed_ = false;
+ backend_ = backend;
+ ref_count_ = 0;
+ parent_ = NULL;
+ child_id_ = 0;
+ child_first_pos_ = 0;
+ next_ = NULL;
+ prev_ = NULL;
+ for (int i = 0; i < NUM_STREAMS; i++)
+ data_size_[i] = 0;
+}
+
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key, net::NetLog* net_log) {
+ key_ = key;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+
+ net_log_ = net::BoundNetLog::Make(net_log,
+ net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
+ // Must be called after |key_| is set, so GetKey() works.
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
+ CreateNetLogEntryCreationCallback(this, true));
+
+ Open();
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ return true;
+}
+
+void MemEntryImpl::InternalDoom() {
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ doomed_ = true;
+ if (!ref_count_) {
+ if (type() == kParentEntry) {
+ // If this is a parent entry, we need to doom all the child entries.
+ if (children_.get()) {
+ EntryMap children;
+ children.swap(*children_);
+ for (EntryMap::iterator i = children.begin();
+ i != children.end(); ++i) {
+ // Since a pointer to this object is also saved in the map, avoid
+ // dooming it.
+ if (i->second != this)
+ i->second->Doom();
+ }
+ DCHECK(children_->empty());
+ }
+ } else {
+ // If this is a child entry, detach it from the parent.
+ parent_->DetachChild(child_id_);
+ }
+ delete this;
+ }
+}
+
+void MemEntryImpl::Open() {
+ // Only a parent entry can be opened.
+ // TODO(hclam): make sure it's correct to not apply the concept of ref
+ // counting to child entry.
+ DCHECK(type() == kParentEntry);
+ ref_count_++;
+ DCHECK_GE(ref_count_, 0);
+ DCHECK(!doomed_);
+}
+
+bool MemEntryImpl::InUse() {
+ if (type() == kParentEntry) {
+ return ref_count_ > 0;
+ } else {
+ // A child entry is never in use. The consequence is that a child entry can
+ // always be evicted while the associated parent entry is currently in use
+ // (i.e. opened).
+ return false;
+ }
+}
+
+// ------------------------------------------------------------------------
+
+void MemEntryImpl::Doom() {
+ if (doomed_)
+ return;
+ if (type() == kParentEntry) {
+ // Perform internal doom from the backend if this is a parent entry.
+ backend_->InternalDoomEntry(this);
+ } else {
+ // Manually detach from the backend and perform internal doom.
+ backend_->RemoveFromRankingList(this);
+ InternalDoom();
+ }
+}
+
+void MemEntryImpl::Close() {
+ // Only a parent entry can be closed.
+ DCHECK(type() == kParentEntry);
+ ref_count_--;
+ DCHECK_GE(ref_count_, 0);
+ if (!ref_count_ && doomed_)
+ InternalDoom();
+}
+
+std::string MemEntryImpl::GetKey() const {
+ // A child entry doesn't have a key, so this method should not be called.
+ DCHECK(type() == kParentEntry);
+ return key_;
+}
+
+Time MemEntryImpl::GetLastUsed() const {
+ return last_used_;
+}
+
+Time MemEntryImpl::GetLastModified() const {
+ return last_modified_;
+}
+
+int32 MemEntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index >= NUM_STREAMS)
+ return 0;
+ return data_size_[index];
+}
+
+int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+ }
+
+ int result = InternalReadData(index, offset, buf, buf_len);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+ }
+
+ int result = InternalWriteData(index, offset, buf, buf_len, truncate);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int MemEntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_READ,
+ CreateNetLogSparseOperationCallback(offset, buf_len));
+ }
+ int result = InternalReadSparseData(offset, buf, buf_len);
+ if (net_log_.IsLoggingAllEvents())
+ net_log_.EndEvent(net::NetLog::TYPE_SPARSE_READ);
+ return result;
+}
+
+int MemEntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE,
+ CreateNetLogSparseOperationCallback(offset, buf_len));
+ }
+ int result = InternalWriteSparseData(offset, buf, buf_len);
+ if (net_log_.IsLoggingAllEvents())
+ net_log_.EndEvent(net::NetLog::TYPE_SPARSE_WRITE);
+ return result;
+}
+
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_GET_RANGE,
+ CreateNetLogSparseOperationCallback(offset, len));
+ }
+ int result = GetAvailableRange(offset, len, start);
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_SPARSE_GET_RANGE,
+ CreateNetLogGetAvailableRangeResultCallback(*start, result));
+ }
+ return result;
+}
+
+bool MemEntryImpl::CouldBeSparse() const {
+ DCHECK_EQ(kParentEntry, type());
+ return (children_.get() != NULL);
+}
+
+int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
+ return net::OK;
+}
+
+// ------------------------------------------------------------------------
+
+MemEntryImpl::~MemEntryImpl() {
+ for (int i = 0; i < NUM_STREAMS; i++)
+ backend_->ModifyStorageSize(data_size_[i], 0);
+ backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+ net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL);
+}
+
+int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
+ int buf_len) {
+ DCHECK(type() == kParentEntry || index == kSparseData);
+
+ if (index < 0 || index >= NUM_STREAMS)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = GetDataSize(index);
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+ memcpy(buf->data(), &(data_[index])[offset], buf_len);
+ return buf_len;
+}
+
+int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
+ int buf_len, bool truncate) {
+ DCHECK(type() == kParentEntry || index == kSparseData);
+
+ if (index < 0 || index >= NUM_STREAMS)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int max_file_size = backend_->MaxFileSize();
+
+ // offset + buf_len could overflow to a negative number, so check each term
+ // as well as the sum.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ return net::ERR_FAILED;
+ }
+
+ // Read the size at this point.
+ int entry_size = GetDataSize(index);
+
+ PrepareTarget(index, offset, buf_len);
+
+ if (entry_size < offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ } else if (truncate) {
+ if (entry_size > offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ }
+ }
+
+ UpdateRank(true);
+
+ if (!buf_len)
+ return 0;
+
+ memcpy(&(data_[index])[offset], buf->data(), buf_len);
+ return buf_len;
+}
+
+int MemEntryImpl::InternalReadSparseData(int64 offset, IOBuffer* buf,
+ int buf_len) {
+ DCHECK(type() == kParentEntry);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ // We will keep using this buffer and adjust the offset in this buffer.
+ scoped_refptr<net::DrainableIOBuffer> io_buf(
+ new net::DrainableIOBuffer(buf, buf_len));
+
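+ // net::DrainableIOBuffer advances its data() pointer as DidConsume() is
+ // called, so each child read below lands at the right place in |buf|.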
+ // Iterate until we have read enough.
+ while (io_buf->BytesRemaining()) {
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);
+
+ // No child present for that offset.
+ if (!child)
+ break;
+
+ // We then need to prepare the child offset and len.
+ int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
+
+ // If we are trying to read from a position where the child entry has no
+ // data, we should stop.
+ if (child_offset < child->child_first_pos_)
+ break;
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child->net_log().source(),
+ io_buf->BytesRemaining()));
+ }
+ int ret = child->ReadData(kSparseData, child_offset, io_buf.get(),
+ io_buf->BytesRemaining(), CompletionCallback());
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEventWithNetErrorCode(
+ net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, ret);
+ }
+
+ // If we encounter an error in one entry, return immediately.
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ break;
+
+ // Increment the counter by number of bytes read in the child entry.
+ io_buf->DidConsume(ret);
+ }
+
+ UpdateRank(false);
+
+ return io_buf->BytesConsumed();
+}
+
+int MemEntryImpl::InternalWriteSparseData(int64 offset, IOBuffer* buf,
+ int buf_len) {
+ DCHECK(type() == kParentEntry);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ scoped_refptr<net::DrainableIOBuffer> io_buf(
+ new net::DrainableIOBuffer(buf, buf_len));
+
+ // This loop walks through child entries continuously starting from |offset|
+ // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
+ // child entry until all |buf_len| bytes are written. The write operation can
+ // start in the middle of an entry.
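+ // For example, writing 10240 bytes at offset 6000 writes 2192 bytes to
+ // child 1 (starting at child offset 1904), 4096 bytes to child 2, and the
+ // remaining 3952 bytes to child 3.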
+ while (io_buf->BytesRemaining()) {
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
+ int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
+
+ // Find the right amount to write; this weighs the remaining bytes to write
+ // against the remaining capacity of this child entry.
+ int write_len = std::min(static_cast<int>(io_buf->BytesRemaining()),
+ kMaxSparseEntrySize - child_offset);
+
+ // Keep a record of the last byte position (exclusive) in the child.
+ int data_size = child->GetDataSize(kSparseData);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child->net_log().source(),
+ write_len));
+ }
+
+ // Always writes to the child entry. This operation may overwrite data
+ // previously written.
+ // TODO(hclam): if there is data in the entry and this write is not
+ // continuous we may want to discard this write.
+ int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
+ write_len, CompletionCallback(), true);
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEventWithNetErrorCode(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, ret);
+ }
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ break;
+
+    // Keep a record of the first byte position in the child if the write was
+    // neither aligned nor continuous. This is to enable writing to the middle
+    // of an entry and still keep track of data off the aligned edge.
+ if (data_size != child_offset)
+ child->child_first_pos_ = child_offset;
+
+ // Adjust the offset in the IO buffer.
+ io_buf->DidConsume(ret);
+ }
+
+ UpdateRank(true);
+
+ return io_buf->BytesConsumed();
+}
+
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
+ DCHECK(type() == kParentEntry);
+ DCHECK(start);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || len < 0 || !start)
+ return net::ERR_INVALID_ARGUMENT;
+
+ MemEntryImpl* current_child = NULL;
+
+ // Find the first child and record the number of empty bytes.
+ int empty = FindNextChild(offset, len, &current_child);
+ if (current_child) {
+ *start = offset + empty;
+ len -= empty;
+
+ // Counts the number of continuous bytes.
+ int continuous = 0;
+
+    // This loop scans for continuous bytes.
+ while (len && current_child) {
+ // Number of bytes available in this child.
+ int data_size = current_child->GetDataSize(kSparseData) -
+ ToChildOffset(*start + continuous);
+ if (data_size > len)
+ data_size = len;
+
+ // We have found more continuous bytes so increment the count. Also
+ // decrement the length we should scan.
+ continuous += data_size;
+ len -= data_size;
+
+ // If the next child is discontinuous, break the loop.
+ if (FindNextChild(*start + continuous, len, &current_child))
+ break;
+ }
+ return continuous;
+ }
+ *start = offset;
+ return 0;
+}
+
+void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
+ int entry_size = GetDataSize(index);
+
+ if (entry_size >= offset + buf_len)
+ return; // Not growing the stored data.
+
+ if (static_cast<int>(data_[index].size()) < offset + buf_len)
+ data_[index].resize(offset + buf_len);
+
+ if (offset <= entry_size)
+ return; // There is no "hole" on the stored data.
+
+  // Clean up the hole not written by the user. The point is to avoid returning
+  // garbage data later on.
+ memset(&(data_[index])[entry_size], 0, offset - entry_size);
+}
+
+void MemEntryImpl::UpdateRank(bool modified) {
+ Time current = Time::Now();
+ last_used_ = current;
+
+ if (modified)
+ last_modified_ = current;
+
+ if (!doomed_)
+ backend_->UpdateRank(this);
+}
+
+bool MemEntryImpl::InitSparseInfo() {
+ DCHECK(type() == kParentEntry);
+
+ if (!children_.get()) {
+    // If we already have some data in the sparse stream but we are being
+    // initialized as a sparse entry, we should fail.
+ if (GetDataSize(kSparseData))
+ return false;
+ children_.reset(new EntryMap());
+
+ // The parent entry stores data for the first block, so save this object to
+ // index 0.
+ (*children_)[0] = this;
+ }
+ return true;
+}
+
+bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id,
+ net::NetLog* net_log) {
+ DCHECK(!parent_);
+ DCHECK(!child_id_);
+
+ net_log_ = net::BoundNetLog::Make(net_log,
+ net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
+      base::Bind(&NetLogChildEntryCreationCallback, parent, child_id));
+
+ parent_ = parent;
+ child_id_ = child_id;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+  // Insert this entry into the backend's ranking list.
+ backend_->InsertIntoRankingList(this);
+ return true;
+}
+
+MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
+ DCHECK(type() == kParentEntry);
+ int index = ToChildIndex(offset);
+ EntryMap::iterator i = children_->find(index);
+ if (i != children_->end()) {
+ return i->second;
+ } else if (create) {
+ MemEntryImpl* child = new MemEntryImpl(backend_);
+ child->InitChildEntry(this, index, net_log_.net_log());
+ (*children_)[index] = child;
+ return child;
+ }
+ return NULL;
+}
+
+int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
+ DCHECK(child);
+ *child = NULL;
+ int scanned_len = 0;
+
+ // This loop tries to find the first existing child.
+ while (scanned_len < len) {
+ // This points to the current offset in the child.
+ int current_child_offset = ToChildOffset(offset + scanned_len);
+ MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
+ if (current_child) {
+ int child_first_pos = current_child->child_first_pos_;
+
+      // This points to the first byte that we should be reading from; we need
+      // to account for the filled region and the current offset in the child.
+ int first_pos = std::max(current_child_offset, child_first_pos);
+
+ // If the first byte position we should read from doesn't exceed the
+ // filled region, we have found the first child.
+ if (first_pos < current_child->GetDataSize(kSparseData)) {
+ *child = current_child;
+
+ // We need to advance the scanned length.
+ scanned_len += first_pos - current_child_offset;
+ break;
+ }
+ }
+ scanned_len += kMaxSparseEntrySize - current_child_offset;
+ }
+ return scanned_len;
+}
+
+void MemEntryImpl::DetachChild(int child_id) {
+ children_->erase(child_id);
+}
+
+} // namespace disk_cache
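The sparse read and write loops above split a byte range across fixed-size child entries: ToChildIndex() picks the child and ToChildOffset() the position inside it. The following standalone sketch shows that mapping under an assumed, illustrative child capacity; kChildCapacity and the printf reporting are stand-ins, not the constants or helpers used by MemEntryImpl.

// Sketch only: how a sparse request is divided among child entries.
// kChildCapacity is an assumed, illustrative value.
#include <algorithm>
#include <cstdint>
#include <cstdio>

namespace {

const int kChildCapacity = 16 * 1024;

int ToChildIndexSketch(int64_t offset) {
  return static_cast<int>(offset / kChildCapacity);
}

int ToChildOffsetSketch(int64_t offset) {
  return static_cast<int>(offset % kChildCapacity);
}

}  // namespace

int main() {
  int64_t offset = 20000;  // Sparse offset requested by the caller.
  int remaining = 30000;   // Bytes left to write.
  while (remaining > 0) {
    int child = ToChildIndexSketch(offset);
    int child_offset = ToChildOffsetSketch(offset);
    // Same shape as InternalWriteSparseData(): a write never crosses a child.
    int write_len = std::min(remaining, kChildCapacity - child_offset);
    std::printf("child %d: offset %d, len %d\n", child, child_offset, write_len);
    offset += write_len;
    remaining -= write_len;
  }
  return 0;
}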
diff --git a/chromium/net/disk_cache/mem_entry_impl.h b/chromium/net/disk_cache/mem_entry_impl.h
new file mode 100644
index 00000000000..ef91f6d7b0c
--- /dev/null
+++ b/chromium/net/disk_cache/mem_entry_impl.h
@@ -0,0 +1,189 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_log.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+class MemBackendImpl;
+
+// This class implements the Entry interface for the memory-only cache. An
+// object of this class represents a single entry in the cache. We use two
+// types of entries, parent and child, to support sparse caching.
+//
+// A parent entry is non-sparse until a sparse method is invoked (i.e.
+// ReadSparseData, WriteSparseData, GetAvailableRange) when sparse information
+// is initialized. It then manages a list of child entries and delegates the
+// sparse API calls to the child entries. It creates and deletes child entries
+// and updates the list when needed.
+//
+// A child entry is used to carry partial cache content; non-sparse methods like
+// ReadData and WriteData cannot be applied to it. The lifetime of a child
+// entry is managed by the parent entry that created it, except that the entry
+// can be evicted independently. A child entry does not have a key and it is not
+// registered in the backend's entry map. It is registered in the backend's
+// ranking list to enable eviction of partial content.
+//
+// A sparse entry has a fixed maximum size and can be partially filled. There
+// can only be one continuous filled region in a sparse entry, as illustrated by
+// the following example:
+// | xxx ooooo |
+// x = unfilled region
+// o = filled region
+// It is guaranteed that there is at most one unfilled region and one filled
+// region, and the unfilled region (if there is one) is always before the filled
+// region. The bookkeeping for the filled region in a sparse entry is done by
+// using the variable |child_first_pos_| (inclusive).
+
+class MemEntryImpl : public Entry {
+ public:
+ enum EntryType {
+ kParentEntry,
+ kChildEntry,
+ };
+
+ explicit MemEntryImpl(MemBackendImpl* backend);
+
+  // Performs the initialization of a MemEntryImpl that will be added to the
+  // cache.
+ bool CreateEntry(const std::string& key, net::NetLog* net_log);
+
+ // Permanently destroys this entry.
+ void InternalDoom();
+
+ void Open();
+ bool InUse();
+
+ MemEntryImpl* next() const {
+ return next_;
+ }
+
+ MemEntryImpl* prev() const {
+ return prev_;
+ }
+
+ void set_next(MemEntryImpl* next) {
+ next_ = next;
+ }
+
+ void set_prev(MemEntryImpl* prev) {
+ prev_ = prev;
+ }
+
+ EntryType type() const {
+ return parent_ ? kChildEntry : kParentEntry;
+ }
+
+ std::string& key() {
+ return key_;
+ }
+
+ net::BoundNetLog& net_log() {
+ return net_log_;
+ }
+
+ // Entry interface.
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE {}
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ typedef base::hash_map<int, MemEntryImpl*> EntryMap;
+
+ enum {
+ NUM_STREAMS = 3
+ };
+
+ virtual ~MemEntryImpl();
+
+ // Do all the work for corresponding public functions. Implemented as
+ // separate functions to make logging of results simpler.
+ int InternalReadData(int index, int offset, IOBuffer* buf, int buf_len);
+ int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ bool truncate);
+ int InternalReadSparseData(int64 offset, IOBuffer* buf, int buf_len);
+ int InternalWriteSparseData(int64 offset, IOBuffer* buf, int buf_len);
+
+ // Old Entry interface.
+ int GetAvailableRange(int64 offset, int len, int64* start);
+
+ // Grows and cleans up the data buffer.
+ void PrepareTarget(int index, int offset, int buf_len);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
+
+ // Initializes the children map and sparse info. This method is only called
+ // on a parent entry.
+ bool InitSparseInfo();
+
+ // Performs the initialization of a MemEntryImpl as a child entry.
+ // |parent| is the pointer to the parent entry. |child_id| is the ID of
+ // the new child.
+ bool InitChildEntry(MemEntryImpl* parent, int child_id, net::NetLog* net_log);
+
+ // Returns an entry responsible for |offset|. The returned entry can be a
+ // child entry or this entry itself if |offset| points to the first range.
+  // If such an entry does not exist and |create| is true, a new child entry is
+ // created.
+ MemEntryImpl* OpenChild(int64 offset, bool create);
+
+ // Finds the first child located within the range [|offset|, |offset + len|).
+ // Returns the number of bytes ahead of |offset| to reach the first available
+ // bytes in the entry. The first child found is output to |child|.
+ int FindNextChild(int64 offset, int len, MemEntryImpl** child);
+
+ // Removes child indexed by |child_id| from the children map.
+ void DetachChild(int child_id);
+
+ std::string key_;
+ std::vector<char> data_[NUM_STREAMS]; // User data.
+ int32 data_size_[NUM_STREAMS];
+ int ref_count_;
+
+ int child_id_; // The ID of a child entry.
+ int child_first_pos_; // The position of the first byte in a child
+ // entry.
+ MemEntryImpl* next_; // Pointers for the LRU list.
+ MemEntryImpl* prev_;
+ MemEntryImpl* parent_; // Pointer to the parent entry.
+ scoped_ptr<EntryMap> children_;
+
+ base::Time last_modified_; // LRU information.
+ base::Time last_used_;
+ MemBackendImpl* backend_; // Back pointer to the cache.
+ bool doomed_; // True if this entry was removed from the cache.
+
+ net::BoundNetLog net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemEntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
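The filled-region invariant described in the class comment (at most one contiguous filled range per child, tracked by |child_first_pos_|) can be illustrated with a small self-contained sketch; the struct below is not the real class, only the bookkeeping rule.

// Sketch of the per-child filled-region bookkeeping; not the real class.
// A child only remembers one contiguous filled range: [first_pos, size).
#include <cassert>
#include <vector>

struct ChildSketch {
  int first_pos = 0;       // First filled byte (inclusive).
  std::vector<char> data;  // data.size() marks the end of the filled region.

  void Write(int offset, int len) {
    // Mirrors InternalWriteSparseData(): a write that does not start exactly
    // at the current end of the data resets the start of the filled region.
    if (static_cast<int>(data.size()) != offset)
      first_pos = offset;
    if (static_cast<int>(data.size()) < offset + len)
      data.resize(offset + len);
  }

  // Mirrors the check in InternalReadSparseData(): reading before the filled
  // region yields nothing.
  bool CanReadAt(int offset) const {
    return offset >= first_pos && offset < static_cast<int>(data.size());
  }
};

int main() {
  ChildSketch child;
  child.Write(100, 50);          // Unaligned write: filled region is [100, 150).
  assert(!child.CanReadAt(10));  // Before the filled region.
  assert(child.CanReadAt(120));  // Inside the filled region.
  return 0;
}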
diff --git a/chromium/net/disk_cache/mem_rankings.cc b/chromium/net/disk_cache/mem_rankings.cc
new file mode 100644
index 00000000000..d5f4a6536a7
--- /dev/null
+++ b/chromium/net/disk_cache/mem_rankings.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mem_rankings.h"
+
+#include "base/logging.h"
+#include "net/disk_cache/mem_entry_impl.h"
+
+namespace disk_cache {
+
+MemRankings::~MemRankings() {
+ DCHECK(!head_ && !tail_);
+}
+
+void MemRankings::Insert(MemEntryImpl* node) {
+ if (head_)
+ head_->set_prev(node);
+
+ if (!tail_)
+ tail_ = node;
+
+ node->set_prev(NULL);
+ node->set_next(head_);
+ head_ = node;
+}
+
+void MemRankings::Remove(MemEntryImpl* node) {
+ MemEntryImpl* prev = node->prev();
+ MemEntryImpl* next = node->next();
+
+ if (head_ == node)
+ head_ = next;
+
+ if (tail_ == node)
+ tail_ = prev;
+
+ if (prev)
+ prev->set_next(next);
+
+ if (next)
+ next->set_prev(prev);
+
+ node->set_next(NULL);
+ node->set_prev(NULL);
+}
+
+void MemRankings::UpdateRank(MemEntryImpl* node) {
+ Remove(node);
+ Insert(node);
+}
+
+MemEntryImpl* MemRankings::GetNext(MemEntryImpl* node) {
+ if (!node)
+ return head_;
+
+ return node->next();
+}
+
+MemEntryImpl* MemRankings::GetPrev(MemEntryImpl* node) {
+ if (!node)
+ return tail_;
+
+ return node->prev();
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/mem_rankings.h b/chromium/net/disk_cache/mem_rankings.h
new file mode 100644
index 00000000000..fa906888639
--- /dev/null
+++ b/chromium/net/disk_cache/mem_rankings.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_MEM_RANKINGS_H__
+#define NET_DISK_CACHE_MEM_RANKINGS_H__
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class handles the ranking information for the memory-only cache.
+class MemRankings {
+ public:
+ MemRankings() : head_(NULL), tail_(NULL) {}
+ ~MemRankings();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(MemEntryImpl* node);
+
+ // Removes a given entry from the LRU list.
+ void Remove(MemEntryImpl* node);
+
+ // Moves a given entry to the head.
+ void UpdateRank(MemEntryImpl* node);
+
+ // Iterates through the list.
+ MemEntryImpl* GetNext(MemEntryImpl* node);
+ MemEntryImpl* GetPrev(MemEntryImpl* node);
+
+ private:
+ MemEntryImpl* head_;
+ MemEntryImpl* tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemRankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_RANKINGS_H__
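MemRankings is an intrusive LRU list: the most recently used entry sits at the head and GetNext(NULL) starts there, as the implementation above shows. A minimal usage sketch of a head-to-tail walk, assuming only the declarations in this header (the helper name and the backend plumbing are illustrative):

// Illustrative only: count entries from most to least recently used using
// the MemRankings interface above. |rankings| is assumed to be the instance
// owned by MemBackendImpl.
int CountEntriesSketch(disk_cache::MemRankings* rankings) {
  int count = 0;
  for (disk_cache::MemEntryImpl* node = rankings->GetNext(NULL); node;
       node = rankings->GetNext(node)) {
    ++count;
  }
  return count;
}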
diff --git a/chromium/net/disk_cache/net_log_parameters.cc b/chromium/net/disk_cache/net_log_parameters.cc
new file mode 100644
index 00000000000..5d7e50f5595
--- /dev/null
+++ b/chromium/net/disk_cache/net_log_parameters.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/net_log_parameters.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/values.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace {
+
+base::Value* NetLogEntryCreationCallback(
+ const disk_cache::Entry* entry,
+ bool created,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ dict->SetString("key", entry->GetKey());
+ dict->SetBoolean("created", created);
+ return dict;
+}
+
+base::Value* NetLogReadWriteDataCallback(
+ int index,
+ int offset,
+ int buf_len,
+ bool truncate,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ dict->SetInteger("index", index);
+ dict->SetInteger("offset", offset);
+ dict->SetInteger("buf_len", buf_len);
+ if (truncate)
+ dict->SetBoolean("truncate", truncate);
+ return dict;
+}
+
+base::Value* NetLogReadWriteCompleteCallback(
+ int bytes_copied,
+ net::NetLog::LogLevel /* log_level */) {
+ DCHECK_NE(bytes_copied, net::ERR_IO_PENDING);
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ if (bytes_copied < 0) {
+ dict->SetInteger("net_error", bytes_copied);
+ } else {
+ dict->SetInteger("bytes_copied", bytes_copied);
+ }
+ return dict;
+}
+
+base::Value* NetLogSparseOperationCallback(
+ int64 offset,
+ int buff_len,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ // Values can only be created with at most 32-bit integers. Using a string
+ // instead circumvents that restriction.
+ dict->SetString("offset", base::Int64ToString(offset));
+ dict->SetInteger("buff_len", buff_len);
+ return dict;
+}
+
+base::Value* NetLogSparseReadWriteCallback(
+ const net::NetLog::Source& source,
+ int child_len,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ source.AddToEventParameters(dict);
+ dict->SetInteger("child_len", child_len);
+ return dict;
+}
+
+base::Value* NetLogGetAvailableRangeResultCallback(
+ int64 start,
+ int result,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ if (result > 0) {
+ dict->SetInteger("length", result);
+ dict->SetString("start", base::Int64ToString(start));
+ } else {
+ dict->SetInteger("net_error", result);
+ }
+ return dict;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+net::NetLog::ParametersCallback CreateNetLogEntryCreationCallback(
+ const Entry* entry,
+ bool created) {
+ DCHECK(entry);
+ return base::Bind(&NetLogEntryCreationCallback, entry, created);
+}
+
+net::NetLog::ParametersCallback CreateNetLogReadWriteDataCallback(
+ int index,
+ int offset,
+ int buf_len,
+ bool truncate) {
+ return base::Bind(&NetLogReadWriteDataCallback,
+ index, offset, buf_len, truncate);
+}
+
+net::NetLog::ParametersCallback CreateNetLogReadWriteCompleteCallback(
+ int bytes_copied) {
+ return base::Bind(&NetLogReadWriteCompleteCallback, bytes_copied);
+}
+
+net::NetLog::ParametersCallback CreateNetLogSparseOperationCallback(
+ int64 offset,
+ int buff_len) {
+ return base::Bind(&NetLogSparseOperationCallback, offset, buff_len);
+}
+
+net::NetLog::ParametersCallback CreateNetLogSparseReadWriteCallback(
+ const net::NetLog::Source& source,
+ int child_len) {
+ return base::Bind(&NetLogSparseReadWriteCallback, source, child_len);
+}
+
+net::NetLog::ParametersCallback CreateNetLogGetAvailableRangeResultCallback(
+ int64 start,
+ int result) {
+ return base::Bind(&NetLogGetAvailableRangeResultCallback, start, result);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/net_log_parameters.h b/chromium/net/disk_cache/net_log_parameters.h
new file mode 100644
index 00000000000..3598cda7b12
--- /dev/null
+++ b/chromium/net/disk_cache/net_log_parameters.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_NET_LOG_PARAMETERS_H_
+#define NET_DISK_CACHE_NET_LOG_PARAMETERS_H_
+
+#include <string>
+
+#include "net/base/net_log.h"
+
+// This file contains a set of functions to create NetLog::ParametersCallbacks
+// shared by EntryImpls and MemEntryImpls.
+namespace disk_cache {
+
+class Entry;
+
+// Creates a NetLog callback that returns parameters for the creation of an
+// Entry. Contains the Entry's key and whether it was created or opened.
+// |entry| can't be NULL, must support GetKey(), and must outlive the returned
+// callback.
+net::NetLog::ParametersCallback CreateNetLogEntryCreationCallback(
+ const Entry* entry,
+ bool created);
+
+// Creates a NetLog callback that returns parameters for the start of a
+// non-sparse read or write of an Entry. For reads, |truncate| must be false.
+net::NetLog::ParametersCallback CreateNetLogReadWriteDataCallback(
+ int index,
+ int offset,
+ int buf_len,
+ bool truncate);
+
+// Creates a NetLog callback that returns parameters for when a non-sparse
+// read or write completes.
+// |bytes_copied| is either the number of bytes copied or a network error
+// code. |bytes_copied| must not be ERR_IO_PENDING, as it's not a valid
+// result for an operation.
+net::NetLog::ParametersCallback CreateNetLogReadWriteCompleteCallback(
+ int bytes_copied);
+
+// Creates a NetLog callback that returns parameters for when a sparse
+// operation is started.
+net::NetLog::ParametersCallback CreateNetLogSparseOperationCallback(
+ int64 offset,
+ int buff_len);
+
+// Creates a NetLog callback that returns parameters for when a read or write
+// for a sparse entry's child is started.
+net::NetLog::ParametersCallback CreateNetLogSparseReadWriteCallback(
+ const net::NetLog::Source& source,
+ int child_len);
+
+// Creates a NetLog callback that returns parameters for when a call to
+// GetAvailableRange returns.
+net::NetLog::ParametersCallback CreateNetLogGetAvailableRangeResultCallback(
+ int64 start,
+ int result);
+
+} // namespace disk_cache
+
+#endif  // NET_DISK_CACHE_NET_LOG_PARAMETERS_H_
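These factories all follow the same lazy-parameters pattern: the call site binds the interesting values immediately and the logging layer invokes the callback only when an observer actually wants the parameters. A standalone sketch of that pattern, using plain std::function rather than the real net::NetLog types:

// Sketch of the lazy-parameters pattern; plain C++ stand-ins, not the real
// net::NetLog::ParametersCallback machinery.
#include <cstdio>
#include <functional>
#include <string>

using ParamsCallback = std::function<std::string()>;

ParamsCallback CreateReadWriteParamsSketch(int index, int offset, int buf_len) {
  // Capture the values now; serialize them only if somebody is listening.
  return [=]() {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "{index:%d, offset:%d, len:%d}",
                  index, offset, buf_len);
    return std::string(buf);
  };
}

void LogEventSketch(bool logging_enabled, const ParamsCallback& params) {
  if (logging_enabled)
    std::printf("event %s\n", params().c_str());  // Evaluated lazily.
}

int main() {
  LogEventSketch(true, CreateReadWriteParamsSketch(1, 0, 4096));
  return 0;
}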
diff --git a/chromium/net/disk_cache/rankings.cc b/chromium/net/disk_cache/rankings.cc
new file mode 100644
index 00000000000..ff9913e252a
--- /dev/null
+++ b/chromium/net/disk_cache/rankings.cc
@@ -0,0 +1,922 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/rankings.h"
+
+#include "base/metrics/histogram.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/stress_support.h"
+
+using base::Time;
+using base::TimeTicks;
+
+namespace disk_cache {
+// This is used by crash_cache.exe to generate unit test files.
+NET_EXPORT_PRIVATE RankCrashes g_rankings_crash = NO_CRASH;
+}
+
+namespace {
+
+enum Operation {
+ INSERT = 1,
+ REMOVE
+};
+
+// This class provides a simple lock for the LRU list of rankings. Whenever an
+// entry is to be inserted into or removed from the list, a transaction object
+// should be created to keep track of the operation. If the process crashes
+// before finishing the operation, the transaction record (stored as part of
+// the user data on the file header) can be used to finish the operation.
+class Transaction {
+ public:
+  // addr is the cache address of the node being inserted or removed. We want
+  // to keep the compiler from optimizing when user_data is read or written,
+  // because that ordering is the basis of the crash detection. Maybe volatile
+  // is not enough for that, but it should be a good hint.
+ Transaction(volatile disk_cache::LruData* data, disk_cache::Addr addr,
+ Operation op, int list);
+ ~Transaction();
+ private:
+ volatile disk_cache::LruData* data_;
+ DISALLOW_COPY_AND_ASSIGN(Transaction);
+};
+
+Transaction::Transaction(volatile disk_cache::LruData* data,
+ disk_cache::Addr addr, Operation op, int list)
+ : data_(data) {
+ DCHECK(!data_->transaction);
+ DCHECK(addr.is_initialized());
+ data_->operation = op;
+ data_->operation_list = list;
+ data_->transaction = addr.value();
+}
+
+Transaction::~Transaction() {
+ DCHECK(data_->transaction);
+ data_->transaction = 0;
+ data_->operation = 0;
+ data_->operation_list = 0;
+}
+
+// Code locations that can generate crashes.
+enum CrashLocation {
+ ON_INSERT_1, ON_INSERT_2, ON_INSERT_3, ON_INSERT_4, ON_REMOVE_1, ON_REMOVE_2,
+ ON_REMOVE_3, ON_REMOVE_4, ON_REMOVE_5, ON_REMOVE_6, ON_REMOVE_7, ON_REMOVE_8
+};
+
+#ifndef NDEBUG
+void TerminateSelf() {
+#if defined(OS_WIN)
+ // Windows does more work on _exit() than we would like, so we force exit.
+ TerminateProcess(GetCurrentProcess(), 0);
+#elif defined(OS_POSIX)
+ // On POSIX, _exit() will terminate the process with minimal cleanup,
+ // and it is cleaner than killing.
+ _exit(0);
+#endif
+}
+#endif // NDEBUG
+
+// Generates a crash on debug builds, according to the value of g_rankings_crash.
+// This is used by crash_cache.exe to generate unit-test files.
+void GenerateCrash(CrashLocation location) {
+#ifndef NDEBUG
+ if (disk_cache::NO_CRASH == disk_cache::g_rankings_crash)
+ return;
+ switch (location) {
+ case ON_INSERT_1:
+ switch (disk_cache::g_rankings_crash) {
+ case disk_cache::INSERT_ONE_1:
+ case disk_cache::INSERT_LOAD_1:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_INSERT_2:
+ if (disk_cache::INSERT_EMPTY_1 == disk_cache::g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_INSERT_3:
+ switch (disk_cache::g_rankings_crash) {
+ case disk_cache::INSERT_EMPTY_2:
+ case disk_cache::INSERT_ONE_2:
+ case disk_cache::INSERT_LOAD_2:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_INSERT_4:
+ switch (disk_cache::g_rankings_crash) {
+ case disk_cache::INSERT_EMPTY_3:
+ case disk_cache::INSERT_ONE_3:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_REMOVE_1:
+ switch (disk_cache::g_rankings_crash) {
+ case disk_cache::REMOVE_ONE_1:
+ case disk_cache::REMOVE_HEAD_1:
+ case disk_cache::REMOVE_TAIL_1:
+ case disk_cache::REMOVE_LOAD_1:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_REMOVE_2:
+ if (disk_cache::REMOVE_ONE_2 == disk_cache::g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_3:
+ if (disk_cache::REMOVE_ONE_3 == disk_cache::g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_4:
+ if (disk_cache::REMOVE_HEAD_2 == disk_cache::g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_5:
+ if (disk_cache::REMOVE_TAIL_2 == disk_cache::g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_6:
+ if (disk_cache::REMOVE_TAIL_3 == disk_cache::g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_7:
+ switch (disk_cache::g_rankings_crash) {
+ case disk_cache::REMOVE_ONE_4:
+ case disk_cache::REMOVE_LOAD_2:
+ case disk_cache::REMOVE_HEAD_3:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_REMOVE_8:
+ switch (disk_cache::g_rankings_crash) {
+ case disk_cache::REMOVE_HEAD_4:
+ case disk_cache::REMOVE_LOAD_3:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+#endif // NDEBUG
+}
+
+// Update the timestamp fields of |node|.
+void UpdateTimes(disk_cache::CacheRankingsBlock* node, bool modified) {
+ base::Time now = base::Time::Now();
+ node->Data()->last_used = now.ToInternalValue();
+ if (modified)
+ node->Data()->last_modified = now.ToInternalValue();
+}
+
+} // namespace
+
+namespace disk_cache {
+
+Rankings::ScopedRankingsBlock::ScopedRankingsBlock() : rankings_(NULL) {}
+
+Rankings::ScopedRankingsBlock::ScopedRankingsBlock(Rankings* rankings)
+ : rankings_(rankings) {}
+
+Rankings::ScopedRankingsBlock::ScopedRankingsBlock(
+ Rankings* rankings, CacheRankingsBlock* node)
+ : scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}
+
+Rankings::Iterator::Iterator(Rankings* rankings) {
+ memset(this, 0, sizeof(Iterator));
+ my_rankings = rankings;
+}
+
+Rankings::Iterator::~Iterator() {
+ for (int i = 0; i < 3; i++)
+ ScopedRankingsBlock(my_rankings, nodes[i]);
+}
+
+Rankings::Rankings() : init_(false) {}
+
+Rankings::~Rankings() {}
+
+bool Rankings::Init(BackendImpl* backend, bool count_lists) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ backend_ = backend;
+ control_data_ = backend_->GetLruData();
+ count_lists_ = count_lists;
+
+ ReadHeads();
+ ReadTails();
+
+ if (control_data_->transaction)
+ CompleteTransaction();
+
+ init_ = true;
+ return true;
+}
+
+void Rankings::Reset() {
+ init_ = false;
+ for (int i = 0; i < LAST_ELEMENT; i++) {
+ heads_[i].set_value(0);
+ tails_[i].set_value(0);
+ }
+ control_data_ = NULL;
+}
+
+void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
+ Trace("Insert 0x%x l %d", node->address().value(), list);
+ DCHECK(node->HasData());
+ Addr& my_head = heads_[list];
+ Addr& my_tail = tails_[list];
+ Transaction lock(control_data_, node->address(), INSERT, list);
+ CacheRankingsBlock head(backend_->File(my_head), my_head);
+ if (my_head.is_initialized()) {
+ if (!GetRanking(&head))
+ return;
+
+ if (head.Data()->prev != my_head.value() && // Normal path.
+ head.Data()->prev != node->address().value()) { // FinishInsert().
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return;
+ }
+
+ head.Data()->prev = node->address().value();
+ head.Store();
+ GenerateCrash(ON_INSERT_1);
+ UpdateIterators(&head);
+ }
+
+ node->Data()->next = my_head.value();
+ node->Data()->prev = node->address().value();
+ my_head.set_value(node->address().value());
+
+ if (!my_tail.is_initialized() || my_tail.value() == node->address().value()) {
+ my_tail.set_value(node->address().value());
+ node->Data()->next = my_tail.value();
+ WriteTail(list);
+ GenerateCrash(ON_INSERT_2);
+ }
+
+ UpdateTimes(node, modified);
+ node->Store();
+ GenerateCrash(ON_INSERT_3);
+
+ // The last thing to do is move our head to point to a node already stored.
+ WriteHead(list);
+ IncrementCounter(list);
+ GenerateCrash(ON_INSERT_4);
+ backend_->FlushIndex();
+}
+
+// If a, b and r are elements on the list, and we want to remove r, the possible
+// states for the objects if a crash happens are (where y(x, z) means for object
+// y, prev is x and next is z):
+// A. One element:
+// 1. r(r, r), head(r), tail(r) initial state
+// 2. r(r, r), head(0), tail(r) WriteHead()
+// 3. r(r, r), head(0), tail(0) WriteTail()
+// 4. r(0, 0), head(0), tail(0) next.Store()
+//
+// B. Remove a random element:
+// 1. a(x, r), r(a, b), b(r, y), head(x), tail(y) initial state
+// 2. a(x, r), r(a, b), b(a, y), head(x), tail(y) next.Store()
+// 3. a(x, b), r(a, b), b(a, y), head(x), tail(y) prev.Store()
+// 4. a(x, b), r(0, 0), b(a, y), head(x), tail(y) node.Store()
+//
+// C. Remove head:
+// 1. r(r, b), b(r, y), head(r), tail(y) initial state
+// 2. r(r, b), b(r, y), head(b), tail(y) WriteHead()
+// 3. r(r, b), b(b, y), head(b), tail(y) next.Store()
+// 4. r(0, 0), b(b, y), head(b), tail(y) prev.Store()
+//
+// D. Remove tail:
+// 1. a(x, r), r(a, r), head(x), tail(r) initial state
+// 2. a(x, r), r(a, r), head(x), tail(a) WriteTail()
+// 3. a(x, a), r(a, r), head(x), tail(a) prev.Store()
+// 4. a(x, a), r(0, 0), head(x), tail(a) next.Store()
+void Rankings::Remove(CacheRankingsBlock* node, List list, bool strict) {
+ Trace("Remove 0x%x (0x%x 0x%x) l %d", node->address().value(),
+ node->Data()->next, node->Data()->prev, list);
+ DCHECK(node->HasData());
+ if (strict)
+ InvalidateIterators(node);
+
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || next_addr.is_separate_file() ||
+ !prev_addr.is_initialized() || prev_addr.is_separate_file()) {
+ if (next_addr.is_initialized() || prev_addr.is_initialized()) {
+ LOG(ERROR) << "Invalid rankings info.";
+ STRESS_NOTREACHED();
+ }
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!GetRanking(&next) || !GetRanking(&prev)) {
+ STRESS_NOTREACHED();
+ return;
+ }
+
+ if (!CheckLinks(node, &prev, &next, &list))
+ return;
+
+ Transaction lock(control_data_, node->address(), REMOVE, list);
+ prev.Data()->next = next.address().value();
+ next.Data()->prev = prev.address().value();
+ GenerateCrash(ON_REMOVE_1);
+
+ CacheAddr node_value = node->address().value();
+ Addr& my_head = heads_[list];
+ Addr& my_tail = tails_[list];
+ if (node_value == my_head.value() || node_value == my_tail.value()) {
+ if (my_head.value() == my_tail.value()) {
+ my_head.set_value(0);
+ my_tail.set_value(0);
+
+ WriteHead(list);
+ GenerateCrash(ON_REMOVE_2);
+ WriteTail(list);
+ GenerateCrash(ON_REMOVE_3);
+ } else if (node_value == my_head.value()) {
+ my_head.set_value(next.address().value());
+ next.Data()->prev = next.address().value();
+
+ WriteHead(list);
+ GenerateCrash(ON_REMOVE_4);
+ } else if (node_value == my_tail.value()) {
+ my_tail.set_value(prev.address().value());
+ prev.Data()->next = prev.address().value();
+
+ WriteTail(list);
+ GenerateCrash(ON_REMOVE_5);
+
+ // Store the new tail to make sure we can undo the operation if we crash.
+ prev.Store();
+ GenerateCrash(ON_REMOVE_6);
+ }
+ }
+
+ // Nodes out of the list can be identified by invalid pointers.
+ node->Data()->next = 0;
+ node->Data()->prev = 0;
+
+ // The last thing to get to disk is the node itself, so before that there is
+ // enough info to recover.
+ next.Store();
+ GenerateCrash(ON_REMOVE_7);
+ prev.Store();
+ GenerateCrash(ON_REMOVE_8);
+ node->Store();
+ DecrementCounter(list);
+ UpdateIterators(&next);
+ UpdateIterators(&prev);
+ backend_->FlushIndex();
+}
+
+// A crash in between Remove and Insert will lead to a dirty entry not on the
+// list. We want to avoid that case as much as we can (e.g. while waiting for
+// IO), but the net effect is just an assert on debug builds when attempting to
+// remove the entry. The alternative, reentrant transactions, would be overkill.
+void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
+ Addr& my_head = heads_[list];
+ if (my_head.value() == node->address().value()) {
+ UpdateTimes(node, modified);
+ node->set_modified();
+ return;
+ }
+
+ TimeTicks start = TimeTicks::Now();
+ Remove(node, list, true);
+ Insert(node, modified, list);
+ CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
+}
+
+CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node, List list) {
+ ScopedRankingsBlock next(this);
+ if (!node) {
+ Addr& my_head = heads_[list];
+ if (!my_head.is_initialized())
+ return NULL;
+ next.reset(new CacheRankingsBlock(backend_->File(my_head), my_head));
+ } else {
+ if (!node->HasData())
+ node->Load();
+ Addr& my_tail = tails_[list];
+ if (!my_tail.is_initialized())
+ return NULL;
+ if (my_tail.value() == node->address().value())
+ return NULL;
+ Addr address(node->Data()->next);
+ if (address.value() == node->address().value())
+ return NULL; // Another tail? fail it.
+ next.reset(new CacheRankingsBlock(backend_->File(address), address));
+ }
+
+ TrackRankingsBlock(next.get(), true);
+
+ if (!GetRanking(next.get()))
+ return NULL;
+
+ ConvertToLongLived(next.get());
+ if (node && !CheckSingleLink(node, next.get()))
+ return NULL;
+
+ return next.release();
+}
+
+CacheRankingsBlock* Rankings::GetPrev(CacheRankingsBlock* node, List list) {
+ ScopedRankingsBlock prev(this);
+ if (!node) {
+ Addr& my_tail = tails_[list];
+ if (!my_tail.is_initialized())
+ return NULL;
+ prev.reset(new CacheRankingsBlock(backend_->File(my_tail), my_tail));
+ } else {
+ if (!node->HasData())
+ node->Load();
+ Addr& my_head = heads_[list];
+ if (!my_head.is_initialized())
+ return NULL;
+ if (my_head.value() == node->address().value())
+ return NULL;
+ Addr address(node->Data()->prev);
+ if (address.value() == node->address().value())
+ return NULL; // Another head? fail it.
+ prev.reset(new CacheRankingsBlock(backend_->File(address), address));
+ }
+
+ TrackRankingsBlock(prev.get(), true);
+
+ if (!GetRanking(prev.get()))
+ return NULL;
+
+ ConvertToLongLived(prev.get());
+ if (node && !CheckSingleLink(prev.get(), node))
+ return NULL;
+
+ return prev.release();
+}
+
+void Rankings::FreeRankingsBlock(CacheRankingsBlock* node) {
+ TrackRankingsBlock(node, false);
+}
+
+void Rankings::TrackRankingsBlock(CacheRankingsBlock* node,
+ bool start_tracking) {
+ if (!node)
+ return;
+
+ IteratorPair current(node->address().value(), node);
+
+ if (start_tracking)
+ iterators_.push_back(current);
+ else
+ iterators_.remove(current);
+}
+
+int Rankings::SelfCheck() {
+ int total = 0;
+ int error = 0;
+ for (int i = 0; i < LAST_ELEMENT; i++) {
+ int partial = CheckList(static_cast<List>(i));
+ if (partial < 0 && !error)
+ error = partial;
+ else if (partial > 0)
+ total += partial;
+ }
+
+ return error ? error : total;
+}
+
+bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) const {
+ if (!node->VerifyHash())
+ return false;
+
+ const RankingsNode* data = node->Data();
+
+ if ((!data->next && data->prev) || (data->next && !data->prev))
+ return false;
+
+ // Both pointers on zero is a node out of the list.
+ if (!data->next && !data->prev && from_list)
+ return false;
+
+ List list = NO_USE; // Initialize it to something.
+ if ((node->address().value() == data->prev) && !IsHead(data->prev, &list))
+ return false;
+
+ if ((node->address().value() == data->next) && !IsTail(data->next, &list))
+ return false;
+
+ if (!data->next && !data->prev)
+ return true;
+
+ Addr next_addr(data->next);
+ Addr prev_addr(data->prev);
+ if (!next_addr.SanityCheckV2() || next_addr.file_type() != RANKINGS ||
+ !prev_addr.SanityCheckV2() || prev_addr.file_type() != RANKINGS)
+ return false;
+
+ return true;
+}
+
+bool Rankings::DataSanityCheck(CacheRankingsBlock* node, bool from_list) const {
+ const RankingsNode* data = node->Data();
+ if (!data->contents)
+ return false;
+
+ // It may have never been inserted.
+ if (from_list && (!data->last_used || !data->last_modified))
+ return false;
+
+ return true;
+}
+
+void Rankings::SetContents(CacheRankingsBlock* node, CacheAddr address) {
+ node->Data()->contents = address;
+ node->Store();
+}
+
+void Rankings::ReadHeads() {
+ for (int i = 0; i < LAST_ELEMENT; i++)
+ heads_[i] = Addr(control_data_->heads[i]);
+}
+
+void Rankings::ReadTails() {
+ for (int i = 0; i < LAST_ELEMENT; i++)
+ tails_[i] = Addr(control_data_->tails[i]);
+}
+
+void Rankings::WriteHead(List list) {
+ control_data_->heads[list] = heads_[list].value();
+}
+
+void Rankings::WriteTail(List list) {
+ control_data_->tails[list] = tails_[list].value();
+}
+
+bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
+ if (!rankings->address().is_initialized())
+ return false;
+
+ TimeTicks start = TimeTicks::Now();
+ if (!rankings->Load())
+ return false;
+
+ if (!SanityCheck(rankings, true)) {
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ backend_->OnEvent(Stats::OPEN_RANKINGS);
+
+ // Note that if the cache is in read_only mode, open entries are not marked
+ // as dirty, except when an entry is doomed. We have to look for open entries.
+ if (!backend_->read_only() && !rankings->Data()->dirty)
+ return true;
+
+ EntryImpl* entry = backend_->GetOpenEntry(rankings);
+ if (!entry) {
+ if (backend_->read_only())
+ return true;
+
+ // We cannot trust this entry, but we cannot initiate a cleanup from this
+ // point (we may be in the middle of a cleanup already). The entry will be
+ // deleted when detected from a regular open/create path.
+ rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
+ if (!rankings->Data()->dirty)
+ rankings->Data()->dirty--;
+ return true;
+ }
+
+ // Note that we should not leave this module without deleting rankings first.
+ rankings->SetData(entry->rankings()->Data());
+
+ CACHE_UMA(AGE_MS, "GetRankings", 0, start);
+ return true;
+}
+
+void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
+ if (rankings->own_data())
+ return;
+
+ // We cannot return a shared node because we are not keeping a reference
+ // to the entry that owns the buffer. Make this node a copy of the one that
+ // we have, and let the iterator logic update it when the entry changes.
+ CacheRankingsBlock temp(NULL, Addr(0));
+ *temp.Data() = *rankings->Data();
+ rankings->StopSharingData();
+ *rankings->Data() = *temp.Data();
+}
+
+void Rankings::CompleteTransaction() {
+ Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
+ if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid rankings info.";
+ return;
+ }
+
+ Trace("CompleteTransaction 0x%x", node_addr.value());
+
+ CacheRankingsBlock node(backend_->File(node_addr), node_addr);
+ if (!node.Load())
+ return;
+
+ node.Store();
+
+ Addr& my_head = heads_[control_data_->operation_list];
+ Addr& my_tail = tails_[control_data_->operation_list];
+
+  // We want to leave the node inside the list. The entry must be marked as
+ // dirty, and will be removed later. Otherwise, we'll get assertions when
+ // attempting to remove the dirty entry.
+ if (INSERT == control_data_->operation) {
+ Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
+ FinishInsert(&node);
+ } else if (REMOVE == control_data_->operation) {
+ Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
+ RevertRemove(&node);
+ } else {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid operation to recover.";
+ }
+}
+
+void Rankings::FinishInsert(CacheRankingsBlock* node) {
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
+ Addr& my_head = heads_[control_data_->operation_list];
+ Addr& my_tail = tails_[control_data_->operation_list];
+ if (my_head.value() != node->address().value()) {
+ if (my_tail.value() == node->address().value()) {
+ // This part will be skipped by the logic of Insert.
+ node->Data()->next = my_tail.value();
+ }
+
+ Insert(node, true, static_cast<List>(control_data_->operation_list));
+ }
+
+ // Tell the backend about this entry.
+ backend_->RecoveredEntry(node);
+}
+
+void Rankings::RevertRemove(CacheRankingsBlock* node) {
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
+ // The operation actually finished. Nothing to do.
+ control_data_->transaction = 0;
+ return;
+ }
+ if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(WARNING) << "Invalid rankings info.";
+ control_data_->transaction = 0;
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!next.Load() || !prev.Load())
+ return;
+
+ CacheAddr node_value = node->address().value();
+ DCHECK(prev.Data()->next == node_value ||
+ prev.Data()->next == prev_addr.value() ||
+ prev.Data()->next == next.address().value());
+ DCHECK(next.Data()->prev == node_value ||
+ next.Data()->prev == next_addr.value() ||
+ next.Data()->prev == prev.address().value());
+
+ if (node_value != prev_addr.value())
+ prev.Data()->next = node_value;
+ if (node_value != next_addr.value())
+ next.Data()->prev = node_value;
+
+ List my_list = static_cast<List>(control_data_->operation_list);
+ Addr& my_head = heads_[my_list];
+ Addr& my_tail = tails_[my_list];
+ if (!my_head.is_initialized() || !my_tail.is_initialized()) {
+ my_head.set_value(node_value);
+ my_tail.set_value(node_value);
+ WriteHead(my_list);
+ WriteTail(my_list);
+ } else if (my_head.value() == next.address().value()) {
+ my_head.set_value(node_value);
+ prev.Data()->next = next.address().value();
+ WriteHead(my_list);
+ } else if (my_tail.value() == prev.address().value()) {
+ my_tail.set_value(node_value);
+ next.Data()->prev = prev.address().value();
+ WriteTail(my_list);
+ }
+
+ next.Store();
+ prev.Store();
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
+ backend_->FlushIndex();
+}
+
+bool Rankings::CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
+ CacheRankingsBlock* next, List* list) {
+ CacheAddr node_addr = node->address().value();
+ if (prev->Data()->next == node_addr &&
+ next->Data()->prev == node_addr) {
+ // A regular linked node.
+ return true;
+ }
+
+ Trace("CheckLinks 0x%x (0x%x 0x%x)", node_addr,
+ prev->Data()->next, next->Data()->prev);
+
+ if (node_addr != prev->address().value() &&
+ node_addr != next->address().value() &&
+ prev->Data()->next == next->address().value() &&
+ next->Data()->prev == prev->address().value()) {
+ // The list is actually ok, node is wrong.
+    Trace("node 0x%x out of list %d", node_addr, *list);
+ node->Data()->next = 0;
+ node->Data()->prev = 0;
+ node->Store();
+ return false;
+ }
+
+ if (prev->Data()->next == node_addr ||
+ next->Data()->prev == node_addr) {
+    // Only one link is weird; let's double-check.
+ if (prev->Data()->next != node_addr && IsHead(node_addr, list))
+ return true;
+
+ if (next->Data()->prev != node_addr && IsTail(node_addr, list))
+ return true;
+ }
+
+ LOG(ERROR) << "Inconsistent LRU.";
+ STRESS_NOTREACHED();
+
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+}
+
+bool Rankings::CheckSingleLink(CacheRankingsBlock* prev,
+ CacheRankingsBlock* next) {
+ if (prev->Data()->next != next->address().value() ||
+ next->Data()->prev != prev->address().value()) {
+ LOG(ERROR) << "Inconsistent LRU.";
+
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ return true;
+}
+
+int Rankings::CheckList(List list) {
+ Addr last1, last2;
+ int head_items;
+ int rv = CheckListSection(list, last1, last2, true, // Head to tail.
+ &last1, &last2, &head_items);
+ if (rv == ERR_NO_ERROR)
+ return head_items;
+
+ return rv;
+}
+
+// Note that the returned error codes assume a forward walk (from head to tail)
+// so they have to be adjusted accordingly by the caller. We use two stop values
+// to be able to detect a corrupt node at the end that is not linked going back.
+int Rankings::CheckListSection(List list, Addr end1, Addr end2, bool forward,
+ Addr* last, Addr* second_last, int* num_items) {
+ Addr current = forward ? heads_[list] : tails_[list];
+ *last = *second_last = current;
+ *num_items = 0;
+ if (!current.is_initialized())
+ return ERR_NO_ERROR;
+
+ if (!current.SanityCheckForRankings())
+ return ERR_INVALID_HEAD;
+
+ scoped_ptr<CacheRankingsBlock> node;
+ Addr prev_addr(current);
+ do {
+ node.reset(new CacheRankingsBlock(backend_->File(current), current));
+ node->Load();
+ if (!SanityCheck(node.get(), true))
+ return ERR_INVALID_ENTRY;
+
+ CacheAddr next = forward ? node->Data()->next : node->Data()->prev;
+ CacheAddr prev = forward ? node->Data()->prev : node->Data()->next;
+
+ if (prev != prev_addr.value())
+ return ERR_INVALID_PREV;
+
+ Addr next_addr(next);
+ if (!next_addr.SanityCheckForRankings())
+ return ERR_INVALID_NEXT;
+
+ prev_addr = current;
+ current = next_addr;
+ *second_last = *last;
+ *last = current;
+ (*num_items)++;
+
+ if (next_addr == prev_addr) {
+ Addr last = forward ? tails_[list] : heads_[list];
+ if (next_addr == last)
+ return ERR_NO_ERROR;
+ return ERR_INVALID_TAIL;
+ }
+ } while (current != end1 && current != end2);
+ return ERR_NO_ERROR;
+}
+
+bool Rankings::IsHead(CacheAddr addr, List* list) const {
+ for (int i = 0; i < LAST_ELEMENT; i++) {
+ if (addr == heads_[i].value()) {
+ if (*list != i)
+ Trace("Changing list %d to %d", *list, i);
+ *list = static_cast<List>(i);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Rankings::IsTail(CacheAddr addr, List* list) const {
+ for (int i = 0; i < LAST_ELEMENT; i++) {
+ if (addr == tails_[i].value()) {
+ if (*list != i)
+ Trace("Changing list %d to %d", *list, i);
+ *list = static_cast<List>(i);
+ return true;
+ }
+ }
+ return false;
+}
+
+// We expect to have just a few iterators at any given time, maybe two or three,
+// but we could have more than one pointing at the same node. We walk the list
+// of cache iterators and update all that are pointing to the given node.
+void Rankings::UpdateIterators(CacheRankingsBlock* node) {
+ CacheAddr address = node->address().value();
+ for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
+ ++it) {
+ if (it->first == address && it->second->HasData()) {
+ CacheRankingsBlock* other = it->second;
+ *other->Data() = *node->Data();
+ }
+ }
+}
+
+void Rankings::InvalidateIterators(CacheRankingsBlock* node) {
+ CacheAddr address = node->address().value();
+ for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
+ ++it) {
+ if (it->first == address) {
+ DLOG(INFO) << "Invalidating iterator at 0x" << std::hex << address;
+ it->second->Discard();
+ }
+ }
+}
+
+void Rankings::IncrementCounter(List list) {
+ if (!count_lists_)
+ return;
+
+ DCHECK(control_data_->sizes[list] < kint32max);
+ if (control_data_->sizes[list] < kint32max)
+ control_data_->sizes[list]++;
+}
+
+void Rankings::DecrementCounter(List list) {
+ if (!count_lists_)
+ return;
+
+ DCHECK(control_data_->sizes[list] > 0);
+ if (control_data_->sizes[list] > 0)
+ control_data_->sizes[list]--;
+}
+
+} // namespace disk_cache
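The Transaction object above is effectively a write-ahead intent record: the pending operation is stored in the persistent LruData before any list pointer is touched and cleared only once the mutation is finished, which is what lets CompleteTransaction() finish or revert the work after a crash. A compact standalone sketch of that idea (plain structs, not the real on-disk layout):

// Sketch of the crash-recovery intent record used by Rankings::Insert/Remove.
// IntentLog stands in for the persistent LruData fields; it is illustrative,
// not the real disk format.
#include <cstdio>

struct IntentLog {
  int operation = 0;         // 0 = none, 1 = insert, 2 = remove.
  unsigned transaction = 0;  // Address of the node being modified.
};

class ScopedIntent {
 public:
  ScopedIntent(IntentLog* log, int op, unsigned addr) : log_(log) {
    log_->operation = op;      // Persisted before the list is touched.
    log_->transaction = addr;
  }
  ~ScopedIntent() {            // Only reached if the mutation completed.
    log_->operation = 0;
    log_->transaction = 0;
  }

 private:
  IntentLog* log_;
};

void Recover(const IntentLog& log) {
  if (log.transaction)
    std::printf("crashed during op %d on node 0x%x; finish or revert it\n",
                log.operation, log.transaction);
}

int main() {
  IntentLog log;
  {
    ScopedIntent intent(&log, /*op=*/1, /*addr=*/0x1234);
    // ... mutate the list here; a crash would leave the intent set ...
  }
  Recover(log);  // Nothing to recover: the intent was cleared.
  return 0;
}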
diff --git a/chromium/net/disk_cache/rankings.h b/chromium/net/disk_cache/rankings.h
new file mode 100644
index 00000000000..cd94eaf1c59
--- /dev/null
+++ b/chromium/net/disk_cache/rankings.h
@@ -0,0 +1,214 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_RANKINGS_H_
+#define NET_DISK_CACHE_RANKINGS_H_
+
+#include <list>
+
+#include "base/memory/scoped_ptr.h"
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+#include "net/disk_cache/storage_block.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+struct LruData;
+struct RankingsNode;
+typedef StorageBlock<RankingsNode> CacheRankingsBlock;
+
+// Type of crashes generated for the unit tests.
+enum RankCrashes {
+ NO_CRASH = 0,
+ INSERT_EMPTY_1,
+ INSERT_EMPTY_2,
+ INSERT_EMPTY_3,
+ INSERT_ONE_1,
+ INSERT_ONE_2,
+ INSERT_ONE_3,
+ INSERT_LOAD_1,
+ INSERT_LOAD_2,
+ REMOVE_ONE_1,
+ REMOVE_ONE_2,
+ REMOVE_ONE_3,
+ REMOVE_ONE_4,
+ REMOVE_HEAD_1,
+ REMOVE_HEAD_2,
+ REMOVE_HEAD_3,
+ REMOVE_HEAD_4,
+ REMOVE_TAIL_1,
+ REMOVE_TAIL_2,
+ REMOVE_TAIL_3,
+ REMOVE_LOAD_1,
+ REMOVE_LOAD_2,
+ REMOVE_LOAD_3,
+ MAX_CRASH
+};
+
+// This class handles the ranking information for the cache.
+class Rankings {
+ public:
+ // Possible lists of entries.
+ enum List {
+ NO_USE = 0, // List of entries that have not been reused.
+ LOW_USE, // List of entries with low reuse.
+ HIGH_USE, // List of entries with high reuse.
+ RESERVED, // Reserved for future use.
+ DELETED, // List of recently deleted or doomed entries.
+ LAST_ELEMENT
+ };
+
+ // This class provides a specialized version of scoped_ptr, that calls
+ // Rankings whenever a CacheRankingsBlock is deleted, to keep track of cache
+ // iterators that may go stale.
+ class ScopedRankingsBlock : public scoped_ptr<CacheRankingsBlock> {
+ public:
+ ScopedRankingsBlock();
+ explicit ScopedRankingsBlock(Rankings* rankings);
+ ScopedRankingsBlock(Rankings* rankings, CacheRankingsBlock* node);
+
+ ~ScopedRankingsBlock() {
+ rankings_->FreeRankingsBlock(get());
+ }
+
+ void set_rankings(Rankings* rankings) {
+ rankings_ = rankings;
+ }
+
+ // scoped_ptr::reset will delete the object.
+ void reset(CacheRankingsBlock* p = NULL) {
+ if (p != get())
+ rankings_->FreeRankingsBlock(get());
+ scoped_ptr<CacheRankingsBlock>::reset(p);
+ }
+
+ private:
+ Rankings* rankings_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedRankingsBlock);
+ };
+
+  // If we have multiple lists, we have to iterate through all of them at the
+  // same time. This structure keeps track of where we are in the iteration.
+ struct Iterator {
+ explicit Iterator(Rankings* rankings);
+ ~Iterator();
+
+ List list; // Which entry was returned to the user.
+ CacheRankingsBlock* nodes[3]; // Nodes on the first three lists.
+ Rankings* my_rankings;
+ };
+
+ Rankings();
+ ~Rankings();
+
+ bool Init(BackendImpl* backend, bool count_lists);
+
+ // Restores original state, leaving the object ready for initialization.
+ void Reset();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(CacheRankingsBlock* node, bool modified, List list);
+
+ // Removes a given entry from the LRU list. If |strict| is true, this method
+ // assumes that |node| is not pointed to by an active iterator. On the other
+ // hand, removing that restriction allows the current "head" of an iterator
+ // to be removed from the list (basically without control of the code that is
+ // performing the iteration), so it should be used with extra care.
+ void Remove(CacheRankingsBlock* node, List list, bool strict);
+
+ // Moves a given entry to the head.
+ void UpdateRank(CacheRankingsBlock* node, bool modified, List list);
+
+ // Iterates through the list.
+ CacheRankingsBlock* GetNext(CacheRankingsBlock* node, List list);
+ CacheRankingsBlock* GetPrev(CacheRankingsBlock* node, List list);
+ void FreeRankingsBlock(CacheRankingsBlock* node);
+
+ // Controls tracking of nodes used for enumerations.
+ void TrackRankingsBlock(CacheRankingsBlock* node, bool start_tracking);
+
+  // Performs a simple self-check of the lists, and returns the number of items
+ // or an error code (negative value).
+ int SelfCheck();
+
+ // Returns false if the entry is clearly invalid. from_list is true if the
+ // node comes from the LRU list.
+ bool SanityCheck(CacheRankingsBlock* node, bool from_list) const;
+ bool DataSanityCheck(CacheRankingsBlock* node, bool from_list) const;
+
+ // Sets the |contents| field of |node| to |address|.
+ void SetContents(CacheRankingsBlock* node, CacheAddr address);
+
+ private:
+ typedef std::pair<CacheAddr, CacheRankingsBlock*> IteratorPair;
+ typedef std::list<IteratorPair> IteratorList;
+
+ void ReadHeads();
+ void ReadTails();
+ void WriteHead(List list);
+ void WriteTail(List list);
+
+ // Gets the rankings information for a given rankings node. We may end up
+ // sharing the actual memory with a loaded entry, but we are not taking a
+ // reference to that entry, so |rankings| must be short lived.
+ bool GetRanking(CacheRankingsBlock* rankings);
+
+  // Makes |rankings| suitable for long-lived use.
+ void ConvertToLongLived(CacheRankingsBlock* rankings);
+
+ // Finishes a list modification after a crash.
+ void CompleteTransaction();
+ void FinishInsert(CacheRankingsBlock* rankings);
+ void RevertRemove(CacheRankingsBlock* rankings);
+
+ // Returns false if node is not properly linked. This method may change the
+ // provided |list| to reflect the list where this node is actually stored.
+ bool CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
+ CacheRankingsBlock* next, List* list);
+
+ // Checks the links between two consecutive nodes.
+ bool CheckSingleLink(CacheRankingsBlock* prev, CacheRankingsBlock* next);
+
+  // Performs a simple check of the list, and returns the number of items or an
+ // error code (negative value).
+ int CheckList(List list);
+
+ // Walks a list in the desired direction until the nodes |end1| or |end2| are
+ // reached. Returns an error code (0 on success), the number of items verified
+ // and the addresses of the last nodes visited.
+ int CheckListSection(List list, Addr end1, Addr end2, bool forward,
+ Addr* last, Addr* second_last, int* num_items);
+
+ // Returns true if addr is the head or tail of any list. When there is a
+ // match |list| will contain the list number for |addr|.
+ bool IsHead(CacheAddr addr, List* list) const;
+ bool IsTail(CacheAddr addr, List* list) const;
+
+ // Updates the iterators whenever node is being changed.
+ void UpdateIterators(CacheRankingsBlock* node);
+
+ // Invalidates the iterators pointing to this node.
+ void InvalidateIterators(CacheRankingsBlock* node);
+
+ // Keeps track of the number of entries on a list.
+ void IncrementCounter(List list);
+ void DecrementCounter(List list);
+
+ bool init_;
+ bool count_lists_;
+ Addr heads_[LAST_ELEMENT];
+ Addr tails_[LAST_ELEMENT];
+ BackendImpl* backend_;
+ LruData* control_data_; // Data related to the LRU lists.
+ IteratorList iterators_;
+
+ DISALLOW_COPY_AND_ASSIGN(Rankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_RANKINGS_H_
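ScopedRankingsBlock exists so that nodes handed out during an enumeration are released through FreeRankingsBlock(), which in turn lets TrackRankingsBlock() stop tracking them. A hedged usage sketch of walking one list with that wrapper, using only the declarations above (the helper name and the surrounding backend plumbing are assumptions):

// Illustrative only: head-to-tail walk of the NO_USE list. |rankings| is
// assumed to be an initialized Rankings instance owned by BackendImpl.
int CountNoUseEntriesSketch(disk_cache::Rankings* rankings) {
  int count = 0;
  disk_cache::Rankings::ScopedRankingsBlock node(rankings);
  disk_cache::Rankings::ScopedRankingsBlock next(
      rankings, rankings->GetNext(NULL, disk_cache::Rankings::NO_USE));
  while (next.get()) {
    ++count;
    node.reset(next.release());
    next.reset(rankings->GetNext(node.get(), disk_cache::Rankings::NO_USE));
  }
  return count;
}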
diff --git a/chromium/net/disk_cache/simple/OWNERS b/chromium/net/disk_cache/simple/OWNERS
new file mode 100644
index 00000000000..6ed8b171fe5
--- /dev/null
+++ b/chromium/net/disk_cache/simple/OWNERS
@@ -0,0 +1,2 @@
+gavinp@chromium.org
+pasko@chromium.org
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.cc b/chromium/net/disk_cache/simple/simple_backend_impl.cc
new file mode 100644
index 00000000000..2877c01f701
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.cc
@@ -0,0 +1,570 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_backend_impl.h"
+
+#include <algorithm>
+#include <cstdlib>
+
+#if defined(OS_POSIX)
+#include <sys/resource.h>
+#endif
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/file_util.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/single_thread_task_runner.h"
+#include "base/sys_info.h"
+#include "base/task_runner_util.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/time/time.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_entry_impl.h"
+#include "net/disk_cache/simple/simple_index.h"
+#include "net/disk_cache/simple/simple_index_file.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+#include "net/disk_cache/simple/simple_util.h"
+
+using base::Closure;
+using base::FilePath;
+using base::MessageLoopProxy;
+using base::SequencedWorkerPool;
+using base::SingleThreadTaskRunner;
+using base::Time;
+using base::DirectoryExists;
+using file_util::CreateDirectory;
+
+namespace {
+
+// Maximum number of concurrent worker pool threads, which is also the limit
+// on concurrent IO (as we use one thread per IO request).
+const int kDefaultMaxWorkerThreads = 50;
+
+const char kThreadNamePrefix[] = "SimpleCache";
+
+// Cache size when all other size heuristics failed.
+const uint64 kDefaultCacheSize = 80 * 1024 * 1024;
+
+// Maximum fraction of the cache that one entry can consume.
+const int kMaxFileRatio = 8;
+
+// A global sequenced worker pool to use for launching all tasks.
+SequencedWorkerPool* g_sequenced_worker_pool = NULL;
+
+void MaybeCreateSequencedWorkerPool() {
+ if (!g_sequenced_worker_pool) {
+ int max_worker_threads = kDefaultMaxWorkerThreads;
+
+ const std::string thread_count_field_trial =
+ base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
+ if (!thread_count_field_trial.empty()) {
+ max_worker_threads =
+ std::max(1, std::atoi(thread_count_field_trial.c_str()));
+ }
+
+ g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
+ kThreadNamePrefix);
+ g_sequenced_worker_pool->AddRef(); // Leak it.
+ }
+}
+
+bool g_fd_limit_histogram_has_been_populated = false;
+
+void MaybeHistogramFdLimit() {
+ if (g_fd_limit_histogram_has_been_populated)
+ return;
+
+ // Used in histograms; add new entries at end.
+ enum FdLimitStatus {
+ FD_LIMIT_STATUS_UNSUPPORTED = 0,
+ FD_LIMIT_STATUS_FAILED = 1,
+ FD_LIMIT_STATUS_SUCCEEDED = 2,
+ FD_LIMIT_STATUS_MAX = 3
+ };
+ FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
+ int soft_fd_limit = 0;
+ int hard_fd_limit = 0;
+
+#if defined(OS_POSIX)
+ struct rlimit nofile;
+ if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
+ soft_fd_limit = nofile.rlim_cur;
+ hard_fd_limit = nofile.rlim_max;
+ fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
+ } else {
+ fd_limit_status = FD_LIMIT_STATUS_FAILED;
+ }
+#endif
+
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.FileDescriptorLimitStatus",
+ fd_limit_status, FD_LIMIT_STATUS_MAX);
+ if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
+ UMA_HISTOGRAM_SPARSE_SLOWLY("SimpleCache.FileDescriptorLimitSoft",
+ soft_fd_limit);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("SimpleCache.FileDescriptorLimitHard",
+ hard_fd_limit);
+ }
+
+ g_fd_limit_histogram_has_been_populated = true;
+}
+
+// Must run on IO Thread.
+void DeleteBackendImpl(disk_cache::Backend** backend,
+ const net::CompletionCallback& callback,
+ int result) {
+ DCHECK(*backend);
+ delete *backend;
+ *backend = NULL;
+ callback.Run(result);
+}
+
+// Detects whether the files in the cache directory match the current disk
+// cache backend type and version. If the directory contains no cache, it is
+// populated with the fresh structure.
+//
+// There is a convention among disk cache backends: the magic number in the
+// file "index" should be sufficient to determine whether the cache belongs to
+// the currently running backend. The Simple Backend stores its index in the
+// file "the-real-index" (see simple_index.cc); the file "index" only carries
+// the implementation's magic number and version. There are two reasons for
+// that:
+// 1. Absence of the index is itself not a fatal error in the Simple Backend.
+// 2. The Simple Backend uses a pickled file format for the index, which makes
+//    it awkward to keep the magic number in the expected place.
+bool FileStructureConsistent(const base::FilePath& path) {
+ if (!base::PathExists(path) && !file_util::CreateDirectory(path)) {
+ LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
+ return false;
+ }
+ const base::FilePath fake_index = path.AppendASCII("index");
+ base::PlatformFileError error;
+ base::PlatformFile fake_index_file = base::CreatePlatformFile(
+ fake_index,
+ base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ,
+ NULL,
+ &error);
+ if (error == base::PLATFORM_FILE_ERROR_NOT_FOUND) {
+ base::PlatformFile file = base::CreatePlatformFile(
+ fake_index,
+ base::PLATFORM_FILE_CREATE | base::PLATFORM_FILE_WRITE,
+ NULL, &error);
+ disk_cache::SimpleFileHeader file_contents;
+ file_contents.initial_magic_number = disk_cache::kSimpleInitialMagicNumber;
+ file_contents.version = disk_cache::kSimpleVersion;
+ int bytes_written = base::WritePlatformFile(
+ file, 0, reinterpret_cast<char*>(&file_contents),
+ sizeof(file_contents));
+ if (!base::ClosePlatformFile(file) ||
+ bytes_written != sizeof(file_contents)) {
+ LOG(ERROR) << "Failed to write cache structure file: "
+ << path.LossyDisplayName();
+ return false;
+ }
+ return true;
+ } else if (error != base::PLATFORM_FILE_OK) {
+ LOG(ERROR) << "Could not open cache structure file: "
+ << path.LossyDisplayName();
+ return false;
+ } else {
+ disk_cache::SimpleFileHeader file_header;
+ int bytes_read = base::ReadPlatformFile(
+ fake_index_file, 0, reinterpret_cast<char*>(&file_header),
+ sizeof(file_header));
+ if (!base::ClosePlatformFile(fake_index_file) ||
+ bytes_read != sizeof(file_header) ||
+ file_header.initial_magic_number !=
+ disk_cache::kSimpleInitialMagicNumber ||
+ file_header.version != disk_cache::kSimpleVersion) {
+ LOG(ERROR) << "File structure does not match the disk cache backend.";
+ return false;
+ }
+ return true;
+ }
+}
+
+void CallCompletionCallback(const net::CompletionCallback& callback,
+ int error_code) {
+ DCHECK(!callback.is_null());
+ callback.Run(error_code);
+}
+
+void RecordIndexLoad(base::TimeTicks constructed_since, int result) {
+ const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
+ constructed_since;
+ if (result == net::OK)
+ UMA_HISTOGRAM_TIMES("SimpleCache.CreationToIndex", creation_to_index);
+ else
+ UMA_HISTOGRAM_TIMES("SimpleCache.CreationToIndexFail", creation_to_index);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+SimpleBackendImpl::SimpleBackendImpl(const FilePath& path,
+ int max_bytes,
+ net::CacheType type,
+ base::SingleThreadTaskRunner* cache_thread,
+ net::NetLog* net_log)
+ : path_(path),
+ cache_thread_(cache_thread),
+ orig_max_size_(max_bytes),
+ entry_operations_mode_(
+ type == net::DISK_CACHE ?
+ SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
+ SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
+ net_log_(net_log) {
+ MaybeHistogramFdLimit();
+}
+
+SimpleBackendImpl::~SimpleBackendImpl() {
+ index_->WriteToDisk();
+}
+
+int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
+ MaybeCreateSequencedWorkerPool();
+
+ worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
+ SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
+
+ index_.reset(
+ new SimpleIndex(MessageLoopProxy::current().get(),
+ path_,
+ make_scoped_ptr(new SimpleIndexFile(
+ cache_thread_.get(), worker_pool_.get(), path_))));
+ index_->ExecuteWhenReady(base::Bind(&RecordIndexLoad,
+ base::TimeTicks::Now()));
+
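+  // The on-disk structure is checked (and created if needed) on the cache
+  // thread; the reply runs InitializeIndex() back on this thread with the
+  // computed maximum size and directory mtime.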
+ PostTaskAndReplyWithResult(
+ cache_thread_,
+ FROM_HERE,
+ base::Bind(&SimpleBackendImpl::InitCacheStructureOnDisk, path_,
+ orig_max_size_),
+ base::Bind(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
+ completion_callback));
+ return net::ERR_IO_PENDING;
+}
+
+bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
+ orig_max_size_ = max_bytes;
+ return index_->SetMaxSize(max_bytes);
+}
+
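+// A single entry may consume at most 1/kMaxFileRatio of the total cache size.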
+int SimpleBackendImpl::GetMaxFileSize() const {
+ return index_->max_size() / kMaxFileRatio;
+}
+
+void SimpleBackendImpl::OnDeactivated(const SimpleEntryImpl* entry) {
+ active_entries_.erase(entry->entry_hash());
+}
+
+net::CacheType SimpleBackendImpl::GetCacheType() const {
+ return net::DISK_CACHE;
+}
+
+int32 SimpleBackendImpl::GetEntryCount() const {
+ // TODO(pasko): Use directory file count when index is not ready.
+ return index_->GetEntryCount();
+}
+
+int SimpleBackendImpl::OpenEntry(const std::string& key,
+ Entry** entry,
+ const CompletionCallback& callback) {
+ scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveEntry(key);
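+  // Wrap the caller's callback so that OnEntryOpenedFromKey() can verify the
+  // key stored in the opened entry before reporting the result.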
+ CompletionCallback backend_callback =
+ base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
+ AsWeakPtr(),
+ key,
+ entry,
+ simple_entry,
+ callback);
+ return simple_entry->OpenEntry(entry, backend_callback);
+}
+
+int SimpleBackendImpl::CreateEntry(const std::string& key,
+ Entry** entry,
+ const CompletionCallback& callback) {
+ DCHECK(key.size() > 0);
+ scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveEntry(key);
+ return simple_entry->CreateEntry(entry, callback);
+}
+
+int SimpleBackendImpl::DoomEntry(const std::string& key,
+ const net::CompletionCallback& callback) {
+ scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveEntry(key);
+ return simple_entry->DoomEntry(callback);
+}
+
+int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
+ return DoomEntriesBetween(Time(), Time(), callback);
+}
+
+void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
+ Time end_time,
+ const CompletionCallback& callback,
+ int result) {
+ if (result != net::OK) {
+ callback.Run(result);
+ return;
+ }
+ scoped_ptr<std::vector<uint64> > removed_key_hashes(
+ index_->RemoveEntriesBetween(initial_time, end_time).release());
+
+ // If any of the entries we are dooming are currently open, we need to remove
+ // them from |active_entries_|, so that attempts to create new entries will
+ // succeed and attempts to open them will fail.
+ for (int i = removed_key_hashes->size() - 1; i >= 0; --i) {
+ const uint64 entry_hash = (*removed_key_hashes)[i];
+ EntryMap::iterator it = active_entries_.find(entry_hash);
+ if (it == active_entries_.end())
+ continue;
+ SimpleEntryImpl* entry = it->second.get();
+ entry->Doom();
+
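+    // Drop this hash from the vector by overwriting it with the last element
+    // and shrinking the vector (swap-and-pop); ordering is not preserved.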
+ (*removed_key_hashes)[i] = removed_key_hashes->back();
+ removed_key_hashes->resize(removed_key_hashes->size() - 1);
+ }
+
+ PostTaskAndReplyWithResult(
+ worker_pool_, FROM_HERE,
+ base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
+ base::Passed(&removed_key_hashes), path_),
+ base::Bind(&CallCompletionCallback, callback));
+}
+
+int SimpleBackendImpl::DoomEntriesBetween(
+ const Time initial_time,
+ const Time end_time,
+ const CompletionCallback& callback) {
+ return index_->ExecuteWhenReady(
+ base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
+ initial_time, end_time, callback));
+}
+
+int SimpleBackendImpl::DoomEntriesSince(
+ const Time initial_time,
+ const CompletionCallback& callback) {
+ return DoomEntriesBetween(initial_time, Time(), callback);
+}
+
+int SimpleBackendImpl::OpenNextEntry(void** iter,
+ Entry** next_entry,
+ const CompletionCallback& callback) {
+ CompletionCallback get_next_entry =
+ base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter,
+ next_entry, callback);
+ return index_->ExecuteWhenReady(get_next_entry);
+}
+
+void SimpleBackendImpl::EndEnumeration(void** iter) {
+ SimpleIndex::HashList* entry_list =
+ static_cast<SimpleIndex::HashList*>(*iter);
+ delete entry_list;
+ *iter = NULL;
+}
+
+void SimpleBackendImpl::GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) {
+ std::pair<std::string, std::string> item;
+ item.first = "Cache type";
+ item.second = "Simple Cache";
+ stats->push_back(item);
+}
+
+void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
+ index_->UseIfExists(key);
+}
+
+void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
+ const DiskStatResult& result) {
+ if (result.net_error == net::OK) {
+ index_->SetMaxSize(result.max_size);
+ index_->Initialize(result.cache_dir_mtime);
+ }
+ callback.Run(result.net_error);
+}
+
+SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
+ const base::FilePath& path,
+ uint64 suggested_max_size) {
+ DiskStatResult result;
+ result.max_size = suggested_max_size;
+ result.net_error = net::OK;
+ if (!FileStructureConsistent(path)) {
+ LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
+ << path.LossyDisplayName();
+ result.net_error = net::ERR_FAILED;
+ } else {
+ bool mtime_result =
+ disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
+ DCHECK(mtime_result);
+ if (!result.max_size) {
+ int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
+ if (available < 0)
+ result.max_size = kDefaultCacheSize;
+ else
+ // TODO(pasko): Move PreferedCacheSize() to cache_util.h. Also fix the
+ // spelling.
+ result.max_size = disk_cache::PreferedCacheSize(available);
+ }
+ DCHECK(result.max_size);
+ }
+ return result;
+}
+
+scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
+ const std::string& key) {
+ const uint64 entry_hash = simple_util::GetEntryHashKey(key);
+
+ std::pair<EntryMap::iterator, bool> insert_result =
+ active_entries_.insert(std::make_pair(entry_hash,
+ base::WeakPtr<SimpleEntryImpl>()));
+ EntryMap::iterator& it = insert_result.first;
+ if (insert_result.second)
+ DCHECK(!it->second.get());
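+  // A NULL weak pointer means the hash was either just inserted or its
+  // previous entry has been destroyed, so a fresh SimpleEntryImpl is needed.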
+ if (!it->second.get()) {
+ SimpleEntryImpl* entry = new SimpleEntryImpl(
+ path_, entry_hash, entry_operations_mode_, this, net_log_);
+ entry->SetKey(key);
+ it->second = entry->AsWeakPtr();
+ }
+ DCHECK(it->second.get());
+ // It's possible, but unlikely, that we have an entry hash collision with a
+ // currently active entry.
+ if (key != it->second->key()) {
+ it->second->Doom();
+ DCHECK_EQ(0U, active_entries_.count(entry_hash));
+ return CreateOrFindActiveEntry(key);
+ }
+ return make_scoped_refptr(it->second.get());
+}
+
+int SimpleBackendImpl::OpenEntryFromHash(uint64 hash,
+ Entry** entry,
+ const CompletionCallback& callback) {
+ EntryMap::iterator has_active = active_entries_.find(hash);
+ if (has_active != active_entries_.end())
+ return OpenEntry(has_active->second->key(), entry, callback);
+
+ scoped_refptr<SimpleEntryImpl> simple_entry =
+ new SimpleEntryImpl(path_, hash, entry_operations_mode_, this, net_log_);
+ CompletionCallback backend_callback =
+ base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
+ AsWeakPtr(),
+ hash, entry, simple_entry, callback);
+ return simple_entry->OpenEntry(entry, backend_callback);
+}
+
+void SimpleBackendImpl::GetNextEntryInIterator(
+ void** iter,
+ Entry** next_entry,
+ const CompletionCallback& callback,
+ int error_code) {
+ if (error_code != net::OK) {
+ CallCompletionCallback(callback, error_code);
+ return;
+ }
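+  // On the first call, lazily initialize the opaque iterator with a snapshot
+  // of every hash currently known to the index.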
+ if (*iter == NULL) {
+ *iter = index()->GetAllHashes().release();
+ }
+ SimpleIndex::HashList* entry_list =
+ static_cast<SimpleIndex::HashList*>(*iter);
+ while (entry_list->size() > 0) {
+ uint64 entry_hash = entry_list->back();
+ entry_list->pop_back();
+ if (index()->Has(entry_hash)) {
+ *next_entry = NULL;
+ CompletionCallback continue_iteration = base::Bind(
+ &SimpleBackendImpl::CheckIterationReturnValue,
+ AsWeakPtr(),
+ iter,
+ next_entry,
+ callback);
+ int error_code_open = OpenEntryFromHash(entry_hash,
+ next_entry,
+ continue_iteration);
+ if (error_code_open == net::ERR_IO_PENDING)
+ return;
+ if (error_code_open != net::ERR_FAILED) {
+ CallCompletionCallback(callback, error_code_open);
+ return;
+ }
+ }
+ }
+ CallCompletionCallback(callback, net::ERR_FAILED);
+}
+
+void SimpleBackendImpl::OnEntryOpenedFromHash(
+ uint64 hash,
+ Entry** entry,
+ scoped_refptr<SimpleEntryImpl> simple_entry,
+ const CompletionCallback& callback,
+ int error_code) {
+ if (error_code != net::OK) {
+ CallCompletionCallback(callback, error_code);
+ return;
+ }
+ DCHECK(*entry);
+ std::pair<EntryMap::iterator, bool> insert_result =
+ active_entries_.insert(std::make_pair(hash,
+ base::WeakPtr<SimpleEntryImpl>()));
+ EntryMap::iterator& it = insert_result.first;
+ const bool did_insert = insert_result.second;
+ if (did_insert) {
+ // There is no active entry corresponding to this hash. The entry created
+ // is put in the map of active entries and returned to the caller.
+ it->second = simple_entry->AsWeakPtr();
+ CallCompletionCallback(callback, error_code);
+ } else {
+ // The entry was made active with the key while the creation from hash
+ // occurred. The entry created from hash needs to be closed, and the one
+ // coming from the key returned to the caller.
+ simple_entry->Close();
+ it->second->OpenEntry(entry, callback);
+ }
+}
+
+void SimpleBackendImpl::OnEntryOpenedFromKey(
+ const std::string key,
+ Entry** entry,
+ scoped_refptr<SimpleEntryImpl> simple_entry,
+ const CompletionCallback& callback,
+ int error_code) {
+ int final_code = error_code;
+ if (final_code == net::OK) {
+ bool key_matches = key.compare(simple_entry->key()) == 0;
+ if (!key_matches) {
+ // TODO(clamy): Add a unit test to check this code path.
+ DLOG(WARNING) << "Key mismatch on open.";
+ simple_entry->Doom();
+ simple_entry->Close();
+ final_code = net::ERR_FAILED;
+ } else {
+ DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
+ }
+ UMA_HISTOGRAM_BOOLEAN("SimpleCache.KeyMatchedOnOpen", key_matches);
+ }
+ CallCompletionCallback(callback, final_code);
+}
+
+void SimpleBackendImpl::CheckIterationReturnValue(
+ void** iter,
+ Entry** entry,
+ const CompletionCallback& callback,
+ int error_code) {
+ if (error_code == net::ERR_FAILED) {
+ OpenNextEntry(iter, entry, callback);
+ return;
+ }
+ CallCompletionCallback(callback, error_code);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_backend_impl.h b/chromium/net/disk_cache/simple/simple_backend_impl.h
new file mode 100644
index 00000000000..4f01351752e
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_backend_impl.h
@@ -0,0 +1,182 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_BACKEND_IMPL_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/task_runner.h"
+#include "base/time/time.h"
+#include "net/base/cache_type.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/simple/simple_entry_impl.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+class TaskRunner;
+}
+
+namespace disk_cache {
+
+// SimpleBackendImpl is a new cache backend that stores entries in individual
+// files.
+// See http://www.chromium.org/developers/design-documents/network-stack/disk-cache/very-simple-backend
+//
+// The non-static functions below must be called on the IO thread unless
+// otherwise stated.
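+//
+// A minimal usage sketch (illustrative only; |cache_path|, |cache_thread|,
+// |net_log| and the completion callbacks are assumed to exist, and error
+// handling is omitted):
+//
+//   disk_cache::SimpleBackendImpl* backend =
+//       new disk_cache::SimpleBackendImpl(
+//           cache_path, 0 /* max_bytes: let the backend pick a size */,
+//           net::DISK_CACHE, cache_thread, net_log);
+//   int rv = backend->Init(init_callback);  // Returns net::ERR_IO_PENDING.
+//   // Once |init_callback| runs with net::OK, entries can be created:
+//   disk_cache::Entry* entry = NULL;
+//   rv = backend->CreateEntry("http://www.example.com/", &entry,
+//                             create_callback);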
+
+class SimpleEntryImpl;
+class SimpleIndex;
+
+class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend,
+ public base::SupportsWeakPtr<SimpleBackendImpl> {
+ public:
+ SimpleBackendImpl(const base::FilePath& path, int max_bytes,
+ net::CacheType type,
+ base::SingleThreadTaskRunner* cache_thread,
+ net::NetLog* net_log);
+
+ virtual ~SimpleBackendImpl();
+
+ SimpleIndex* index() { return index_.get(); }
+
+ base::TaskRunner* worker_pool() { return worker_pool_.get(); }
+
+ int Init(const CompletionCallback& completion_callback);
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Returns the maximum file size permitted in this backend.
+ int GetMaxFileSize() const;
+
+ // Removes |entry| from the |active_entries_| set, forcing future Open/Create
+ // operations to construct a new object.
+ void OnDeactivated(const SimpleEntryImpl* entry);
+
+ // Backend:
+ virtual net::CacheType GetCacheType() const OVERRIDE;
+ virtual int32 GetEntryCount() const OVERRIDE;
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual void EndEnumeration(void** iter) OVERRIDE;
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) OVERRIDE;
+ virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
+
+ private:
+ typedef base::hash_map<uint64, base::WeakPtr<SimpleEntryImpl> > EntryMap;
+
+ typedef base::Callback<void(base::Time mtime, uint64 max_size, int result)>
+ InitializeIndexCallback;
+
+ // Return value of InitCacheStructureOnDisk().
+ struct DiskStatResult {
+ base::Time cache_dir_mtime;
+ uint64 max_size;
+ bool detected_magic_number_mismatch;
+ int net_error;
+ };
+
+ void InitializeIndex(const CompletionCallback& callback,
+ const DiskStatResult& result);
+
+ // Dooms all entries previously accessed between |initial_time| and
+ // |end_time|. Invoked when the index is ready.
+ void IndexReadyForDoom(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback,
+ int result);
+
+  // Tries to create the directory if it doesn't exist. This runs on the cache
+  // thread (see Init()).
+ static DiskStatResult InitCacheStructureOnDisk(const base::FilePath& path,
+ uint64 suggested_max_size);
+
+ // Searches |active_entries_| for the entry corresponding to |key|. If found,
+ // returns the found entry. Otherwise, creates a new entry and returns that.
+ scoped_refptr<SimpleEntryImpl> CreateOrFindActiveEntry(
+ const std::string& key);
+
+  // Tries to open the Entry corresponding to |hash|. If an Entry for |hash| is
+  // already in the map of active entries, it is opened. Otherwise, a new empty
+  // Entry is created, opened and filled with information from the disk.
+ int OpenEntryFromHash(uint64 hash,
+ Entry** entry,
+ const CompletionCallback& callback);
+
+  // Called when the index is initialized to find the next entry in the
+  // iterator |iter|. If there are no more hashes in the iterator list,
+  // net::ERR_FAILED is returned. Otherwise, calls OpenEntryFromHash.
+ void GetNextEntryInIterator(void** iter,
+ Entry** next_entry,
+ const CompletionCallback& callback,
+ int error_code);
+
+  // Called after an attempt to open an entry from its hash alone. Once a blank
+  // entry has been created and filled in with information from the disk, this
+  // checks that no duplicate active entry was created with a key in the
+  // meantime.
+ void OnEntryOpenedFromHash(uint64 hash,
+ Entry** entry,
+ scoped_refptr<SimpleEntryImpl> simple_entry,
+ const CompletionCallback& callback,
+ int error_code);
+
+  // Called after an attempt to open an entry from a key. Once the entry has
+  // been opened, a check for key mismatch is performed.
+ void OnEntryOpenedFromKey(const std::string key,
+ Entry** entry,
+ scoped_refptr<SimpleEntryImpl> simple_entry,
+ const CompletionCallback& callback,
+ int error_code);
+
+ // Called at the end of the asynchronous operation triggered by
+ // OpenEntryFromHash. Makes sure to continue iterating if the open entry was
+ // not a success.
+ void CheckIterationReturnValue(void** iter,
+ Entry** entry,
+ const CompletionCallback& callback,
+ int error_code);
+
+ const base::FilePath path_;
+ scoped_ptr<SimpleIndex> index_;
+ const scoped_refptr<base::SingleThreadTaskRunner> cache_thread_;
+ scoped_refptr<base::TaskRunner> worker_pool_;
+
+ int orig_max_size_;
+ const SimpleEntryImpl::OperationsMode entry_operations_mode_;
+
+ // TODO(gavinp): Store the entry_hash in SimpleEntryImpl, and index this map
+ // by hash. This will save memory, and make IndexReadyForDoom easier.
+ EntryMap active_entries_;
+
+ net::NetLog* const net_log_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_BACKEND_IMPL_H_
diff --git a/chromium/net/disk_cache/simple/simple_entry_format.cc b/chromium/net/disk_cache/simple/simple_entry_format.cc
new file mode 100644
index 00000000000..d35174a9a7f
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_entry_format.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_entry_format.h"
+
+#include <cstring>
+
+namespace disk_cache {
+
+SimpleFileHeader::SimpleFileHeader() {
+ // Make hashing repeatable: leave no padding bytes untouched.
+ std::memset(this, 0, sizeof(*this));
+}
+
+SimpleFileEOF::SimpleFileEOF() {
+ // Make hashing repeatable: leave no padding bytes untouched.
+ std::memset(this, 0, sizeof(*this));
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_entry_format.h b/chromium/net/disk_cache/simple/simple_entry_format.h
new file mode 100644
index 00000000000..d06ab1139c5
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_entry_format.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_FORMAT_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_FORMAT_H_
+
+
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "net/base/net_export.h"
+
+namespace base {
+class Time;
+}
+
+namespace disk_cache {
+
+const uint64 kSimpleInitialMagicNumber = GG_UINT64_C(0xfcfb6d1ba7725c30);
+const uint64 kSimpleFinalMagicNumber = GG_UINT64_C(0xf4fa6f45970d41d8);
+
+// A file in the Simple cache consists of:
+// - a SimpleFileHeader.
+// - the key.
+// - the data.
+// - at the end, a SimpleFileEOF record.
+const uint32 kSimpleVersion = 4;
+
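+// Number of data streams in a cache entry, each of which is stored in its own
+// file (hence the name).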
+static const int kSimpleEntryFileCount = 3;
+
+struct NET_EXPORT_PRIVATE SimpleFileHeader {
+ SimpleFileHeader();
+
+ uint64 initial_magic_number;
+ uint32 version;
+ uint32 key_length;
+ uint32 key_hash;
+};
+
+struct SimpleFileEOF {
+ enum Flags {
+ FLAG_HAS_CRC32 = (1U << 0),
+ };
+
+ SimpleFileEOF();
+
+ uint64 final_magic_number;
+ uint32 flags;
+ uint32 data_crc32;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_FORMAT_H_
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.cc b/chromium/net/disk_cache/simple/simple_entry_impl.cc
new file mode 100644
index 00000000000..3c3ec7daff6
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.cc
@@ -0,0 +1,1187 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_entry_impl.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/metrics/histogram.h"
+#include "base/task_runner.h"
+#include "base/time/time.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/net_log_parameters.h"
+#include "net/disk_cache/simple/simple_backend_impl.h"
+#include "net/disk_cache/simple/simple_index.h"
+#include "net/disk_cache/simple/simple_net_log_parameters.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "third_party/zlib/zlib.h"
+
+namespace {
+
+// Used in histograms, please only add entries at the end.
+enum ReadResult {
+ READ_RESULT_SUCCESS = 0,
+ READ_RESULT_INVALID_ARGUMENT = 1,
+ READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
+ READ_RESULT_BAD_STATE = 3,
+ READ_RESULT_FAST_EMPTY_RETURN = 4,
+ READ_RESULT_SYNC_READ_FAILURE = 5,
+ READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
+ READ_RESULT_MAX = 7,
+};
+
+// Used in histograms, please only add entries at the end.
+enum WriteResult {
+ WRITE_RESULT_SUCCESS = 0,
+ WRITE_RESULT_INVALID_ARGUMENT = 1,
+ WRITE_RESULT_OVER_MAX_SIZE = 2,
+ WRITE_RESULT_BAD_STATE = 3,
+ WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
+ WRITE_RESULT_MAX = 5,
+};
+
+// Used in histograms, please only add entries at the end.
+enum HeaderSizeChange {
+ HEADER_SIZE_CHANGE_INITIAL,
+ HEADER_SIZE_CHANGE_SAME,
+ HEADER_SIZE_CHANGE_INCREASE,
+ HEADER_SIZE_CHANGE_DECREASE,
+ HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
+ HEADER_SIZE_CHANGE_MAX
+};
+
+void RecordReadResult(ReadResult result) {
+  UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
+}
+
+void RecordWriteResult(WriteResult result) {
+  UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
+                            result, WRITE_RESULT_MAX);
+}
+
+// TODO(ttuttle): Consider removing this once we have a good handle on header
+// size changes.
+void RecordHeaderSizeChange(int old_size, int new_size) {
+ HeaderSizeChange size_change;
+
+ UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSize", new_size);
+
+ if (old_size == 0) {
+ size_change = HEADER_SIZE_CHANGE_INITIAL;
+ } else if (new_size == old_size) {
+ size_change = HEADER_SIZE_CHANGE_SAME;
+ } else if (new_size > old_size) {
+ int delta = new_size - old_size;
+ UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeIncreaseAbsolute",
+ delta);
+ UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeIncreasePercentage",
+ delta * 100 / old_size);
+ size_change = HEADER_SIZE_CHANGE_INCREASE;
+ } else { // new_size < old_size
+ int delta = old_size - new_size;
+ UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeDecreaseAbsolute",
+ delta);
+ UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeDecreasePercentage",
+ delta * 100 / old_size);
+ size_change = HEADER_SIZE_CHANGE_DECREASE;
+ }
+
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
+ size_change,
+ HEADER_SIZE_CHANGE_MAX);
+}
+
+void RecordUnexpectedStream0Write() {
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
+ HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
+ HEADER_SIZE_CHANGE_MAX);
+}
+
+// Short trampoline to take an owned input parameter and call a net completion
+// callback with its value.
+void CallCompletionCallback(const net::CompletionCallback& callback,
+ scoped_ptr<int> result) {
+ DCHECK(result);
+ if (!callback.is_null())
+ callback.Run(*result);
+}
+
+int g_open_entry_count = 0;
+
+void AdjustOpenEntryCountBy(int offset) {
+ g_open_entry_count += offset;
+ UMA_HISTOGRAM_COUNTS_10000("SimpleCache.GlobalOpenEntryCount",
+ g_open_entry_count);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+using base::Closure;
+using base::FilePath;
+using base::MessageLoopProxy;
+using base::Time;
+using base::TaskRunner;
+
+// A helper class to ensure that RunNextOperationIfNeeded() is called when
+// exiting the current stack frame.
+class SimpleEntryImpl::ScopedOperationRunner {
+ public:
+ explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
+ }
+
+ ~ScopedOperationRunner() {
+ entry_->RunNextOperationIfNeeded();
+ }
+
+ private:
+ SimpleEntryImpl* const entry_;
+};
+
+SimpleEntryImpl::SimpleEntryImpl(const FilePath& path,
+ const uint64 entry_hash,
+ OperationsMode operations_mode,
+ SimpleBackendImpl* backend,
+ net::NetLog* net_log)
+ : backend_(backend->AsWeakPtr()),
+ worker_pool_(backend->worker_pool()),
+ path_(path),
+ entry_hash_(entry_hash),
+ use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
+ last_used_(Time::Now()),
+ last_modified_(last_used_),
+ open_count_(0),
+ state_(STATE_UNINITIALIZED),
+ synchronous_entry_(NULL),
+ net_log_(net::BoundNetLog::Make(
+ net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)) {
+ COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
+ arrays_should_be_same_size);
+ COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
+ arrays_should_be_same_size);
+ COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
+ arrays_should_be_same_size);
+ COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
+ arrays_should_be_same_size);
+ MakeUninitialized();
+ net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
+ CreateNetLogSimpleEntryConstructionCallback(this));
+}
+
+int SimpleEntryImpl::OpenEntry(Entry** out_entry,
+ const CompletionCallback& callback) {
+ DCHECK(backend_.get());
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);
+
+ bool have_index = backend_->index()->initialized();
+  // This enumeration is used in histograms, add entries only at the end.
+ enum OpenEntryIndexEnum {
+ INDEX_NOEXIST = 0,
+ INDEX_MISS = 1,
+ INDEX_HIT = 2,
+ INDEX_MAX = 3,
+ };
+ OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
+ if (have_index) {
+ if (backend_->index()->Has(entry_hash_))
+ open_entry_index_enum = INDEX_HIT;
+ else
+ open_entry_index_enum = INDEX_MISS;
+ }
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
+ open_entry_index_enum, INDEX_MAX);
+
+ // If entry is not known to the index, initiate fast failover to the network.
+ if (open_entry_index_enum == INDEX_MISS) {
+ net_log_.AddEventWithNetErrorCode(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
+ net::ERR_FAILED);
+ return net::ERR_FAILED;
+ }
+
+ pending_operations_.push(SimpleEntryOperation::OpenOperation(
+ this, have_index, callback, out_entry));
+ RunNextOperationIfNeeded();
+ return net::ERR_IO_PENDING;
+}
+
+int SimpleEntryImpl::CreateEntry(Entry** out_entry,
+ const CompletionCallback& callback) {
+ DCHECK(backend_.get());
+ DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);
+
+ bool have_index = backend_->index()->initialized();
+ int ret_value = net::ERR_FAILED;
+ if (use_optimistic_operations_ &&
+ state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
+
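+    // In the optimistic case the entry is handed to the caller immediately;
+    // the actual file creation is queued and runs without a callback.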
+ ReturnEntryToCaller(out_entry);
+ pending_operations_.push(SimpleEntryOperation::CreateOperation(
+ this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
+ ret_value = net::OK;
+ } else {
+ pending_operations_.push(SimpleEntryOperation::CreateOperation(
+ this, have_index, callback, out_entry));
+ ret_value = net::ERR_IO_PENDING;
+ }
+
+  // We insert the entry in the index before creating the entry files in the
+  // SimpleSynchronousEntry, because the worst scenario is then having the
+  // entry in the index but not the created files yet; that way we never leak
+  // files. CreationOperationComplete will remove the entry from the index if
+  // the creation fails.
+ backend_->index()->Insert(key_);
+
+ RunNextOperationIfNeeded();
+ return ret_value;
+}
+
+int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);
+
+ MarkAsDoomed();
+ scoped_ptr<int> result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
+ entry_hash_, result.get());
+ Closure reply = base::Bind(&CallCompletionCallback,
+ callback, base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+ return net::ERR_IO_PENDING;
+}
+
+void SimpleEntryImpl::SetKey(const std::string& key) {
+ key_ = key;
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
+ net::NetLog::StringCallback("key", &key));
+}
+
+void SimpleEntryImpl::Doom() {
+ DoomEntry(CompletionCallback());
+}
+
+void SimpleEntryImpl::Close() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK_LT(0, open_count_);
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);
+
+ if (--open_count_ > 0) {
+ DCHECK(!HasOneRef());
+ Release(); // Balanced in ReturnEntryToCaller().
+ return;
+ }
+
+ pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
+ DCHECK(!HasOneRef());
+ Release(); // Balanced in ReturnEntryToCaller().
+ RunNextOperationIfNeeded();
+}
+
+std::string SimpleEntryImpl::GetKey() const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ return key_;
+}
+
+Time SimpleEntryImpl::GetLastUsed() const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ return last_used_;
+}
+
+Time SimpleEntryImpl::GetLastModified() const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ return last_modified_;
+}
+
+int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK_LE(0, data_size_[stream_index]);
+ return data_size_[stream_index];
+}
+
+int SimpleEntryImpl::ReadData(int stream_index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
+ CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
+ false));
+ }
+
+ if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
+ buf_len < 0) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
+ CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
+ }
+
+ RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
+ return net::ERR_INVALID_ARGUMENT;
+ }
+ if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
+ offset < 0 || !buf_len)) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
+ CreateNetLogReadWriteCompleteCallback(0));
+ }
+
+ RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
+ return 0;
+ }
+
+ // TODO(felipeg): Optimization: Add support for truly parallel read
+ // operations.
+ bool alone_in_queue =
+ pending_operations_.size() == 0 && state_ == STATE_READY;
+ pending_operations_.push(SimpleEntryOperation::ReadOperation(
+ this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
+ RunNextOperationIfNeeded();
+ return net::ERR_IO_PENDING;
+}
+
+int SimpleEntryImpl::WriteData(int stream_index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
+ CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
+ truncate));
+ }
+
+ if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
+ buf_len < 0) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
+ CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
+ }
+ RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
+ return net::ERR_INVALID_ARGUMENT;
+ }
+ if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
+ CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
+ }
+ RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
+ return net::ERR_FAILED;
+ }
+ ScopedOperationRunner operation_runner(this);
+
+ // Currently, Simple Cache is only used for HTTP, which stores the headers in
+ // stream 0 and always writes them with a single, truncating write. Detect
+ // these writes and record the size and size changes of the headers. Also,
+ // note writes to stream 0 that violate those assumptions.
+ if (stream_index == 0) {
+ if (offset == 0 && truncate)
+ RecordHeaderSizeChange(data_size_[0], buf_len);
+ else
+ RecordUnexpectedStream0Write();
+ }
+
+  // We can only do an optimistic Write if there are no pending operations, so
+  // that we are sure the next call to RunNextOperationIfNeeded will actually
+  // run the write operation that sets the stream size. It also protects
+  // against possibly-conflicting writes that could already be stacked in
+  // |pending_operations_|. We could optimize this for the case where only read
+  // operations are enqueued.
+ const bool optimistic =
+ (use_optimistic_operations_ && state_ == STATE_READY &&
+ pending_operations_.size() == 0);
+ CompletionCallback op_callback;
+ scoped_refptr<net::IOBuffer> op_buf;
+ int ret_value = net::ERR_FAILED;
+ if (!optimistic) {
+ op_buf = buf;
+ op_callback = callback;
+ ret_value = net::ERR_IO_PENDING;
+ } else {
+ // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
+ // here to avoid paying the price of the RefCountedThreadSafe atomic
+ // operations.
+ if (buf) {
+ op_buf = new IOBuffer(buf_len);
+ memcpy(op_buf->data(), buf->data(), buf_len);
+ }
+ op_callback = CompletionCallback();
+ ret_value = buf_len;
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
+ CreateNetLogReadWriteCompleteCallback(buf_len));
+ }
+ }
+
+ pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
+ stream_index,
+ offset,
+ buf_len,
+ op_buf.get(),
+ truncate,
+ optimistic,
+ op_callback));
+ return ret_value;
+}
+
+int SimpleEntryImpl::ReadSparseData(int64 offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(gavinp): Determine if the simple backend should support sparse data.
+ NOTIMPLEMENTED();
+ return net::ERR_FAILED;
+}
+
+int SimpleEntryImpl::WriteSparseData(int64 offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(gavinp): Determine if the simple backend should support sparse data.
+ NOTIMPLEMENTED();
+ return net::ERR_FAILED;
+}
+
+int SimpleEntryImpl::GetAvailableRange(int64 offset,
+ int len,
+ int64* start,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(gavinp): Determine if the simple backend should support sparse data.
+ NOTIMPLEMENTED();
+ return net::ERR_FAILED;
+}
+
+bool SimpleEntryImpl::CouldBeSparse() const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(gavinp): Determine if the simple backend should support sparse data.
+ return false;
+}
+
+void SimpleEntryImpl::CancelSparseIO() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(gavinp): Determine if the simple backend should support sparse data.
+ NOTIMPLEMENTED();
+}
+
+int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(gavinp): Determine if the simple backend should support sparse data.
+ NOTIMPLEMENTED();
+ return net::ERR_FAILED;
+}
+
+SimpleEntryImpl::~SimpleEntryImpl() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(0U, pending_operations_.size());
+ DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
+ DCHECK(!synchronous_entry_);
+ RemoveSelfFromBackend();
+ net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
+}
+
+void SimpleEntryImpl::MakeUninitialized() {
+ state_ = STATE_UNINITIALIZED;
+ std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
+ std::memset(crc32s_, 0, sizeof(crc32s_));
+ std::memset(have_written_, 0, sizeof(have_written_));
+ std::memset(data_size_, 0, sizeof(data_size_));
+ for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
+ crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
+ }
+}
+
+void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
+ DCHECK(out_entry);
+ ++open_count_;
+ AddRef(); // Balanced in Close()
+ *out_entry = this;
+}
+
+void SimpleEntryImpl::RemoveSelfFromBackend() {
+ if (!backend_.get())
+ return;
+ backend_->OnDeactivated(this);
+ backend_.reset();
+}
+
+void SimpleEntryImpl::MarkAsDoomed() {
+ if (!backend_.get())
+ return;
+ backend_->index()->Remove(key_);
+ RemoveSelfFromBackend();
+}
+
+void SimpleEntryImpl::RunNextOperationIfNeeded() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
+ pending_operations_.size(), 0, 100, 20);
+ if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
+ scoped_ptr<SimpleEntryOperation> operation(
+ new SimpleEntryOperation(pending_operations_.front()));
+ pending_operations_.pop();
+ switch (operation->type()) {
+ case SimpleEntryOperation::TYPE_OPEN:
+ OpenEntryInternal(operation->have_index(),
+ operation->callback(),
+ operation->out_entry());
+ break;
+ case SimpleEntryOperation::TYPE_CREATE:
+ CreateEntryInternal(operation->have_index(),
+ operation->callback(),
+ operation->out_entry());
+ break;
+ case SimpleEntryOperation::TYPE_CLOSE:
+ CloseInternal();
+ break;
+ case SimpleEntryOperation::TYPE_READ:
+ RecordReadIsParallelizable(*operation);
+ ReadDataInternal(operation->index(),
+ operation->offset(),
+ operation->buf(),
+ operation->length(),
+ operation->callback());
+ break;
+ case SimpleEntryOperation::TYPE_WRITE:
+ RecordWriteDependencyType(*operation);
+ WriteDataInternal(operation->index(),
+ operation->offset(),
+ operation->buf(),
+ operation->length(),
+ operation->callback(),
+ operation->truncate());
+ break;
+ default:
+ NOTREACHED();
+ }
+    // The operation is kept for histograms. Make sure it does not leak
+    // resources.
+ executing_operation_.swap(operation);
+ executing_operation_->ReleaseReferences();
+ // |this| may have been deleted.
+ }
+}
+
+void SimpleEntryImpl::OpenEntryInternal(bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry) {
+ ScopedOperationRunner operation_runner(this);
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);
+
+ if (state_ == STATE_READY) {
+ ReturnEntryToCaller(out_entry);
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
+ net::OK));
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
+ CreateNetLogSimpleEntryCreationCallback(this, net::OK));
+ return;
+ } else if (state_ == STATE_FAILURE) {
+ if (!callback.is_null()) {
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ callback, net::ERR_FAILED));
+ }
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
+ CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
+ return;
+ }
+
+ DCHECK_EQ(STATE_UNINITIALIZED, state_);
+ DCHECK(!synchronous_entry_);
+ state_ = STATE_IO_PENDING;
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ scoped_ptr<SimpleEntryCreationResults> results(
+ new SimpleEntryCreationResults(
+ SimpleEntryStat(last_used_, last_modified_, data_size_)));
+ Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
+ path_,
+ entry_hash_,
+ have_index,
+ results.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
+ this,
+ callback,
+ start_time,
+ base::Passed(&results),
+ out_entry,
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::CreateEntryInternal(bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry) {
+ ScopedOperationRunner operation_runner(this);
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);
+
+ if (state_ != STATE_UNINITIALIZED) {
+ // There is already an active normal entry.
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
+ CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
+
+ if (!callback.is_null()) {
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ callback, net::ERR_FAILED));
+ }
+ return;
+ }
+ DCHECK_EQ(STATE_UNINITIALIZED, state_);
+ DCHECK(!synchronous_entry_);
+
+ state_ = STATE_IO_PENDING;
+
+ // Since we don't know the correct values for |last_used_| and
+ // |last_modified_| yet, we make this approximation.
+ last_used_ = last_modified_ = base::Time::Now();
+
+ // If creation succeeds, we should mark all streams to be saved on close.
+ for (int i = 0; i < kSimpleEntryFileCount; ++i)
+ have_written_[i] = true;
+
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ scoped_ptr<SimpleEntryCreationResults> results(
+ new SimpleEntryCreationResults(
+ SimpleEntryStat(last_used_, last_modified_, data_size_)));
+ Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
+ path_,
+ key_,
+ entry_hash_,
+ have_index,
+ results.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
+ this,
+ callback,
+ start_time,
+ base::Passed(&results),
+ out_entry,
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::CloseInternal() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
+ scoped_ptr<std::vector<CRCRecord> >
+ crc32s_to_write(new std::vector<CRCRecord>());
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
+
+ if (state_ == STATE_READY) {
+ DCHECK(synchronous_entry_);
+ state_ = STATE_IO_PENDING;
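+    // A stream's CRC is recorded as valid only if it was written and the
+    // running checksum covers the whole stream; otherwise the record notes
+    // that no CRC is available.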
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ if (have_written_[i]) {
+ if (GetDataSize(i) == crc32s_end_offset_[i]) {
+ int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
+ crc32s_to_write->push_back(CRCRecord(i, true, crc));
+ } else {
+ crc32s_to_write->push_back(CRCRecord(i, false, 0));
+ }
+ }
+ }
+ } else {
+ DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
+ }
+
+ if (synchronous_entry_) {
+ Closure task =
+ base::Bind(&SimpleSynchronousEntry::Close,
+ base::Unretained(synchronous_entry_),
+ SimpleEntryStat(last_used_, last_modified_, data_size_),
+ base::Passed(&crc32s_to_write));
+ Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
+ synchronous_entry_ = NULL;
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ if (!have_written_[i]) {
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
+ crc_check_state_[i], CRC_CHECK_MAX);
+ }
+ }
+ } else {
+ synchronous_entry_ = NULL;
+ CloseOperationComplete();
+ }
+}
+
+void SimpleEntryImpl::ReadDataInternal(int stream_index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
+ CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
+ false));
+ }
+
+ if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+ if (!callback.is_null()) {
+ RecordReadResult(READ_RESULT_BAD_STATE);
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ callback, net::ERR_FAILED));
+ }
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
+ CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
+ }
+ return;
+ }
+ DCHECK_EQ(STATE_READY, state_);
+ if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
+ RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
+ // If there is nothing to read, we bail out before setting state_ to
+ // STATE_IO_PENDING.
+ if (!callback.is_null())
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ callback, 0));
+ return;
+ }
+
+ buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
+
+ state_ = STATE_IO_PENDING;
+ if (backend_.get())
+ backend_->index()->UseIfExists(key_);
+
+ scoped_ptr<uint32> read_crc32(new uint32());
+ scoped_ptr<int> result(new int());
+ scoped_ptr<base::Time> last_used(new base::Time());
+ Closure task = base::Bind(
+ &SimpleSynchronousEntry::ReadData,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
+ make_scoped_refptr(buf),
+ read_crc32.get(),
+ last_used.get(),
+ result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
+ this,
+ stream_index,
+ offset,
+ callback,
+ base::Passed(&read_crc32),
+ base::Passed(&last_used),
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::WriteDataInternal(int stream_index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
+ CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
+ truncate));
+ }
+
+ if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+ RecordWriteResult(WRITE_RESULT_BAD_STATE);
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
+ CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
+ }
+ if (!callback.is_null()) {
+      // We need to post a task so that we don't end up in a loop when the
+      // callback is invoked directly.
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ callback, net::ERR_FAILED));
+ }
+ // |this| may be destroyed after return here.
+ return;
+ }
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+ if (backend_.get())
+ backend_->index()->UseIfExists(key_);
+  // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
+  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
+  // We rely on most write operations being sequential, from start to end, to
+  // compute the CRC of the data. When we write to an entry and close it
+  // without having done a sequential write, we don't check the CRC on read.
+ if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
+ uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
+ : crc32(0, Z_NULL, 0);
+ if (buf_len > 0) {
+ crc32s_[stream_index] = crc32(initial_crc,
+ reinterpret_cast<const Bytef*>(buf->data()),
+ buf_len);
+ }
+ crc32s_end_offset_[stream_index] = offset + buf_len;
+ }
+
+ // |entry_stat| needs to be initialized before modifying |data_size_|.
+ scoped_ptr<SimpleEntryStat> entry_stat(
+ new SimpleEntryStat(last_used_, last_modified_, data_size_));
+ if (truncate) {
+ data_size_[stream_index] = offset + buf_len;
+ } else {
+ data_size_[stream_index] = std::max(offset + buf_len,
+ GetDataSize(stream_index));
+ }
+
+ // Since we don't know the correct values for |last_used_| and
+ // |last_modified_| yet, we make this approximation.
+ last_used_ = last_modified_ = base::Time::Now();
+
+ have_written_[stream_index] = true;
+
+ scoped_ptr<int> result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ stream_index, offset, buf_len, truncate),
+ make_scoped_refptr(buf),
+ entry_stat.get(),
+ result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
+ this,
+ stream_index,
+ callback,
+ base::Passed(&entry_stat),
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::CreationOperationComplete(
+ const CompletionCallback& completion_callback,
+ const base::TimeTicks& start_time,
+ scoped_ptr<SimpleEntryCreationResults> in_results,
+ Entry** out_entry,
+ net::NetLog::EventType end_event_type) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(state_, STATE_IO_PENDING);
+ DCHECK(in_results);
+ ScopedOperationRunner operation_runner(this);
+ UMA_HISTOGRAM_BOOLEAN(
+ "SimpleCache.EntryCreationResult", in_results->result == net::OK);
+ if (in_results->result != net::OK) {
+ if (in_results->result != net::ERR_FILE_EXISTS)
+ MarkAsDoomed();
+
+ net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
+
+ if (!completion_callback.is_null()) {
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ completion_callback, net::ERR_FAILED));
+ }
+ MakeUninitialized();
+ return;
+ }
+ // If out_entry is NULL, it means we already called ReturnEntryToCaller from
+ // the optimistic Create case.
+ if (out_entry)
+ ReturnEntryToCaller(out_entry);
+
+ state_ = STATE_READY;
+ synchronous_entry_ = in_results->sync_entry;
+ if (key_.empty()) {
+ SetKey(synchronous_entry_->key());
+ } else {
+ // This should only be triggered when creating an entry. The key check in
+ // the open case is handled in SimpleBackendImpl.
+ DCHECK_EQ(key_, synchronous_entry_->key());
+ }
+ UpdateDataFromEntryStat(in_results->entry_stat);
+ UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
+ (base::TimeTicks::Now() - start_time));
+ AdjustOpenEntryCountBy(1);
+
+ net_log_.AddEvent(end_event_type);
+ if (!completion_callback.is_null()) {
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ completion_callback, net::OK));
+ }
+}
+
+void SimpleEntryImpl::EntryOperationComplete(
+ int stream_index,
+ const CompletionCallback& completion_callback,
+ const SimpleEntryStat& entry_stat,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK_EQ(STATE_IO_PENDING, state_);
+ DCHECK(result);
+ state_ = STATE_READY;
+ if (*result < 0) {
+ MarkAsDoomed();
+ state_ = STATE_FAILURE;
+ crc32s_end_offset_[stream_index] = 0;
+ } else {
+ UpdateDataFromEntryStat(entry_stat);
+ }
+
+ if (!completion_callback.is_null()) {
+ MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+ completion_callback, *result));
+ }
+ RunNextOperationIfNeeded();
+}
+
+void SimpleEntryImpl::ReadOperationComplete(
+ int stream_index,
+ int offset,
+ const CompletionCallback& completion_callback,
+ scoped_ptr<uint32> read_crc32,
+ scoped_ptr<base::Time> last_used,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK_EQ(STATE_IO_PENDING, state_);
+ DCHECK(read_crc32);
+ DCHECK(result);
+
+ if (*result > 0 &&
+ crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
+ crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
+ }
+
+ if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
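+    // zlib's crc32_combine() folds the crc of the freshly read |*result|
+    // bytes into the crc of the prefix [0 .. offset), yielding the crc of
+    // [0 .. offset + *result).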
+ uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
+ : crc32s_[stream_index];
+ crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
+ crc32s_end_offset_[stream_index] += *result;
+ if (!have_written_[stream_index] &&
+ GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
+ // We have just read a file from start to finish, and so we have
+ // computed a crc of the entire file. We can check it now. If a cache
+ // entry has a single reader, the normal pattern is to read from start
+ // to finish.
+
+ // Other cases are possible. In the case of two readers on the same
+ // entry, one reader can be behind the other. In this case we compute
+ // the crc as the most advanced reader progresses, and check it for
+ // both readers as they read the last byte.
+
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);
+
+ scoped_ptr<int> new_result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
+ base::Unretained(synchronous_entry_),
+ stream_index,
+ data_size_[stream_index],
+ crc32s_[stream_index],
+ new_result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
+ this, *result, stream_index,
+ completion_callback,
+ base::Passed(&new_result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+ crc_check_state_[stream_index] = CRC_CHECK_DONE;
+ return;
+ }
+ }
+
+ if (*result < 0) {
+ RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
+ } else {
+ RecordReadResult(READ_RESULT_SUCCESS);
+ if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
+ offset + *result == GetDataSize(stream_index)) {
+ crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
+ }
+ }
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
+ CreateNetLogReadWriteCompleteCallback(*result));
+ }
+
+ EntryOperationComplete(
+ stream_index,
+ completion_callback,
+ SimpleEntryStat(*last_used, last_modified_, data_size_),
+ result.Pass());
+}
+
+void SimpleEntryImpl::WriteOperationComplete(
+ int stream_index,
+ const CompletionCallback& completion_callback,
+ scoped_ptr<SimpleEntryStat> entry_stat,
+ scoped_ptr<int> result) {
+ if (*result >= 0)
+ RecordWriteResult(WRITE_RESULT_SUCCESS);
+ else
+ RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
+ CreateNetLogReadWriteCompleteCallback(*result));
+ }
+
+ EntryOperationComplete(
+ stream_index, completion_callback, *entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::ChecksumOperationComplete(
+ int orig_result,
+ int stream_index,
+ const CompletionCallback& completion_callback,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK_EQ(STATE_IO_PENDING, state_);
+ DCHECK(result);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEventWithNetErrorCode(
+ net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
+ *result);
+ }
+
+ if (*result == net::OK) {
+ *result = orig_result;
+ if (orig_result >= 0)
+ RecordReadResult(READ_RESULT_SUCCESS);
+ else
+ RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
+ } else {
+ RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
+ }
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
+ CreateNetLogReadWriteCompleteCallback(*result));
+ }
+
+ EntryOperationComplete(
+ stream_index,
+ completion_callback,
+ SimpleEntryStat(last_used_, last_modified_, data_size_),
+ result.Pass());
+}
+
+void SimpleEntryImpl::CloseOperationComplete() {
+ DCHECK(!synchronous_entry_);
+ DCHECK_EQ(0, open_count_);
+ DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
+ STATE_UNINITIALIZED == state_);
+ net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
+ AdjustOpenEntryCountBy(-1);
+ MakeUninitialized();
+ RunNextOperationIfNeeded();
+}
+
+void SimpleEntryImpl::UpdateDataFromEntryStat(
+ const SimpleEntryStat& entry_stat) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK_EQ(STATE_READY, state_);
+
+ last_used_ = entry_stat.last_used;
+ last_modified_ = entry_stat.last_modified;
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ data_size_[i] = entry_stat.data_size[i];
+ }
+ if (backend_.get())
+ backend_->index()->UpdateEntrySize(key_, GetDiskUsage());
+}
+
+int64 SimpleEntryImpl::GetDiskUsage() const {
+ int64 file_size = 0;
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ file_size +=
+ simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
+ }
+ return file_size;
+}
+
+void SimpleEntryImpl::RecordReadIsParallelizable(
+ const SimpleEntryOperation& operation) const {
+ if (!executing_operation_)
+ return;
+ // TODO(clamy): The values of this histogram should be changed to something
+ // more useful.
+ bool parallelizable_read =
+ !operation.alone_in_queue() &&
+ executing_operation_->type() == SimpleEntryOperation::TYPE_READ;
+ UMA_HISTOGRAM_BOOLEAN("SimpleCache.ReadIsParallelizable",
+ parallelizable_read);
+}
+
+void SimpleEntryImpl::RecordWriteDependencyType(
+ const SimpleEntryOperation& operation) const {
+ if (!executing_operation_)
+ return;
+  // Used in histograms; please only add entries at the end.
+ enum WriteDependencyType {
+ WRITE_OPTIMISTIC = 0,
+ WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
+ WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
+ WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
+ WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
+ WRITE_FOLLOWS_CONFLICTING_READ = 5,
+ WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
+ WRITE_FOLLOWS_OTHER = 7,
+ WRITE_DEPENDENCY_TYPE_MAX = 8,
+ };
+
+ WriteDependencyType type = WRITE_FOLLOWS_OTHER;
+ if (operation.optimistic()) {
+ type = WRITE_OPTIMISTIC;
+ } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
+ executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
+ bool conflicting = executing_operation_->ConflictsWith(operation);
+
+ if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
+ type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
+ : WRITE_FOLLOWS_NON_CONFLICTING_READ;
+ } else if (executing_operation_->optimistic()) {
+ type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
+ : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
+ } else {
+ type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
+ : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
+ }
+ }
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.WriteDependencyType", type, WRITE_DEPENDENCY_TYPE_MAX);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_entry_impl.h b/chromium/net/disk_cache/simple/simple_entry_impl.h
new file mode 100644
index 00000000000..7eb8914e873
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_entry_impl.h
@@ -0,0 +1,290 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_IMPL_H_
+
+#include <queue>
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "net/base/net_log.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_entry_operation.h"
+
+namespace base {
+class TaskRunner;
+}
+
+namespace net {
+class IOBuffer;
+}
+
+namespace disk_cache {
+
+class SimpleBackendImpl;
+class SimpleSynchronousEntry;
+struct SimpleEntryStat;
+struct SimpleEntryCreationResults;
+
+// SimpleEntryImpl is the IO thread interface to an entry in the very simple
+// disk cache. It proxies for the SimpleSynchronousEntry, which performs IO
+// on the worker thread.
+class SimpleEntryImpl : public Entry, public base::RefCounted<SimpleEntryImpl>,
+ public base::SupportsWeakPtr<SimpleEntryImpl> {
+ friend class base::RefCounted<SimpleEntryImpl>;
+ public:
+ enum OperationsMode {
+ NON_OPTIMISTIC_OPERATIONS,
+ OPTIMISTIC_OPERATIONS,
+ };
+
+ SimpleEntryImpl(const base::FilePath& path,
+ uint64 entry_hash,
+ OperationsMode operations_mode,
+ SimpleBackendImpl* backend,
+ net::NetLog* net_log);
+
+ // Adds another reader/writer to this entry, if possible, returning |this| to
+ // |entry|.
+ int OpenEntry(Entry** entry, const CompletionCallback& callback);
+
+ // Creates this entry, if possible. Returns |this| to |entry|.
+ int CreateEntry(Entry** entry, const CompletionCallback& callback);
+
+ // Identical to Backend::Doom() except that it accepts a CompletionCallback.
+ int DoomEntry(const CompletionCallback& callback);
+
+ const std::string& key() const { return key_; }
+ uint64 entry_hash() const { return entry_hash_; }
+ void SetKey(const std::string& key);
+
+ // From Entry:
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int stream_index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int stream_index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset,
+ int len,
+ int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ class ScopedOperationRunner;
+ friend class ScopedOperationRunner;
+
+ enum State {
+    // The state immediately after construction, before |synchronous_entry_|
+    // has been assigned. This is the only legal state in which to destroy an
+    // entry.
+ STATE_UNINITIALIZED,
+
+ // This entry is available for regular IO.
+ STATE_READY,
+
+    // IO is currently in flight; operations must wait for completion before
+    // launching.
+ STATE_IO_PENDING,
+
+ // A failure occurred in the current or previous operation. All operations
+ // after that must fail, until we receive a Close().
+ STATE_FAILURE,
+ };
+
+  // Used in histograms; please only add entries at the end.
+ enum CheckCrcResult {
+ CRC_CHECK_NEVER_READ_TO_END = 0,
+ CRC_CHECK_NOT_DONE = 1,
+ CRC_CHECK_DONE = 2,
+ CRC_CHECK_NEVER_READ_AT_ALL = 3,
+ CRC_CHECK_MAX = 4,
+ };
+
+ virtual ~SimpleEntryImpl();
+
+ // Sets entry to STATE_UNINITIALIZED.
+ void MakeUninitialized();
+
+ // Return this entry to a user of the API in |out_entry|. Increments the user
+ // count.
+ void ReturnEntryToCaller(Entry** out_entry);
+
+ // Ensures that |this| is no longer referenced by our |backend_|, this
+ // guarantees that this entry cannot have OpenEntry/CreateEntry called again.
+ void RemoveSelfFromBackend();
+
+  // An error occurred, and the SimpleSynchronousEntry should have Doomed
+  // us at this point. We need to remove |this| from the Backend and the
+  // index.
+ void MarkAsDoomed();
+
+ // Runs the next operation in the queue, if any and if there is no other
+ // operation running at the moment.
+ // WARNING: May delete |this|, as an operation in the queue can contain
+ // the last reference.
+ void RunNextOperationIfNeeded();
+
+ void OpenEntryInternal(bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry);
+
+ void CreateEntryInternal(bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry);
+
+ void CloseInternal();
+
+ void ReadDataInternal(int index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback);
+
+ void WriteDataInternal(int index,
+ int offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback,
+ bool truncate);
+
+  // Called after a SimpleSynchronousEntry has completed CreateEntry() or
+  // OpenEntry(). If |in_results| indicates success, we can return |this|
+  // SimpleEntryImpl to |*out_entry|. Runs |completion_callback|.
+ void CreationOperationComplete(
+ const CompletionCallback& completion_callback,
+ const base::TimeTicks& start_time,
+ scoped_ptr<SimpleEntryCreationResults> in_results,
+ Entry** out_entry,
+ net::NetLog::EventType end_event_type);
+
+ // Called after we've closed and written the EOF record to our entry. Until
+ // this point it hasn't been safe to OpenEntry() the same entry, but from this
+ // point it is.
+ void CloseOperationComplete();
+
+ // Internal utility method used by other completion methods. Calls
+ // |completion_callback| after updating state and dooming on errors.
+ void EntryOperationComplete(int stream_index,
+ const CompletionCallback& completion_callback,
+ const SimpleEntryStat& entry_stat,
+ scoped_ptr<int> result);
+
+ // Called after an asynchronous read. Updates |crc32s_| if possible.
+ void ReadOperationComplete(int stream_index,
+ int offset,
+ const CompletionCallback& completion_callback,
+ scoped_ptr<uint32> read_crc32,
+ scoped_ptr<base::Time> last_used,
+ scoped_ptr<int> result);
+
+ // Called after an asynchronous write completes.
+ void WriteOperationComplete(int stream_index,
+ const CompletionCallback& completion_callback,
+ scoped_ptr<SimpleEntryStat> entry_stat,
+ scoped_ptr<int> result);
+
+  // Called after validating the checksums on an entry. Passes through the
+  // original result if successful, propagates the error if the checksum does
+  // not validate.
+ void ChecksumOperationComplete(
+      int orig_result,
+      int stream_index,
+ const CompletionCallback& completion_callback,
+ scoped_ptr<int> result);
+
+ // Called after completion of asynchronous IO and receiving file metadata for
+ // the entry in |entry_stat|. Updates the metadata in the entry and in the
+ // index to make them available on next IO operations.
+ void UpdateDataFromEntryStat(const SimpleEntryStat& entry_stat);
+
+ int64 GetDiskUsage() const;
+
+ // Used to report histograms.
+ void RecordReadIsParallelizable(const SimpleEntryOperation& operation) const;
+ void RecordWriteDependencyType(const SimpleEntryOperation& operation) const;
+
+ // All nonstatic SimpleEntryImpl methods should always be called on the IO
+ // thread, in all cases. |io_thread_checker_| documents and enforces this.
+ base::ThreadChecker io_thread_checker_;
+
+ base::WeakPtr<SimpleBackendImpl> backend_;
+ const scoped_refptr<base::TaskRunner> worker_pool_;
+ const base::FilePath path_;
+ const uint64 entry_hash_;
+ const bool use_optimistic_operations_;
+ std::string key_;
+
+ // |last_used_|, |last_modified_| and |data_size_| are copied from the
+ // synchronous entry at the completion of each item of asynchronous IO.
+ // TODO(clamy): Unify last_used_ with data in the index.
+ base::Time last_used_;
+ base::Time last_modified_;
+ int32 data_size_[kSimpleEntryFileCount];
+
+  // Number of times this object has been returned from Backend::OpenEntry() and
+  // Backend::CreateEntry() without subsequent Entry::Close() calls. Used to
+  // notify the backend when this entry is no longer used by any callers.
+ int open_count_;
+
+ State state_;
+
+  // When possible, we compute a crc32 for the data in each entry as we read or
+  // write. For each stream, |crc32s_[index]| is the crc32 of that stream from
+  // [0 .. |crc32s_end_offset_[index]|). If |crc32s_end_offset_[index] == 0|
+  // then the value of |crc32s_[index]| is undefined.
+ int32 crc32s_end_offset_[kSimpleEntryFileCount];
+ uint32 crc32s_[kSimpleEntryFileCount];
+
+ // If |have_written_[index]| is true, we have written to the stream |index|.
+ bool have_written_[kSimpleEntryFileCount];
+
+ // Reflects how much CRC checking has been done with the entry. This state is
+ // reported on closing each entry stream.
+ CheckCrcResult crc_check_state_[kSimpleEntryFileCount];
+
+  // The |synchronous_entry_| is the worker thread object that performs IO on
+  // entries. It's owned by this SimpleEntryImpl whenever no operation is
+  // pending on the worker pool (i.e. |state_| is not STATE_IO_PENDING).
+ SimpleSynchronousEntry* synchronous_entry_;
+
+ std::queue<SimpleEntryOperation> pending_operations_;
+
+ net::BoundNetLog net_log_;
+
+ scoped_ptr<SimpleEntryOperation> executing_operation_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_IMPL_H_
diff --git a/chromium/net/disk_cache/simple/simple_entry_operation.cc b/chromium/net/disk_cache/simple/simple_entry_operation.cc
new file mode 100644
index 00000000000..81d5f7c888b
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_entry_operation.cc
@@ -0,0 +1,184 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_entry_operation.h"
+
+#include "base/logging.h"
+#include "net/base/io_buffer.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/simple/simple_entry_impl.h"
+
+namespace disk_cache {
+
+SimpleEntryOperation::SimpleEntryOperation(const SimpleEntryOperation& other)
+ : entry_(other.entry_.get()),
+ buf_(other.buf_),
+ callback_(other.callback_),
+ out_entry_(other.out_entry_),
+ offset_(other.offset_),
+ length_(other.length_),
+ type_(other.type_),
+ have_index_(other.have_index_),
+ index_(other.index_),
+ truncate_(other.truncate_),
+ optimistic_(other.optimistic_),
+ alone_in_queue_(other.alone_in_queue_) {
+}
+
+SimpleEntryOperation::~SimpleEntryOperation() {}
+
+// Static.
+SimpleEntryOperation SimpleEntryOperation::OpenOperation(
+ SimpleEntryImpl* entry,
+ bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry) {
+ return SimpleEntryOperation(entry,
+ NULL,
+ callback,
+ out_entry,
+ 0,
+ 0,
+ TYPE_OPEN,
+ have_index,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// Static.
+SimpleEntryOperation SimpleEntryOperation::CreateOperation(
+ SimpleEntryImpl* entry,
+ bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry) {
+ return SimpleEntryOperation(entry,
+ NULL,
+ callback,
+ out_entry,
+ 0,
+ 0,
+ TYPE_CREATE,
+ have_index,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// Static.
+SimpleEntryOperation SimpleEntryOperation::CloseOperation(
+ SimpleEntryImpl* entry) {
+ return SimpleEntryOperation(entry,
+ NULL,
+ CompletionCallback(),
+ NULL,
+ 0,
+ 0,
+ TYPE_CLOSE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// Static.
+SimpleEntryOperation SimpleEntryOperation::ReadOperation(
+ SimpleEntryImpl* entry,
+ int index,
+ int offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback,
+ bool alone_in_queue) {
+ return SimpleEntryOperation(entry,
+ buf,
+ callback,
+ NULL,
+ offset,
+ length,
+ TYPE_READ,
+ false,
+ index,
+ false,
+ false,
+ alone_in_queue);
+}
+
+// Static.
+SimpleEntryOperation SimpleEntryOperation::WriteOperation(
+ SimpleEntryImpl* entry,
+ int index,
+ int offset,
+ int length,
+ net::IOBuffer* buf,
+ bool truncate,
+ bool optimistic,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ buf,
+ callback,
+ NULL,
+ offset,
+ length,
+ TYPE_WRITE,
+ false,
+ index,
+ truncate,
+ optimistic,
+ false);
+}
+
+bool SimpleEntryOperation::ConflictsWith(
+ const SimpleEntryOperation& other_op) const {
+ if (type_ != TYPE_READ && type_ != TYPE_WRITE)
+ return true;
+ if (other_op.type() != TYPE_READ && other_op.type() != TYPE_WRITE)
+ return true;
+ if (type() == TYPE_READ && other_op.type() == TYPE_READ)
+ return false;
+ if (index_ != other_op.index_)
+ return false;
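+  // For the remaining read/write pairs, the operations conflict when their
+  // byte ranges overlap. A truncating write is treated as extending to
+  // INT_MAX, so it conflicts with any operation whose range extends past the
+  // truncate offset.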
+ int end = (type_ == TYPE_WRITE && truncate_) ? INT_MAX : offset_ + length_;
+ int other_op_end = (other_op.type() == TYPE_WRITE && other_op.truncate())
+ ? INT_MAX
+ : other_op.offset() + other_op.length();
+ return (offset_ < other_op_end && other_op.offset() < end);
+}
+
+void SimpleEntryOperation::ReleaseReferences() {
+ callback_ = CompletionCallback();
+ buf_ = NULL;
+ entry_ = NULL;
+}
+
+SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback,
+ Entry** out_entry,
+ int offset,
+ int length,
+ EntryOperationType type,
+ bool have_index,
+ int index,
+ bool truncate,
+ bool optimistic,
+ bool alone_in_queue)
+ : entry_(entry),
+ buf_(buf),
+ callback_(callback),
+ out_entry_(out_entry),
+ offset_(offset),
+ length_(length),
+ type_(type),
+ have_index_(have_index),
+ index_(index),
+ truncate_(truncate),
+ optimistic_(optimistic),
+ alone_in_queue_(alone_in_queue) {
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_entry_operation.h b/chromium/net/disk_cache/simple/simple_entry_operation.h
new file mode 100644
index 00000000000..acdd60a3207
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_entry_operation.h
@@ -0,0 +1,125 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_OPERATION_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_OPERATION_H_
+
+#include "base/memory/ref_counted.h"
+#include "net/base/completion_callback.h"
+#include "net/base/net_log.h"
+
+namespace net {
+class IOBuffer;
+}
+
+namespace disk_cache {
+
+class Entry;
+class SimpleEntryImpl;
+
+// SimpleEntryOperation stores the information regarding operations in
+// SimpleEntryImpl, between the moment they are issued by users of the backend,
+// and the moment when they are executed.
+class SimpleEntryOperation {
+ public:
+ typedef net::CompletionCallback CompletionCallback;
+
+ enum EntryOperationType {
+ TYPE_OPEN = 0,
+ TYPE_CREATE = 1,
+ TYPE_CLOSE = 2,
+ TYPE_READ = 3,
+ TYPE_WRITE = 4,
+ };
+
+ SimpleEntryOperation(const SimpleEntryOperation& other);
+ ~SimpleEntryOperation();
+
+ static SimpleEntryOperation OpenOperation(SimpleEntryImpl* entry,
+ bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry);
+ static SimpleEntryOperation CreateOperation(
+ SimpleEntryImpl* entry,
+ bool have_index,
+ const CompletionCallback& callback,
+ Entry** out_entry);
+ static SimpleEntryOperation CloseOperation(SimpleEntryImpl* entry);
+ static SimpleEntryOperation ReadOperation(SimpleEntryImpl* entry,
+ int index,
+ int offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback,
+ bool alone_in_queue);
+ static SimpleEntryOperation WriteOperation(
+ SimpleEntryImpl* entry,
+ int index,
+ int offset,
+ int length,
+ net::IOBuffer* buf,
+ bool truncate,
+ bool optimistic,
+ const CompletionCallback& callback);
+
+ bool ConflictsWith(const SimpleEntryOperation& other_op) const;
+  // Releases all references. After calling this method, the
+  // SimpleEntryOperation will only hold POD members.
+ void ReleaseReferences();
+
+ EntryOperationType type() const {
+ return static_cast<EntryOperationType>(type_);
+ }
+ const CompletionCallback& callback() const { return callback_; }
+ Entry** out_entry() { return out_entry_; }
+ bool have_index() const { return have_index_; }
+ int index() const { return index_; }
+ int offset() const { return offset_; }
+ int length() const { return length_; }
+ net::IOBuffer* buf() { return buf_.get(); }
+ bool truncate() const { return truncate_; }
+ bool optimistic() const { return optimistic_; }
+ bool alone_in_queue() const { return alone_in_queue_; }
+
+ private:
+ SimpleEntryOperation(SimpleEntryImpl* entry,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback,
+ Entry** out_entry,
+ int offset,
+ int length,
+ EntryOperationType type,
+ bool have_index,
+ int index,
+ bool truncate,
+ bool optimistic,
+ bool alone_in_queue);
+
+  // This ensures the entry will not be deleted until the operation has run.
+ scoped_refptr<SimpleEntryImpl> entry_;
+ scoped_refptr<net::IOBuffer> buf_;
+ CompletionCallback callback_;
+
+ // Used in open and create operations.
+ Entry** out_entry_;
+
+ // Used in write and read operations.
+ const int offset_;
+ const int length_;
+
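+  // The numbers in the trailing comments track the cumulative count of bits
+  // used by the bit-fields below.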
+ const unsigned int type_ : 3; /* 3 */
+ // Used in open and create operations.
+ const unsigned int have_index_ : 1; /* 4 */
+ // Used in write and read operations.
+ const unsigned int index_ : 2; /* 6 */
+ // Used only in write operations.
+ const unsigned int truncate_ : 1; /* 7 */
+ const unsigned int optimistic_ : 1; /* 8 */
+ // Used only in SimpleCache.ReadIsParallelizable histogram.
+ const unsigned int alone_in_queue_ : 1; /* 9 */
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_OPERATION_H_
diff --git a/chromium/net/disk_cache/simple/simple_index.cc b/chromium/net/disk_cache/simple/simple_index.cc
new file mode 100644
index 00000000000..78ce87ed71d
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_index.cc
@@ -0,0 +1,461 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_index.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/file_util.h"
+#include "base/files/file_enumerator.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/pickle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/task_runner.h"
+#include "base/threading/worker_pool.h"
+#include "base/time/time.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_index_file.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+#include "net/disk_cache/simple/simple_util.h"
+
+#if defined(OS_POSIX)
+#include <sys/stat.h>
+#include <sys/time.h>
+#endif
+
+namespace {
+
+// How many milliseconds we delay writing the index to disk after the last
+// cache operation.
+const int kDefaultWriteToDiskDelayMSecs = 20000;
+const int kDefaultWriteToDiskOnBackgroundDelayMSecs = 100;
+
+// The cache space is divided into this number of parts; eviction starts when
+// only one part of free space is left.
+const uint32 kEvictionMarginDivisor = 20;
+
+const uint32 kBytesInKb = 1024;
+
+// Utility class used for timestamp comparisons in entry metadata while sorting.
+class CompareHashesForTimestamp {
+ typedef disk_cache::SimpleIndex SimpleIndex;
+ typedef disk_cache::SimpleIndex::EntrySet EntrySet;
+ public:
+ explicit CompareHashesForTimestamp(const EntrySet& set);
+
+ bool operator()(uint64 hash1, uint64 hash2);
+ private:
+ const EntrySet& entry_set_;
+};
+
+CompareHashesForTimestamp::CompareHashesForTimestamp(const EntrySet& set)
+ : entry_set_(set) {
+}
+
+bool CompareHashesForTimestamp::operator()(uint64 hash1, uint64 hash2) {
+ EntrySet::const_iterator it1 = entry_set_.find(hash1);
+ DCHECK(it1 != entry_set_.end());
+ EntrySet::const_iterator it2 = entry_set_.find(hash2);
+ DCHECK(it2 != entry_set_.end());
+ return it1->second.GetLastUsedTime() < it2->second.GetLastUsedTime();
+}
+
+} // namespace
+
+namespace disk_cache {
+
+EntryMetadata::EntryMetadata() : last_used_time_(0), entry_size_(0) {}
+
+EntryMetadata::EntryMetadata(base::Time last_used_time, uint64 entry_size)
+ : last_used_time_(last_used_time.ToInternalValue()),
+ entry_size_(entry_size) {}
+
+base::Time EntryMetadata::GetLastUsedTime() const {
+ return base::Time::FromInternalValue(last_used_time_);
+}
+
+void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
+ last_used_time_ = last_used_time.ToInternalValue();
+}
+
+void EntryMetadata::Serialize(Pickle* pickle) const {
+ DCHECK(pickle);
+ COMPILE_ASSERT(sizeof(EntryMetadata) == (sizeof(int64) + sizeof(uint64)),
+ EntryMetadata_has_two_member_variables);
+ pickle->WriteInt64(last_used_time_);
+ pickle->WriteUInt64(entry_size_);
+}
+
+bool EntryMetadata::Deserialize(PickleIterator* it) {
+ DCHECK(it);
+ return it->ReadInt64(&last_used_time_) && it->ReadUInt64(&entry_size_);
+}
+
+SimpleIndex::SimpleIndex(base::SingleThreadTaskRunner* io_thread,
+ const base::FilePath& cache_directory,
+ scoped_ptr<SimpleIndexFile> index_file)
+ : cache_size_(0),
+ max_size_(0),
+ high_watermark_(0),
+ low_watermark_(0),
+ eviction_in_progress_(false),
+ initialized_(false),
+ cache_directory_(cache_directory),
+ index_file_(index_file.Pass()),
+ io_thread_(io_thread),
+ // Creating the callback once so it is reused every time
+ // write_to_disk_timer_.Start() is called.
+ write_to_disk_cb_(base::Bind(&SimpleIndex::WriteToDisk, AsWeakPtr())),
+ app_on_background_(false) {}
+
+SimpleIndex::~SimpleIndex() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+
+ // Fail all callbacks waiting for the index to come up.
+ for (CallbackList::iterator it = to_run_when_initialized_.begin(),
+ end = to_run_when_initialized_.end(); it != end; ++it) {
+ it->Run(net::ERR_ABORTED);
+ }
+}
+
+void SimpleIndex::Initialize(base::Time cache_mtime) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+
+ // Take the foreground and background index flush delays from the experiment
+ // settings only if both are valid.
+ foreground_flush_delay_ = kDefaultWriteToDiskDelayMSecs;
+ background_flush_delay_ = kDefaultWriteToDiskOnBackgroundDelayMSecs;
+ const std::string index_flush_intervals = base::FieldTrialList::FindFullName(
+ "SimpleCacheIndexFlushDelay_Foreground_Background");
+ if (!index_flush_intervals.empty()) {
+ base::StringTokenizer tokens(index_flush_intervals, "_");
+ int foreground_delay, background_delay;
+ if (tokens.GetNext() &&
+ base::StringToInt(tokens.token(), &foreground_delay) &&
+ tokens.GetNext() &&
+ base::StringToInt(tokens.token(), &background_delay)) {
+ foreground_flush_delay_ = foreground_delay;
+ background_flush_delay_ = background_delay;
+ }
+ }
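+  // For example, a field trial group named "5000_500" would give a 5 second
+  // foreground flush delay and a 0.5 second background flush delay.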
+
+#if defined(OS_ANDROID)
+ activity_status_listener_.reset(new base::android::ActivityStatus::Listener(
+ base::Bind(&SimpleIndex::OnActivityStateChange, AsWeakPtr())));
+#endif
+
+ SimpleIndexLoadResult* load_result = new SimpleIndexLoadResult();
+ scoped_ptr<SimpleIndexLoadResult> load_result_scoped(load_result);
+ base::Closure reply = base::Bind(
+ &SimpleIndex::MergeInitializingSet,
+ AsWeakPtr(),
+ base::Passed(&load_result_scoped));
+ index_file_->LoadIndexEntries(cache_mtime, reply, load_result);
+}
+
+bool SimpleIndex::SetMaxSize(int max_bytes) {
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ max_size_ = max_bytes;
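+  // With kEvictionMarginDivisor == 20, a 100 MB cache starts evicting once it
+  // grows past 95 MB and evicts back down to roughly 90 MB.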
+ high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
+ low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
+ return true;
+}
+
+int SimpleIndex::ExecuteWhenReady(const net::CompletionCallback& task) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ if (initialized_)
+ io_thread_->PostTask(FROM_HERE, base::Bind(task, net::OK));
+ else
+ to_run_when_initialized_.push_back(task);
+ return net::ERR_IO_PENDING;
+}
+
+scoped_ptr<SimpleIndex::HashList> SimpleIndex::RemoveEntriesBetween(
+ const base::Time initial_time, const base::Time end_time) {
+ return ExtractEntriesBetween(initial_time, end_time, true);
+}
+
+scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
+ const base::Time null_time = base::Time();
+ return ExtractEntriesBetween(null_time, null_time, false);
+}
+
+int32 SimpleIndex::GetEntryCount() const {
+ // TODO(pasko): return a meaningful initial estimate before initialized.
+ return entries_set_.size();
+}
+
+void SimpleIndex::Insert(const std::string& key) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+  // Upon insert we don't yet know the size of the entry.
+ // It will be updated later when the SimpleEntryImpl finishes opening or
+ // creating the new entry, and then UpdateEntrySize will be called.
+ const uint64 hash_key = simple_util::GetEntryHashKey(key);
+ InsertInEntrySet(
+ hash_key, EntryMetadata(base::Time::Now(), 0), &entries_set_);
+ if (!initialized_)
+ removed_entries_.erase(hash_key);
+ PostponeWritingToDisk();
+}
+
+void SimpleIndex::Remove(const std::string& key) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ const uint64 hash_key = simple_util::GetEntryHashKey(key);
+ EntrySet::iterator it = entries_set_.find(hash_key);
+ if (it != entries_set_.end()) {
+ UpdateEntryIteratorSize(&it, 0);
+ entries_set_.erase(it);
+ }
+
+ if (!initialized_)
+ removed_entries_.insert(hash_key);
+ PostponeWritingToDisk();
+}
+
+bool SimpleIndex::Has(uint64 hash) const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // If not initialized, always return true, forcing it to go to the disk.
+ return !initialized_ || entries_set_.count(hash) > 0;
+}
+
+bool SimpleIndex::UseIfExists(const std::string& key) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // Always update the last used time, even if it is during initialization.
+ // It will be merged later.
+ EntrySet::iterator it = entries_set_.find(simple_util::GetEntryHashKey(key));
+ if (it == entries_set_.end())
+ // If not initialized, always return true, forcing it to go to the disk.
+ return !initialized_;
+ it->second.SetLastUsedTime(base::Time::Now());
+ PostponeWritingToDisk();
+ return true;
+}
+
+void SimpleIndex::StartEvictionIfNeeded() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ if (eviction_in_progress_ || cache_size_ <= high_watermark_)
+ return;
+
+ // Take all live key hashes from the index and sort them by time.
+ eviction_in_progress_ = true;
+ eviction_start_time_ = base::TimeTicks::Now();
+ UMA_HISTOGRAM_MEMORY_KB("SimpleCache.Eviction.CacheSizeOnStart2",
+ cache_size_ / kBytesInKb);
+ UMA_HISTOGRAM_MEMORY_KB("SimpleCache.Eviction.MaxCacheSizeOnStart2",
+ max_size_ / kBytesInKb);
+ scoped_ptr<std::vector<uint64> > entry_hashes(new std::vector<uint64>());
+ for (EntrySet::const_iterator it = entries_set_.begin(),
+ end = entries_set_.end(); it != end; ++it) {
+ entry_hashes->push_back(it->first);
+ }
+ std::sort(entry_hashes->begin(), entry_hashes->end(),
+ CompareHashesForTimestamp(entries_set_));
+
+  // Remove as many entries from the index as needed to get below
+  // |low_watermark_|.
+ std::vector<uint64>::iterator it = entry_hashes->begin();
+ uint64 evicted_so_far_size = 0;
+ while (evicted_so_far_size < cache_size_ - low_watermark_) {
+ DCHECK(it != entry_hashes->end());
+ EntrySet::iterator found_meta = entries_set_.find(*it);
+ DCHECK(found_meta != entries_set_.end());
+ uint64 to_evict_size = found_meta->second.GetEntrySize();
+ evicted_so_far_size += to_evict_size;
+ entries_set_.erase(found_meta);
+ ++it;
+ }
+ cache_size_ -= evicted_so_far_size;
+
+  // Take the rest of the hashes out of the eviction list.
+ entry_hashes->erase(it, entry_hashes->end());
+ UMA_HISTOGRAM_COUNTS("SimpleCache.Eviction.EntryCount", entry_hashes->size());
+ UMA_HISTOGRAM_TIMES("SimpleCache.Eviction.TimeToSelectEntries",
+ base::TimeTicks::Now() - eviction_start_time_);
+ UMA_HISTOGRAM_MEMORY_KB("SimpleCache.Eviction.SizeOfEvicted2",
+ evicted_so_far_size / kBytesInKb);
+
+ index_file_->DoomEntrySet(
+ entry_hashes.Pass(),
+ base::Bind(&SimpleIndex::EvictionDone, AsWeakPtr()));
+}
+
+bool SimpleIndex::UpdateEntrySize(const std::string& key, uint64 entry_size) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ EntrySet::iterator it = entries_set_.find(simple_util::GetEntryHashKey(key));
+ if (it == entries_set_.end())
+ return false;
+
+ UpdateEntryIteratorSize(&it, entry_size);
+ PostponeWritingToDisk();
+ StartEvictionIfNeeded();
+ return true;
+}
+
+void SimpleIndex::EvictionDone(int result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+
+ // Ignore the result of eviction. We did our best.
+ eviction_in_progress_ = false;
+ UMA_HISTOGRAM_BOOLEAN("SimpleCache.Eviction.Result", result == net::OK);
+ UMA_HISTOGRAM_TIMES("SimpleCache.Eviction.TimeToDone",
+ base::TimeTicks::Now() - eviction_start_time_);
+ UMA_HISTOGRAM_MEMORY_KB("SimpleCache.Eviction.SizeWhenDone2",
+ cache_size_ / kBytesInKb);
+}
+
+// static
+void SimpleIndex::InsertInEntrySet(
+ uint64 hash_key,
+ const disk_cache::EntryMetadata& entry_metadata,
+ EntrySet* entry_set) {
+ DCHECK(entry_set);
+ entry_set->insert(std::make_pair(hash_key, entry_metadata));
+}
+
+void SimpleIndex::PostponeWritingToDisk() {
+ if (!initialized_)
+ return;
+ const int delay = app_on_background_ ? background_flush_delay_
+ : foreground_flush_delay_;
+ // If the timer is already active, Start() will just Reset it, postponing it.
+ write_to_disk_timer_.Start(
+ FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_);
+}
+
+void SimpleIndex::UpdateEntryIteratorSize(EntrySet::iterator* it,
+ uint64 entry_size) {
+ // Update the total cache size with the new entry size.
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK_GE(cache_size_, (*it)->second.GetEntrySize());
+ cache_size_ -= (*it)->second.GetEntrySize();
+ cache_size_ += entry_size;
+ (*it)->second.SetEntrySize(entry_size);
+}
+
+void SimpleIndex::MergeInitializingSet(
+ scoped_ptr<SimpleIndexLoadResult> load_result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(load_result->did_load);
+
+ SimpleIndex::EntrySet* index_file_entries = &load_result->entries;
+ // First, remove the entries that are in the |removed_entries_| from both
+ // sets.
+ for (base::hash_set<uint64>::const_iterator it =
+ removed_entries_.begin(); it != removed_entries_.end(); ++it) {
+ entries_set_.erase(*it);
+ index_file_entries->erase(*it);
+ }
+
+ for (EntrySet::const_iterator it = entries_set_.begin();
+ it != entries_set_.end(); ++it) {
+ const uint64 entry_hash = it->first;
+ std::pair<EntrySet::iterator, bool> insert_result =
+ index_file_entries->insert(EntrySet::value_type(entry_hash,
+ EntryMetadata()));
+ EntrySet::iterator& possibly_inserted_entry = insert_result.first;
+ possibly_inserted_entry->second = it->second;
+ }
+
+ uint64 merged_cache_size = 0;
+ for (EntrySet::iterator it = index_file_entries->begin();
+ it != index_file_entries->end(); ++it) {
+ merged_cache_size += it->second.GetEntrySize();
+ }
+
+ entries_set_.swap(*index_file_entries);
+ cache_size_ = merged_cache_size;
+ initialized_ = true;
+ removed_entries_.clear();
+
+ // The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
+ // merge down much.
+ if (load_result->flush_required)
+ WriteToDisk();
+
+ UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.IndexInitializationWaiters",
+ to_run_when_initialized_.size(), 0, 100, 20);
+ // Run all callbacks waiting for the index to come up.
+ for (CallbackList::iterator it = to_run_when_initialized_.begin(),
+ end = to_run_when_initialized_.end(); it != end; ++it) {
+ io_thread_->PostTask(FROM_HERE, base::Bind((*it), net::OK));
+ }
+ to_run_when_initialized_.clear();
+}
+
+#if defined(OS_ANDROID)
+void SimpleIndex::OnActivityStateChange(
+ base::android::ActivityState state) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // For more info about android activities, see:
+ // developer.android.com/training/basics/activity-lifecycle/pausing.html
+ // These values are defined in the file ActivityStatus.java
+ if (state == base::android::ACTIVITY_STATE_RESUMED) {
+ app_on_background_ = false;
+ } else if (state == base::android::ACTIVITY_STATE_STOPPED) {
+ app_on_background_ = true;
+ WriteToDisk();
+ }
+}
+#endif
+
+void SimpleIndex::WriteToDisk() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ if (!initialized_)
+ return;
+ UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.IndexNumEntriesOnWrite",
+ entries_set_.size(), 0, 100000, 50);
+ const base::TimeTicks start = base::TimeTicks::Now();
+ if (!last_write_to_disk_.is_null()) {
+ if (app_on_background_) {
+ UMA_HISTOGRAM_MEDIUM_TIMES("SimpleCache.IndexWriteInterval.Background",
+ start - last_write_to_disk_);
+ } else {
+ UMA_HISTOGRAM_MEDIUM_TIMES("SimpleCache.IndexWriteInterval.Foreground",
+ start - last_write_to_disk_);
+ }
+ }
+ last_write_to_disk_ = start;
+
+ index_file_->WriteToDisk(entries_set_, cache_size_,
+ start, app_on_background_);
+}
+
+scoped_ptr<SimpleIndex::HashList> SimpleIndex::ExtractEntriesBetween(
+ const base::Time initial_time, const base::Time end_time,
+ bool delete_entries) {
+ DCHECK_EQ(true, initialized_);
+ const base::Time extended_end_time =
+ end_time.is_null() ? base::Time::Max() : end_time;
+ DCHECK(extended_end_time >= initial_time);
+ scoped_ptr<HashList> ret_hashes(new HashList());
+ for (EntrySet::iterator it = entries_set_.begin(), end = entries_set_.end();
+ it != end;) {
+ EntryMetadata& metadata = it->second;
+ base::Time entry_time = metadata.GetLastUsedTime();
+ if (initial_time <= entry_time && entry_time < extended_end_time) {
+ ret_hashes->push_back(it->first);
+ if (delete_entries) {
+ cache_size_ -= metadata.GetEntrySize();
+ entries_set_.erase(it++);
+ continue;
+ }
+ }
+ ++it;
+ }
+ return ret_hashes.Pass();
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_index.h b/chromium/net/disk_cache/simple/simple_index.h
new file mode 100644
index 00000000000..788ffb2cfe8
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_index.h
@@ -0,0 +1,203 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_H_
+
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "net/base/completion_callback.h"
+#include "net/base/net_export.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/activity_status.h"
+#endif
+
+class Pickle;
+class PickleIterator;
+
+namespace disk_cache {
+
+class SimpleIndexFile;
+struct SimpleIndexLoadResult;
+
+class NET_EXPORT_PRIVATE EntryMetadata {
+ public:
+ EntryMetadata();
+ EntryMetadata(base::Time last_used_time, uint64 entry_size);
+
+ base::Time GetLastUsedTime() const;
+ void SetLastUsedTime(const base::Time& last_used_time);
+
+ uint64 GetEntrySize() const { return entry_size_; }
+ void SetEntrySize(uint64 entry_size) { entry_size_ = entry_size; }
+
+ // Serialize the data into the provided pickle.
+ void Serialize(Pickle* pickle) const;
+ bool Deserialize(PickleIterator* it);
+
+ private:
+ friend class SimpleIndexFileTest;
+
+ // When adding new members here, you should update the Serialize() and
+ // Deserialize() methods.
+
+ // This is the serialized format from Time::ToInternalValue().
+ // If you want to make calculations/comparisons, you should use the
+ // base::Time() class. Use the GetLastUsedTime() method above.
+ // TODO(felipeg): Use Time() here.
+ int64 last_used_time_;
+
+ uint64 entry_size_; // Storage size in bytes.
+};
+
+// This class is not Thread-safe.
+class NET_EXPORT_PRIVATE SimpleIndex
+ : public base::SupportsWeakPtr<SimpleIndex> {
+ public:
+ typedef std::vector<uint64> HashList;
+
+ SimpleIndex(base::SingleThreadTaskRunner* io_thread,
+ const base::FilePath& cache_directory,
+ scoped_ptr<SimpleIndexFile> simple_index_file);
+
+ virtual ~SimpleIndex();
+
+ void Initialize(base::Time cache_mtime);
+
+ bool SetMaxSize(int max_bytes);
+ int max_size() const { return max_size_; }
+
+ void Insert(const std::string& key);
+ void Remove(const std::string& key);
+
+ // Check whether the index has the entry given the hash of its key.
+ bool Has(uint64 hash) const;
+
+  // Update the last used time of the entry with the given key and return true
+  // iff the entry exists in the index.
+ bool UseIfExists(const std::string& key);
+
+ void WriteToDisk();
+
+  // Update the size (in bytes) of an entry in the metadata stored in the
+  // index. This should be the total disk-file size including all streams of
+  // the entry.
+ bool UpdateEntrySize(const std::string& key, uint64 entry_size);
+
+ typedef base::hash_map<uint64, EntryMetadata> EntrySet;
+
+ static void InsertInEntrySet(uint64 hash_key,
+ const EntryMetadata& entry_metadata,
+ EntrySet* entry_set);
+
+ // Executes the |callback| when the index is ready. Allows multiple callbacks.
+ int ExecuteWhenReady(const net::CompletionCallback& callback);
+
+  // Takes out of the index the entries whose last accessed time falls in the
+  // range between |initial_time| and |end_time|, where open intervals are
+  // possible according to the definition given in |DoomEntriesBetween()| in the
+  // disk cache backend interface. Returns the set of hashes taken out.
+ scoped_ptr<HashList> RemoveEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+
+  // Returns the list of all entries' key hashes.
+ scoped_ptr<HashList> GetAllHashes();
+
+ // Returns number of indexed entries.
+ int32 GetEntryCount() const;
+
+ // Returns whether the index has been initialized yet.
+ bool initialized() const { return initialized_; }
+
+ private:
+ friend class SimpleIndexTest;
+ FRIEND_TEST_ALL_PREFIXES(SimpleIndexTest, IndexSizeCorrectOnMerge);
+ FRIEND_TEST_ALL_PREFIXES(SimpleIndexTest, DiskWriteQueued);
+ FRIEND_TEST_ALL_PREFIXES(SimpleIndexTest, DiskWriteExecuted);
+ FRIEND_TEST_ALL_PREFIXES(SimpleIndexTest, DiskWritePostponed);
+
+ void StartEvictionIfNeeded();
+ void EvictionDone(int result);
+
+ void PostponeWritingToDisk();
+
+ void UpdateEntryIteratorSize(EntrySet::iterator* it, uint64 entry_size);
+
+ // Must run on IO Thread.
+ void MergeInitializingSet(scoped_ptr<SimpleIndexLoadResult> load_result);
+
+#if defined(OS_ANDROID)
+ void OnActivityStateChange(base::android::ActivityState state);
+
+ scoped_ptr<base::android::ActivityStatus::Listener> activity_status_listener_;
+#endif
+
+ scoped_ptr<HashList> ExtractEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ bool delete_entries);
+
+ EntrySet entries_set_;
+
+ uint64 cache_size_; // Total cache storage size in bytes.
+ uint64 max_size_;
+ uint64 high_watermark_;
+ uint64 low_watermark_;
+ bool eviction_in_progress_;
+ base::TimeTicks eviction_start_time_;
+
+ // This stores all the hash_key of entries that are removed during
+ // initialization.
+ base::hash_set<uint64> removed_entries_;
+ bool initialized_;
+
+ const base::FilePath& cache_directory_;
+ scoped_ptr<SimpleIndexFile> index_file_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> io_thread_;
+
+  // All nonstatic SimpleIndex methods should always be called on the IO
+  // thread, in all cases. |io_thread_checker_| documents and enforces this.
+ base::ThreadChecker io_thread_checker_;
+
+ // Timestamp of the last time we wrote the index to disk.
+ // PostponeWritingToDisk() may give up postponing and allow the write if it
+ // has been a while since last time we wrote.
+ base::TimeTicks last_write_to_disk_;
+
+ base::OneShotTimer<SimpleIndex> write_to_disk_timer_;
+ base::Closure write_to_disk_cb_;
+
+ typedef std::list<net::CompletionCallback> CallbackList;
+ CallbackList to_run_when_initialized_;
+
+  // Set to true when the app is in the background. When the app is in the
+  // background we can write the index much more frequently, to ensure a fresh
+  // index on the next startup.
+ bool app_on_background_;
+
+  // The time in milliseconds for the index to be idle before it gets flushed
+  // to the disk. When the app is in the foreground the delay differs from the
+  // one used in the background.
+ int foreground_flush_delay_;
+ int background_flush_delay_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_H_
diff --git a/chromium/net/disk_cache/simple/simple_index_file.cc b/chromium/net/disk_cache/simple/simple_index_file.cc
new file mode 100644
index 00000000000..7bcea7cdfa8
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_index_file.cc
@@ -0,0 +1,423 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_index_file.h"
+
+#include <vector>
+
+#include "base/file_util.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/hash.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/pickle.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_index.h"
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "third_party/zlib/zlib.h"
+
+
+namespace {
+
+const uint64 kMaxEntiresInIndex = 100000000;
+
+uint32 CalculatePickleCRC(const Pickle& pickle) {
+ return crc32(crc32(0, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(pickle.payload()),
+ pickle.payload_size());
+}
+
+void DoomEntrySetReply(const net::CompletionCallback& reply_callback,
+ int result) {
+ reply_callback.Run(result);
+}
+
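+// Writes the serialized index to |temp_index_filename| first and renames it
+// over |index_filename| only when the whole pickle was written, so a partial
+// write never replaces a good index file.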
+void WriteToDiskInternal(const base::FilePath& index_filename,
+ const base::FilePath& temp_index_filename,
+ scoped_ptr<Pickle> pickle,
+ const base::TimeTicks& start_time,
+ bool app_on_background) {
+ int bytes_written = file_util::WriteFile(
+ temp_index_filename,
+ reinterpret_cast<const char*>(pickle->data()),
+ pickle->size());
+ DCHECK_EQ(bytes_written, implicit_cast<int>(pickle->size()));
+ if (bytes_written != static_cast<int>(pickle->size())) {
+ // TODO(felipeg): Add better error handling.
+ LOG(ERROR) << "Could not write Simple Cache index to temporary file: "
+ << temp_index_filename.value();
+ base::DeleteFile(temp_index_filename, /* recursive = */ false);
+ } else {
+ // Swap temp and index_file.
+ bool result = base::ReplaceFile(temp_index_filename, index_filename, NULL);
+ DCHECK(result);
+ }
+ if (app_on_background) {
+ UMA_HISTOGRAM_TIMES("SimpleCache.IndexWriteToDiskTime.Background",
+ (base::TimeTicks::Now() - start_time));
+ } else {
+ UMA_HISTOGRAM_TIMES("SimpleCache.IndexWriteToDiskTime.Foreground",
+ (base::TimeTicks::Now() - start_time));
+ }
+}
+
+} // namespace
+
+namespace disk_cache {
+
+SimpleIndexLoadResult::SimpleIndexLoadResult() : did_load(false),
+ flush_required(false) {
+}
+
+SimpleIndexLoadResult::~SimpleIndexLoadResult() {
+}
+
+void SimpleIndexLoadResult::Reset() {
+ did_load = false;
+ flush_required = false;
+ entries.clear();
+}
+
+// static
+const char SimpleIndexFile::kIndexFileName[] = "the-real-index";
+// static
+const char SimpleIndexFile::kTempIndexFileName[] = "temp-index";
+
+SimpleIndexFile::IndexMetadata::IndexMetadata() :
+ magic_number_(kSimpleIndexMagicNumber),
+ version_(kSimpleVersion),
+ number_of_entries_(0),
+ cache_size_(0) {}
+
+SimpleIndexFile::IndexMetadata::IndexMetadata(
+ uint64 number_of_entries, uint64 cache_size) :
+ magic_number_(kSimpleIndexMagicNumber),
+ version_(kSimpleVersion),
+ number_of_entries_(number_of_entries),
+ cache_size_(cache_size) {}
+
+void SimpleIndexFile::IndexMetadata::Serialize(Pickle* pickle) const {
+ DCHECK(pickle);
+ pickle->WriteUInt64(magic_number_);
+ pickle->WriteUInt32(version_);
+ pickle->WriteUInt64(number_of_entries_);
+ pickle->WriteUInt64(cache_size_);
+}
+
+bool SimpleIndexFile::IndexMetadata::Deserialize(PickleIterator* it) {
+ DCHECK(it);
+ return it->ReadUInt64(&magic_number_) &&
+ it->ReadUInt32(&version_) &&
+         it->ReadUInt64(&number_of_entries_) &&
+ it->ReadUInt64(&cache_size_);
+}
+
+bool SimpleIndexFile::IndexMetadata::CheckIndexMetadata() {
+ return number_of_entries_ <= kMaxEntiresInIndex &&
+ magic_number_ == disk_cache::kSimpleIndexMagicNumber &&
+ version_ == disk_cache::kSimpleVersion;
+}
+
+SimpleIndexFile::SimpleIndexFile(
+ base::SingleThreadTaskRunner* cache_thread,
+ base::TaskRunner* worker_pool,
+ const base::FilePath& cache_directory)
+ : cache_thread_(cache_thread),
+ worker_pool_(worker_pool),
+ cache_directory_(cache_directory),
+ index_file_(cache_directory_.AppendASCII(kIndexFileName)),
+ temp_index_file_(cache_directory_.AppendASCII(kTempIndexFileName)) {
+}
+
+SimpleIndexFile::~SimpleIndexFile() {}
+
+void SimpleIndexFile::LoadIndexEntries(base::Time cache_last_modified,
+ const base::Closure& callback,
+ SimpleIndexLoadResult* out_result) {
+ base::Closure task = base::Bind(&SimpleIndexFile::SyncLoadIndexEntries,
+ cache_last_modified, cache_directory_,
+ index_file_, out_result);
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, callback);
+}
+
+void SimpleIndexFile::WriteToDisk(const SimpleIndex::EntrySet& entry_set,
+ uint64 cache_size,
+ const base::TimeTicks& start,
+ bool app_on_background) {
+ IndexMetadata index_metadata(entry_set.size(), cache_size);
+ scoped_ptr<Pickle> pickle = Serialize(index_metadata, entry_set);
+ cache_thread_->PostTask(FROM_HERE, base::Bind(
+ &WriteToDiskInternal,
+ index_file_,
+ temp_index_file_,
+ base::Passed(&pickle),
+ base::TimeTicks::Now(),
+ app_on_background));
+}
+
+void SimpleIndexFile::DoomEntrySet(
+ scoped_ptr<std::vector<uint64> > entry_hashes,
+ const net::CompletionCallback& reply_callback) {
+ PostTaskAndReplyWithResult(
+ worker_pool_,
+ FROM_HERE,
+ base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
+ base::Passed(entry_hashes.Pass()), cache_directory_),
+ base::Bind(&DoomEntrySetReply, reply_callback));
+}
+
+// static
+void SimpleIndexFile::SyncLoadIndexEntries(
+ base::Time cache_last_modified,
+ const base::FilePath& cache_directory,
+ const base::FilePath& index_file_path,
+ SimpleIndexLoadResult* out_result) {
+ // TODO(felipeg): probably could load a stale index and use it for something.
+ const SimpleIndex::EntrySet& entries = out_result->entries;
+
+ const bool index_file_exists = base::PathExists(index_file_path);
+
+ // Used in histograms. Please only add new values at the end.
+ enum {
+ INDEX_STATE_CORRUPT = 0,
+ INDEX_STATE_STALE = 1,
+ INDEX_STATE_FRESH = 2,
+ INDEX_STATE_FRESH_CONCURRENT_UPDATES = 3,
+ INDEX_STATE_MAX = 4,
+ } index_file_state;
+
+ // Only load if the index is not stale.
+ if (IsIndexFileStale(cache_last_modified, index_file_path)) {
+ index_file_state = INDEX_STATE_STALE;
+ } else {
+ index_file_state = INDEX_STATE_FRESH;
+ base::Time latest_dir_mtime;
+ if (simple_util::GetMTime(cache_directory, &latest_dir_mtime) &&
+ IsIndexFileStale(latest_dir_mtime, index_file_path)) {
+ // A file operation has updated the directory since we last looked at it
+ // during backend initialization.
+ index_file_state = INDEX_STATE_FRESH_CONCURRENT_UPDATES;
+ }
+
+ const base::TimeTicks start = base::TimeTicks::Now();
+ SyncLoadFromDisk(index_file_path, out_result);
+ UMA_HISTOGRAM_TIMES("SimpleCache.IndexLoadTime",
+ base::TimeTicks::Now() - start);
+ UMA_HISTOGRAM_COUNTS("SimpleCache.IndexEntriesLoaded",
+ out_result->did_load ? entries.size() : 0);
+ if (!out_result->did_load)
+ index_file_state = INDEX_STATE_CORRUPT;
+ }
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.IndexFileStateOnLoad",
+ index_file_state,
+ INDEX_STATE_MAX);
+
+ if (!out_result->did_load) {
+ const base::TimeTicks start = base::TimeTicks::Now();
+ SyncRestoreFromDisk(cache_directory, index_file_path, out_result);
+ UMA_HISTOGRAM_MEDIUM_TIMES("SimpleCache.IndexRestoreTime",
+ base::TimeTicks::Now() - start);
+ UMA_HISTOGRAM_COUNTS("SimpleCache.IndexEntriesRestored",
+ entries.size());
+ }
+
+ // Used in histograms. Please only add new values at the end.
+ enum {
+ INITIALIZE_METHOD_RECOVERED = 0,
+ INITIALIZE_METHOD_LOADED = 1,
+ INITIALIZE_METHOD_NEWCACHE = 2,
+ INITIALIZE_METHOD_MAX = 3,
+ };
+ int initialize_method;
+ if (index_file_exists) {
+ if (out_result->flush_required)
+ initialize_method = INITIALIZE_METHOD_RECOVERED;
+ else
+ initialize_method = INITIALIZE_METHOD_LOADED;
+ } else {
+ UMA_HISTOGRAM_COUNTS("SimpleCache.IndexCreatedEntryCount",
+ entries.size());
+ initialize_method = INITIALIZE_METHOD_NEWCACHE;
+ }
+
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.IndexInitializeMethod",
+ initialize_method, INITIALIZE_METHOD_MAX);
+}
+
+// static
+void SimpleIndexFile::SyncLoadFromDisk(const base::FilePath& index_filename,
+ SimpleIndexLoadResult* out_result) {
+ out_result->Reset();
+
+ base::MemoryMappedFile index_file_map;
+ if (!index_file_map.Initialize(index_filename)) {
+ LOG(WARNING) << "Could not map Simple Index file.";
+ base::DeleteFile(index_filename, false);
+ return;
+ }
+
+ SimpleIndexFile::Deserialize(
+ reinterpret_cast<const char*>(index_file_map.data()),
+ index_file_map.length(), out_result);
+
+ if (!out_result->did_load)
+ base::DeleteFile(index_filename, false);
+}
+
+// static
+scoped_ptr<Pickle> SimpleIndexFile::Serialize(
+ const SimpleIndexFile::IndexMetadata& index_metadata,
+ const SimpleIndex::EntrySet& entries) {
+ scoped_ptr<Pickle> pickle(new Pickle(sizeof(SimpleIndexFile::PickleHeader)));
+
+ index_metadata.Serialize(pickle.get());
+ for (SimpleIndex::EntrySet::const_iterator it = entries.begin();
+ it != entries.end(); ++it) {
+ pickle->WriteUInt64(it->first);
+ it->second.Serialize(pickle.get());
+ }
+ SimpleIndexFile::PickleHeader* header_p =
+ pickle->headerT<SimpleIndexFile::PickleHeader>();
+ header_p->crc = CalculatePickleCRC(*pickle);
+ return pickle.Pass();
+}
+
+// static
+void SimpleIndexFile::Deserialize(const char* data, int data_len,
+ SimpleIndexLoadResult* out_result) {
+ DCHECK(data);
+
+ out_result->Reset();
+ SimpleIndex::EntrySet* entries = &out_result->entries;
+
+ Pickle pickle(data, data_len);
+ if (!pickle.data()) {
+ LOG(WARNING) << "Corrupt Simple Index File.";
+ return;
+ }
+
+ PickleIterator pickle_it(pickle);
+
+ SimpleIndexFile::PickleHeader* header_p =
+ pickle.headerT<SimpleIndexFile::PickleHeader>();
+ const uint32 crc_read = header_p->crc;
+ const uint32 crc_calculated = CalculatePickleCRC(pickle);
+
+ if (crc_read != crc_calculated) {
+ LOG(WARNING) << "Invalid CRC in Simple Index file.";
+ return;
+ }
+
+ SimpleIndexFile::IndexMetadata index_metadata;
+ if (!index_metadata.Deserialize(&pickle_it)) {
+ LOG(ERROR) << "Invalid index_metadata on Simple Cache Index.";
+ return;
+ }
+
+ if (!index_metadata.CheckIndexMetadata()) {
+ LOG(ERROR) << "Invalid index_metadata on Simple Cache Index.";
+ return;
+ }
+
+#if !defined(OS_WIN)
+ // TODO(gavinp): Consider using std::unordered_map.
+ entries->resize(index_metadata.GetNumberOfEntries() + kExtraSizeForMerge);
+#endif
+ while (entries->size() < index_metadata.GetNumberOfEntries()) {
+ uint64 hash_key;
+ EntryMetadata entry_metadata;
+ if (!pickle_it.ReadUInt64(&hash_key) ||
+ !entry_metadata.Deserialize(&pickle_it)) {
+ LOG(WARNING) << "Invalid EntryMetadata in Simple Index file.";
+ entries->clear();
+ return;
+ }
+ SimpleIndex::InsertInEntrySet(hash_key, entry_metadata, entries);
+ }
+
+ out_result->did_load = true;
+}
+
+// static
+void SimpleIndexFile::SyncRestoreFromDisk(
+ const base::FilePath& cache_directory,
+ const base::FilePath& index_file_path,
+ SimpleIndexLoadResult* out_result) {
+ LOG(INFO) << "Simple Cache Index is being restored from disk.";
+
+ base::DeleteFile(index_file_path, /* recursive = */ false);
+ out_result->Reset();
+ SimpleIndex::EntrySet* entries = &out_result->entries;
+
+ // TODO(felipeg,gavinp): Fix this once we have a one-file per entry format.
+ COMPILE_ASSERT(kSimpleEntryFileCount == 3,
+ file_pattern_must_match_file_count);
+
+ const int kFileSuffixLength = sizeof("_0") - 1;
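+  // Entry files are named "<hex entry hash>_<file index>"; the pattern below
+  // matches all kSimpleEntryFileCount streams of every entry.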
+ const base::FilePath::StringType file_pattern = FILE_PATH_LITERAL("*_[0-2]");
+ base::FileEnumerator enumerator(cache_directory,
+ false /* recursive */,
+ base::FileEnumerator::FILES,
+ file_pattern);
+ for (base::FilePath file_path = enumerator.Next(); !file_path.empty();
+ file_path = enumerator.Next()) {
+ const base::FilePath::StringType base_name = file_path.BaseName().value();
+    // Converting to std::string is OK since our file names never contain
+    // non-ASCII characters.
+ const std::string hash_key_string(base_name.begin(),
+ base_name.end() - kFileSuffixLength);
+ uint64 hash_key = 0;
+ if (!simple_util::GetEntryHashKeyFromHexString(
+ hash_key_string, &hash_key)) {
+ LOG(WARNING) << "Invalid Entry Hash Key filename while restoring "
+ << "Simple Index from disk: " << base_name;
+      // TODO(felipeg): Should we delete the invalid file here?
+ continue;
+ }
+
+ base::FileEnumerator::FileInfo info = enumerator.GetInfo();
+ base::Time last_used_time;
+#if defined(OS_POSIX)
+ // For POSIX systems, a last access time is available. However, it's not
+ // guaranteed to be more accurate than mtime. It is no worse though.
+ last_used_time = base::Time::FromTimeT(info.stat().st_atime);
+#endif
+ if (last_used_time.is_null())
+ last_used_time = info.GetLastModifiedTime();
+
+ int64 file_size = info.GetSize();
+ SimpleIndex::EntrySet::iterator it = entries->find(hash_key);
+ if (it == entries->end()) {
+ SimpleIndex::InsertInEntrySet(
+ hash_key,
+ EntryMetadata(last_used_time, file_size),
+ entries);
+ } else {
+ // Summing up the total size of the entry through all the *_[0-2] files
+ it->second.SetEntrySize(it->second.GetEntrySize() + file_size);
+ }
+ }
+
+ out_result->did_load = true;
+
+  // When we restore from disk we write the merged index file to disk right
+  // away; this might save us from having to restore again next time.
+ out_result->flush_required = true;
+}
+
+// static
+bool SimpleIndexFile::IsIndexFileStale(base::Time cache_last_modified,
+ const base::FilePath& index_file_path) {
+ base::Time index_mtime;
+ if (!simple_util::GetMTime(index_file_path, &index_mtime))
+ return true;
+ return index_mtime < cache_last_modified;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_index_file.h b/chromium/net/disk_cache/simple/simple_index_file.h
new file mode 100644
index 00000000000..b536df9a1e7
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_index_file.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_FILE_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_FILE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/pickle.h"
+#include "base/port.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/simple/simple_index.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+class TaskRunner;
+}
+
+namespace disk_cache {
+
+const uint64 kSimpleIndexMagicNumber = GG_UINT64_C(0x656e74657220796f);
+
+struct NET_EXPORT_PRIVATE SimpleIndexLoadResult {
+ SimpleIndexLoadResult();
+ ~SimpleIndexLoadResult();
+ void Reset();
+
+ bool did_load;
+ SimpleIndex::EntrySet entries;
+ bool flush_required;
+};
+
+// The Simple Index File format is pickle-serialized data consisting of
+// IndexMetadata and EntryMetadata objects: one serialized |IndexMetadata|
+// instance followed by |number_of_entries| serialized |EntryMetadata|
+// entries. For details, see SimpleIndexFile::Serialize() and
+// SimpleIndexFile::SyncLoadFromDisk().
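+//
+// Illustrative on-disk layout (a sketch of what Serialize() writes; see the
+// Pickle write calls in simple_index_file.cc):
+//   [Pickle header, including a CRC of the payload]
+//   [IndexMetadata: magic_number | version | number_of_entries | cache_size]
+//   [number_of_entries x (entry hash_key | serialized EntryMetadata)]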
+//
+// The non-static methods must run on the IO thread. All the real
+// work is done in the static methods, which are run on the cache thread
+// or in worker threads. Synchronization between methods is the
+// responsibility of the caller.
+class NET_EXPORT_PRIVATE SimpleIndexFile {
+ public:
+ class NET_EXPORT_PRIVATE IndexMetadata {
+ public:
+ IndexMetadata();
+ IndexMetadata(uint64 number_of_entries, uint64 cache_size);
+
+ void Serialize(Pickle* pickle) const;
+ bool Deserialize(PickleIterator* it);
+
+ bool CheckIndexMetadata();
+
+ uint64 GetNumberOfEntries() { return number_of_entries_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(IndexMetadataTest, Basics);
+ FRIEND_TEST_ALL_PREFIXES(IndexMetadataTest, Serialize);
+
+ uint64 magic_number_;
+ uint32 version_;
+ uint64 number_of_entries_;
+ uint64 cache_size_; // Total cache storage size in bytes.
+ };
+
+ SimpleIndexFile(base::SingleThreadTaskRunner* cache_thread,
+ base::TaskRunner* worker_pool,
+ const base::FilePath& cache_directory);
+ virtual ~SimpleIndexFile();
+
+ // Get index entries based on current disk context.
+ virtual void LoadIndexEntries(base::Time cache_last_modified,
+ const base::Closure& callback,
+ SimpleIndexLoadResult* out_result);
+
+ // Write the specified set of entries to disk.
+ virtual void WriteToDisk(const SimpleIndex::EntrySet& entry_set,
+ uint64 cache_size,
+ const base::TimeTicks& start,
+ bool app_on_background);
+
+ // Doom the entries specified in |entry_hashes|, calling |reply_callback|
+ // with the result on the current thread when done.
+ virtual void DoomEntrySet(scoped_ptr<std::vector<uint64> > entry_hashes,
+ const base::Callback<void(int)>& reply_callback);
+
+ private:
+ friend class WrappedSimpleIndexFile;
+
+ // When loading the entries from disk, add this many extra hash buckets to
+ // prevent reallocation on the IO thread when merging in new live entries.
+ static const int kExtraSizeForMerge = 512;
+
+ // Synchronous (IO performing) implementation of LoadIndexEntries.
+ static void SyncLoadIndexEntries(base::Time cache_last_modified,
+ const base::FilePath& cache_directory,
+ const base::FilePath& index_file_path,
+ SimpleIndexLoadResult* out_result);
+
+  // Loads the index file from disk into |out_result|. Upon failure,
+  // |out_result->did_load| is left false and the index file is deleted.
+ static void SyncLoadFromDisk(const base::FilePath& index_filename,
+ SimpleIndexLoadResult* out_result);
+
+ // Returns a scoped_ptr for a newly allocated Pickle containing the serialized
+ // data to be written to a file.
+ static scoped_ptr<Pickle> Serialize(
+ const SimpleIndexFile::IndexMetadata& index_metadata,
+ const SimpleIndex::EntrySet& entries);
+
+  // Given the contents of an index file |data| of length |data_len|,
+  // deserializes the entries into |out_result|. On error,
+  // |out_result->did_load| remains false.
+ static void Deserialize(const char* data, int data_len,
+ SimpleIndexLoadResult* out_result);
+
+  // Scans the cache directory for entry files, filling |out_result| with all
+  // entries found.
+ static void SyncRestoreFromDisk(const base::FilePath& cache_directory,
+ const base::FilePath& index_file_path,
+ SimpleIndexLoadResult* out_result);
+
+ // Determines if an index file is stale relative to the time of last
+ // modification of the cache directory.
+ static bool IsIndexFileStale(base::Time cache_last_modified,
+ const base::FilePath& index_file_path);
+
+ struct PickleHeader : public Pickle::Header {
+ uint32 crc;
+ };
+
+ const scoped_refptr<base::SingleThreadTaskRunner> cache_thread_;
+ const scoped_refptr<base::TaskRunner> worker_pool_;
+ const base::FilePath cache_directory_;
+ const base::FilePath index_file_;
+ const base::FilePath temp_index_file_;
+
+ static const char kIndexFileName[];
+ static const char kTempIndexFileName[];
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleIndexFile);
+};
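+
+// A minimal usage sketch (hypothetical task runners and paths; within this
+// patch the real owner of a SimpleIndexFile is SimpleIndex):
+//
+//   SimpleIndexFile index_file(cache_thread.get(), worker_pool.get(),
+//                              cache_directory);
+//   SimpleIndexLoadResult load_result;
+//   index_file.LoadIndexEntries(cache_last_modified, load_done_closure,
+//                               &load_result);
+//   ...
+//   index_file.WriteToDisk(entry_set, cache_size, base::TimeTicks::Now(),
+//                          /* app_on_background = */ false);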
+
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_FILE_H_
diff --git a/chromium/net/disk_cache/simple/simple_index_file_unittest.cc b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
new file mode 100644
index 00000000000..bf7ee83c30e
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_index_file_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/hash.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/pickle.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_index.h"
+#include "net/disk_cache/simple/simple_index_file.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+using disk_cache::SimpleIndexFile;
+using disk_cache::SimpleIndex;
+
+namespace disk_cache {
+
+TEST(IndexMetadataTest, Basics) {
+ SimpleIndexFile::IndexMetadata index_metadata;
+
+ EXPECT_EQ(disk_cache::kSimpleIndexMagicNumber, index_metadata.magic_number_);
+ EXPECT_EQ(disk_cache::kSimpleVersion, index_metadata.version_);
+ EXPECT_EQ(0U, index_metadata.GetNumberOfEntries());
+ EXPECT_EQ(0U, index_metadata.cache_size_);
+
+ EXPECT_TRUE(index_metadata.CheckIndexMetadata());
+}
+
+TEST(IndexMetadataTest, Serialize) {
+ SimpleIndexFile::IndexMetadata index_metadata(123, 456);
+ Pickle pickle;
+ index_metadata.Serialize(&pickle);
+ PickleIterator it(pickle);
+ SimpleIndexFile::IndexMetadata new_index_metadata;
+ new_index_metadata.Deserialize(&it);
+
+ EXPECT_EQ(new_index_metadata.magic_number_, index_metadata.magic_number_);
+ EXPECT_EQ(new_index_metadata.version_, index_metadata.version_);
+ EXPECT_EQ(new_index_metadata.GetNumberOfEntries(),
+ index_metadata.GetNumberOfEntries());
+ EXPECT_EQ(new_index_metadata.cache_size_, index_metadata.cache_size_);
+
+ EXPECT_TRUE(new_index_metadata.CheckIndexMetadata());
+}
+
+// This friend derived class is able to re-export its ancestor's private
+// methods as public, for use in tests.
+class WrappedSimpleIndexFile : public SimpleIndexFile {
+ public:
+ using SimpleIndexFile::Deserialize;
+ using SimpleIndexFile::IsIndexFileStale;
+ using SimpleIndexFile::Serialize;
+
+ explicit WrappedSimpleIndexFile(const base::FilePath& index_file_directory)
+ : SimpleIndexFile(base::MessageLoopProxy::current().get(),
+ base::MessageLoopProxy::current().get(),
+ index_file_directory) {}
+ virtual ~WrappedSimpleIndexFile() {
+ }
+
+ const base::FilePath& GetIndexFilePath() const {
+ return index_file_;
+ }
+};
+
+class SimpleIndexFileTest : public testing::Test {
+ public:
+ bool CompareTwoEntryMetadata(const EntryMetadata& a, const EntryMetadata& b) {
+ return a.last_used_time_ == b.last_used_time_ &&
+ a.entry_size_ == b.entry_size_;
+ }
+
+ protected:
+ SimpleIndexFileTest() : callback_called_(false) {}
+
+ base::Closure GetCallback() {
+ return base::Bind(&SimpleIndexFileTest::LoadIndexEntriesCallback,
+ base::Unretained(this));
+ }
+
+ bool callback_called() { return callback_called_; }
+
+ private:
+ void LoadIndexEntriesCallback() {
+ EXPECT_FALSE(callback_called_);
+ callback_called_ = true;
+ }
+
+ bool callback_called_;
+};
+
+TEST_F(SimpleIndexFileTest, Serialize) {
+ SimpleIndex::EntrySet entries;
+ static const uint64 kHashes[] = { 11, 22, 33 };
+ static const size_t kNumHashes = arraysize(kHashes);
+ EntryMetadata metadata_entries[kNumHashes];
+
+ SimpleIndexFile::IndexMetadata index_metadata(static_cast<uint64>(kNumHashes),
+ 456);
+ for (size_t i = 0; i < kNumHashes; ++i) {
+ uint64 hash = kHashes[i];
+ metadata_entries[i] =
+ EntryMetadata(Time::FromInternalValue(hash), hash);
+ SimpleIndex::InsertInEntrySet(hash, metadata_entries[i], &entries);
+ }
+
+ scoped_ptr<Pickle> pickle = WrappedSimpleIndexFile::Serialize(
+ index_metadata, entries);
+ EXPECT_TRUE(pickle.get() != NULL);
+
+ SimpleIndexLoadResult deserialize_result;
+ WrappedSimpleIndexFile::Deserialize(static_cast<const char*>(pickle->data()),
+ pickle->size(),
+ &deserialize_result);
+ EXPECT_TRUE(deserialize_result.did_load);
+ const SimpleIndex::EntrySet& new_entries = deserialize_result.entries;
+ EXPECT_EQ(entries.size(), new_entries.size());
+
+ for (size_t i = 0; i < kNumHashes; ++i) {
+ SimpleIndex::EntrySet::const_iterator it = new_entries.find(kHashes[i]);
+ EXPECT_TRUE(new_entries.end() != it);
+ EXPECT_TRUE(CompareTwoEntryMetadata(it->second, metadata_entries[i]));
+ }
+}
+
+TEST_F(SimpleIndexFileTest, IsIndexFileStale) {
+ base::ScopedTempDir cache_dir;
+ ASSERT_TRUE(cache_dir.CreateUniqueTempDir());
+ base::Time cache_mtime;
+ const base::FilePath cache_path = cache_dir.path();
+
+ ASSERT_TRUE(simple_util::GetMTime(cache_path, &cache_mtime));
+ WrappedSimpleIndexFile simple_index_file(cache_path);
+ const base::FilePath& index_path = simple_index_file.GetIndexFilePath();
+ EXPECT_TRUE(WrappedSimpleIndexFile::IsIndexFileStale(cache_mtime,
+ index_path));
+ const std::string kDummyData = "nothing to be seen here";
+ EXPECT_EQ(static_cast<int>(kDummyData.size()),
+ file_util::WriteFile(index_path,
+ kDummyData.data(),
+ kDummyData.size()));
+ ASSERT_TRUE(simple_util::GetMTime(cache_path, &cache_mtime));
+ EXPECT_FALSE(WrappedSimpleIndexFile::IsIndexFileStale(cache_mtime,
+ index_path));
+
+ const base::Time past_time = base::Time::Now() -
+ base::TimeDelta::FromSeconds(10);
+ EXPECT_TRUE(file_util::TouchFile(index_path, past_time, past_time));
+ EXPECT_TRUE(file_util::TouchFile(cache_path, past_time, past_time));
+ ASSERT_TRUE(simple_util::GetMTime(cache_path, &cache_mtime));
+ EXPECT_FALSE(WrappedSimpleIndexFile::IsIndexFileStale(cache_mtime,
+ index_path));
+ const base::Time even_older =
+ past_time - base::TimeDelta::FromSeconds(10);
+ EXPECT_TRUE(file_util::TouchFile(index_path, even_older, even_older));
+ EXPECT_TRUE(WrappedSimpleIndexFile::IsIndexFileStale(cache_mtime,
+ index_path));
+}
+
+TEST_F(SimpleIndexFileTest, WriteThenLoadIndex) {
+ base::ScopedTempDir cache_dir;
+ ASSERT_TRUE(cache_dir.CreateUniqueTempDir());
+
+ SimpleIndex::EntrySet entries;
+ static const uint64 kHashes[] = { 11, 22, 33 };
+ static const size_t kNumHashes = arraysize(kHashes);
+ EntryMetadata metadata_entries[kNumHashes];
+ for (size_t i = 0; i < kNumHashes; ++i) {
+ uint64 hash = kHashes[i];
+ metadata_entries[i] =
+ EntryMetadata(Time::FromInternalValue(hash), hash);
+ SimpleIndex::InsertInEntrySet(hash, metadata_entries[i], &entries);
+ }
+
+ const uint64 kCacheSize = 456U;
+ {
+ WrappedSimpleIndexFile simple_index_file(cache_dir.path());
+ simple_index_file.WriteToDisk(entries, kCacheSize,
+ base::TimeTicks(), false);
+ base::RunLoop().RunUntilIdle();
+ EXPECT_TRUE(base::PathExists(simple_index_file.GetIndexFilePath()));
+ }
+
+ WrappedSimpleIndexFile simple_index_file(cache_dir.path());
+ base::Time fake_cache_mtime;
+ ASSERT_TRUE(simple_util::GetMTime(simple_index_file.GetIndexFilePath(),
+ &fake_cache_mtime));
+ SimpleIndexLoadResult load_index_result;
+ simple_index_file.LoadIndexEntries(fake_cache_mtime,
+ GetCallback(),
+ &load_index_result);
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(base::PathExists(simple_index_file.GetIndexFilePath()));
+ ASSERT_TRUE(callback_called());
+ EXPECT_TRUE(load_index_result.did_load);
+ EXPECT_FALSE(load_index_result.flush_required);
+
+ EXPECT_EQ(kNumHashes, load_index_result.entries.size());
+ for (size_t i = 0; i < kNumHashes; ++i)
+ EXPECT_EQ(1U, load_index_result.entries.count(kHashes[i]));
+}
+
+TEST_F(SimpleIndexFileTest, LoadCorruptIndex) {
+ base::ScopedTempDir cache_dir;
+ ASSERT_TRUE(cache_dir.CreateUniqueTempDir());
+
+ WrappedSimpleIndexFile simple_index_file(cache_dir.path());
+ const base::FilePath& index_path = simple_index_file.GetIndexFilePath();
+ const std::string kDummyData = "nothing to be seen here";
+ EXPECT_EQ(static_cast<int>(kDummyData.size()),
+ file_util::WriteFile(index_path,
+ kDummyData.data(),
+ kDummyData.size()));
+ base::Time fake_cache_mtime;
+ ASSERT_TRUE(simple_util::GetMTime(simple_index_file.GetIndexFilePath(),
+ &fake_cache_mtime));
+ EXPECT_FALSE(WrappedSimpleIndexFile::IsIndexFileStale(fake_cache_mtime,
+ index_path));
+
+ SimpleIndexLoadResult load_index_result;
+ simple_index_file.LoadIndexEntries(fake_cache_mtime,
+ GetCallback(),
+ &load_index_result);
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_FALSE(base::PathExists(index_path));
+ ASSERT_TRUE(callback_called());
+ EXPECT_TRUE(load_index_result.did_load);
+ EXPECT_TRUE(load_index_result.flush_required);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_index_unittest.cc b/chromium/net/disk_cache/simple/simple_index_unittest.cc
new file mode 100644
index 00000000000..0c845b21318
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_index_unittest.cc
@@ -0,0 +1,581 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/hash.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/pickle.h"
+#include "base/sha1.h"
+#include "base/strings/stringprintf.h"
+#include "base/task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "net/disk_cache/simple/simple_index.h"
+#include "net/disk_cache/simple/simple_index_file.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int64 kTestLastUsedTimeInternal = 12345;
+const base::Time kTestLastUsedTime =
+ base::Time::FromInternalValue(kTestLastUsedTimeInternal);
+const uint64 kTestEntrySize = 789;
+const uint64 kKey1Hash = disk_cache::simple_util::GetEntryHashKey("key1");
+const uint64 kKey2Hash = disk_cache::simple_util::GetEntryHashKey("key2");
+const uint64 kKey3Hash = disk_cache::simple_util::GetEntryHashKey("key3");
+
+} // namespace
+
+namespace disk_cache {
+
+class EntryMetadataTest : public testing::Test {
+ public:
+ EntryMetadata NewEntryMetadataWithValues() {
+ return EntryMetadata(kTestLastUsedTime, kTestEntrySize);
+ }
+
+ void CheckEntryMetadataValues(const EntryMetadata& entry_metadata) {
+ EXPECT_EQ(kTestLastUsedTime, entry_metadata.GetLastUsedTime());
+ EXPECT_EQ(kTestEntrySize, entry_metadata.GetEntrySize());
+ }
+};
+
+class MockSimpleIndexFile : public SimpleIndexFile,
+ public base::SupportsWeakPtr<MockSimpleIndexFile> {
+ public:
+ MockSimpleIndexFile()
+ : SimpleIndexFile(NULL, NULL, base::FilePath()),
+ load_result_(NULL),
+ load_index_entries_calls_(0),
+ doom_entry_set_calls_(0),
+ disk_writes_(0) {}
+
+ virtual void LoadIndexEntries(
+ base::Time cache_last_modified,
+ const base::Closure& callback,
+ SimpleIndexLoadResult* out_load_result) OVERRIDE {
+ load_callback_ = callback;
+ load_result_ = out_load_result;
+ ++load_index_entries_calls_;
+ }
+
+ virtual void WriteToDisk(const SimpleIndex::EntrySet& entry_set,
+ uint64 cache_size,
+ const base::TimeTicks& start,
+ bool app_on_background) OVERRIDE {
+ disk_writes_++;
+ disk_write_entry_set_ = entry_set;
+ }
+
+ virtual void DoomEntrySet(
+ scoped_ptr<std::vector<uint64> > entry_hashes,
+ const base::Callback<void(int)>& reply_callback) OVERRIDE {
+ last_doom_entry_hashes_ = *entry_hashes.get();
+ last_doom_reply_callback_ = reply_callback;
+ ++doom_entry_set_calls_;
+ }
+
+ void GetAndResetDiskWriteEntrySet(SimpleIndex::EntrySet* entry_set) {
+ entry_set->swap(disk_write_entry_set_);
+ }
+
+ const base::Closure& load_callback() const { return load_callback_; }
+ SimpleIndexLoadResult* load_result() const { return load_result_; }
+ int load_index_entries_calls() const { return load_index_entries_calls_; }
+ int disk_writes() const { return disk_writes_; }
+ const std::vector<uint64>& last_doom_entry_hashes() const {
+ return last_doom_entry_hashes_;
+ }
+ int doom_entry_set_calls() const { return doom_entry_set_calls_; }
+
+ private:
+ base::Closure load_callback_;
+ SimpleIndexLoadResult* load_result_;
+ int load_index_entries_calls_;
+ std::vector<uint64> last_doom_entry_hashes_;
+ int doom_entry_set_calls_;
+ base::Callback<void(int)> last_doom_reply_callback_;
+ int disk_writes_;
+ SimpleIndex::EntrySet disk_write_entry_set_;
+};
+
+class SimpleIndexTest : public testing::Test {
+ public:
+ virtual void SetUp() OVERRIDE {
+ scoped_ptr<MockSimpleIndexFile> index_file(new MockSimpleIndexFile());
+ index_file_ = index_file->AsWeakPtr();
+ index_.reset(new SimpleIndex(NULL, base::FilePath(),
+ index_file.PassAs<SimpleIndexFile>()));
+
+ index_->Initialize(base::Time());
+ }
+
+ void WaitForTimeChange() {
+ base::Time now(base::Time::Now());
+
+ do {
+ base::PlatformThread::YieldCurrentThread();
+ } while (now == base::Time::Now());
+ }
+
+ // Redirect to allow single "friend" declaration in base class.
+ bool GetEntryForTesting(const std::string& key, EntryMetadata* metadata) {
+ const uint64 hash_key = simple_util::GetEntryHashKey(key);
+ SimpleIndex::EntrySet::iterator it = index_->entries_set_.find(hash_key);
+ if (index_->entries_set_.end() == it)
+ return false;
+ *metadata = it->second;
+ return true;
+ }
+
+ void InsertIntoIndexFileReturn(const std::string& key,
+ base::Time last_used_time,
+ uint64 entry_size) {
+ uint64 hash_key(simple_util::GetEntryHashKey(key));
+ index_file_->load_result()->entries.insert(std::make_pair(
+ hash_key, EntryMetadata(last_used_time, entry_size)));
+ }
+
+ void ReturnIndexFile() {
+ index_file_->load_result()->did_load = true;
+ index_file_->load_callback().Run();
+ }
+
+ // Non-const for timer manipulation.
+ SimpleIndex* index() { return index_.get(); }
+ const MockSimpleIndexFile* index_file() const { return index_file_.get(); }
+
+ protected:
+ scoped_ptr<SimpleIndex> index_;
+ base::WeakPtr<MockSimpleIndexFile> index_file_;
+};
+
+TEST_F(EntryMetadataTest, Basics) {
+ EntryMetadata entry_metadata;
+ EXPECT_EQ(base::Time::FromInternalValue(0), entry_metadata.GetLastUsedTime());
+ EXPECT_EQ(size_t(0), entry_metadata.GetEntrySize());
+
+ entry_metadata = NewEntryMetadataWithValues();
+ CheckEntryMetadataValues(entry_metadata);
+
+ const base::Time new_time = base::Time::FromInternalValue(5);
+ entry_metadata.SetLastUsedTime(new_time);
+ EXPECT_EQ(new_time, entry_metadata.GetLastUsedTime());
+}
+
+TEST_F(EntryMetadataTest, Serialize) {
+ EntryMetadata entry_metadata = NewEntryMetadataWithValues();
+
+ Pickle pickle;
+ entry_metadata.Serialize(&pickle);
+
+ PickleIterator it(pickle);
+ EntryMetadata new_entry_metadata;
+ new_entry_metadata.Deserialize(&it);
+ CheckEntryMetadataValues(new_entry_metadata);
+}
+
+TEST_F(SimpleIndexTest, IndexSizeCorrectOnMerge) {
+ typedef disk_cache::SimpleIndex::EntrySet EntrySet;
+ index()->SetMaxSize(100);
+ index()->Insert("two");
+ index()->UpdateEntrySize("two", 2);
+ index()->Insert("five");
+ index()->UpdateEntrySize("five", 5);
+ index()->Insert("seven");
+ index()->UpdateEntrySize("seven", 7);
+ EXPECT_EQ(14U, index()->cache_size_);
+ {
+ scoped_ptr<SimpleIndexLoadResult> result(new SimpleIndexLoadResult());
+ result->did_load = true;
+ index()->MergeInitializingSet(result.Pass());
+ }
+ EXPECT_EQ(14U, index()->cache_size_);
+ {
+ scoped_ptr<SimpleIndexLoadResult> result(new SimpleIndexLoadResult());
+ result->did_load = true;
+ const uint64 new_hash_key = simple_util::GetEntryHashKey("eleven");
+ result->entries.insert(
+ std::make_pair(new_hash_key, EntryMetadata(base::Time::Now(), 11)));
+ const uint64 redundant_hash_key = simple_util::GetEntryHashKey("seven");
+ result->entries.insert(std::make_pair(redundant_hash_key,
+ EntryMetadata(base::Time::Now(), 7)));
+ index()->MergeInitializingSet(result.Pass());
+ }
+ EXPECT_EQ(2U + 5U + 7U + 11U, index()->cache_size_);
+}
+
+// State of index changes as expected with an insert and a remove.
+TEST_F(SimpleIndexTest, BasicInsertRemove) {
+ // Confirm blank state.
+ EntryMetadata metadata;
+ EXPECT_EQ(base::Time(), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+
+ // Confirm state after insert.
+ index()->Insert("key1");
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata));
+ base::Time now(base::Time::Now());
+ EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+
+ // Confirm state after remove.
+ metadata = EntryMetadata();
+ index()->Remove("key1");
+ EXPECT_FALSE(GetEntryForTesting("key1", &metadata));
+ EXPECT_EQ(base::Time(), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+}
+
+TEST_F(SimpleIndexTest, Has) {
+ // Confirm the base index has dispatched the request for index entries.
+ EXPECT_TRUE(index_file_.get());
+ EXPECT_EQ(1, index_file_->load_index_entries_calls());
+
+ // Confirm "Has()" always returns true before the callback is called.
+ EXPECT_TRUE(index()->Has(kKey1Hash));
+ index()->Insert("key1");
+ EXPECT_TRUE(index()->Has(kKey1Hash));
+ index()->Remove("key1");
+ // TODO(rdsmith): Maybe return false on explicitly removed entries?
+ EXPECT_TRUE(index()->Has(kKey1Hash));
+
+ ReturnIndexFile();
+
+  // Confirm "Has()" returns conditionally now.
+ EXPECT_FALSE(index()->Has(kKey1Hash));
+ index()->Insert("key1");
+ EXPECT_TRUE(index()->Has(kKey1Hash));
+ index()->Remove("key1");
+}
+
+TEST_F(SimpleIndexTest, UseIfExists) {
+ // Confirm the base index has dispatched the request for index entries.
+ EXPECT_TRUE(index_file_.get());
+ EXPECT_EQ(1, index_file_->load_index_entries_calls());
+
+ // Confirm "UseIfExists()" always returns true before the callback is called
+ // and updates mod time if the entry was really there.
+ EntryMetadata metadata1, metadata2;
+ EXPECT_TRUE(index()->UseIfExists("key1"));
+ EXPECT_FALSE(GetEntryForTesting("key1", &metadata1));
+ index()->Insert("key1");
+ EXPECT_TRUE(index()->UseIfExists("key1"));
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata1));
+ WaitForTimeChange();
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata2));
+ EXPECT_EQ(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
+ EXPECT_TRUE(index()->UseIfExists("key1"));
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata2));
+ EXPECT_LT(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
+ index()->Remove("key1");
+ EXPECT_TRUE(index()->UseIfExists("key1"));
+
+ ReturnIndexFile();
+
+  // Confirm "UseIfExists()" returns conditionally now.
+ EXPECT_FALSE(index()->UseIfExists("key1"));
+ EXPECT_FALSE(GetEntryForTesting("key1", &metadata1));
+ index()->Insert("key1");
+ EXPECT_TRUE(index()->UseIfExists("key1"));
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata1));
+ WaitForTimeChange();
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata2));
+ EXPECT_EQ(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
+ EXPECT_TRUE(index()->UseIfExists("key1"));
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata2));
+ EXPECT_LT(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
+ index()->Remove("key1");
+ EXPECT_FALSE(index()->UseIfExists("key1"));
+}
+
+TEST_F(SimpleIndexTest, UpdateEntrySize) {
+ base::Time now(base::Time::Now());
+
+ index()->SetMaxSize(1000);
+
+ InsertIntoIndexFileReturn("key1",
+ now - base::TimeDelta::FromDays(2),
+ 475u);
+ ReturnIndexFile();
+
+ EntryMetadata metadata;
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata));
+ EXPECT_EQ(now - base::TimeDelta::FromDays(2), metadata.GetLastUsedTime());
+ EXPECT_EQ(475u, metadata.GetEntrySize());
+
+ index()->UpdateEntrySize("key1", 600u);
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata));
+ EXPECT_EQ(600u, metadata.GetEntrySize());
+ EXPECT_EQ(1, index()->GetEntryCount());
+}
+
+TEST_F(SimpleIndexTest, GetEntryCount) {
+ EXPECT_EQ(0, index()->GetEntryCount());
+ index()->Insert("key1");
+ EXPECT_EQ(1, index()->GetEntryCount());
+ index()->Insert("key2");
+ EXPECT_EQ(2, index()->GetEntryCount());
+ index()->Insert("key3");
+ EXPECT_EQ(3, index()->GetEntryCount());
+ index()->Insert("key3");
+ EXPECT_EQ(3, index()->GetEntryCount());
+ index()->Remove("key2");
+ EXPECT_EQ(2, index()->GetEntryCount());
+ index()->Insert("key4");
+ EXPECT_EQ(3, index()->GetEntryCount());
+ index()->Remove("key3");
+ EXPECT_EQ(2, index()->GetEntryCount());
+ index()->Remove("key3");
+ EXPECT_EQ(2, index()->GetEntryCount());
+ index()->Remove("key1");
+ EXPECT_EQ(1, index()->GetEntryCount());
+ index()->Remove("key4");
+ EXPECT_EQ(0, index()->GetEntryCount());
+}
+
+// Confirm that we get the results we expect from a simple init.
+TEST_F(SimpleIndexTest, BasicInit) {
+ base::Time now(base::Time::Now());
+
+ InsertIntoIndexFileReturn("key1",
+ now - base::TimeDelta::FromDays(2),
+ 10u);
+ InsertIntoIndexFileReturn("key2",
+ now - base::TimeDelta::FromDays(3),
+ 100u);
+
+ ReturnIndexFile();
+
+ EntryMetadata metadata;
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata));
+ EXPECT_EQ(now - base::TimeDelta::FromDays(2), metadata.GetLastUsedTime());
+ EXPECT_EQ(10ul, metadata.GetEntrySize());
+ EXPECT_TRUE(GetEntryForTesting("key2", &metadata));
+ EXPECT_EQ(now - base::TimeDelta::FromDays(3), metadata.GetLastUsedTime());
+ EXPECT_EQ(100ul, metadata.GetEntrySize());
+}
+
+// Remove something that's going to come in from the loaded index.
+TEST_F(SimpleIndexTest, RemoveBeforeInit) {
+ index()->Remove("key1");
+
+ InsertIntoIndexFileReturn("key1",
+ base::Time::Now() - base::TimeDelta::FromDays(2),
+ 10u);
+ ReturnIndexFile();
+
+ EXPECT_FALSE(index()->Has(kKey1Hash));
+}
+
+// Insert something that's going to come in from the loaded index; correct
+// result?
+TEST_F(SimpleIndexTest, InsertBeforeInit) {
+ index()->Insert("key1");
+
+ InsertIntoIndexFileReturn("key1",
+ base::Time::Now() - base::TimeDelta::FromDays(2),
+ 10u);
+ ReturnIndexFile();
+
+ EntryMetadata metadata;
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata));
+ base::Time now(base::Time::Now());
+ EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+}
+
+// Insert and Remove something that's going to come in from the loaded index.
+TEST_F(SimpleIndexTest, InsertRemoveBeforeInit) {
+ index()->Insert("key1");
+ index()->Remove("key1");
+
+ InsertIntoIndexFileReturn("key1",
+ base::Time::Now() - base::TimeDelta::FromDays(2),
+ 10u);
+ ReturnIndexFile();
+
+ EXPECT_FALSE(index()->Has(kKey1Hash));
+}
+
+// Insert and Remove something that's going to come in from the loaded index.
+TEST_F(SimpleIndexTest, RemoveInsertBeforeInit) {
+ index()->Remove("key1");
+ index()->Insert("key1");
+
+ InsertIntoIndexFileReturn("key1",
+ base::Time::Now() - base::TimeDelta::FromDays(2),
+ 10u);
+ ReturnIndexFile();
+
+ EntryMetadata metadata;
+ EXPECT_TRUE(GetEntryForTesting("key1", &metadata));
+ base::Time now(base::Time::Now());
+ EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+}
+
+// Do all above tests at once + a non-conflict to test for cross-key
+// interactions.
+TEST_F(SimpleIndexTest, AllInitConflicts) {
+ base::Time now(base::Time::Now());
+
+ index()->Remove("key1");
+ InsertIntoIndexFileReturn("key1",
+ now - base::TimeDelta::FromDays(2),
+ 10u);
+ index()->Insert("key2");
+ InsertIntoIndexFileReturn("key2",
+ now - base::TimeDelta::FromDays(3),
+ 100u);
+ index()->Insert("key3");
+ index()->Remove("key3");
+ InsertIntoIndexFileReturn("key3",
+ now - base::TimeDelta::FromDays(4),
+ 1000u);
+ index()->Remove("key4");
+ index()->Insert("key4");
+ InsertIntoIndexFileReturn("key4",
+ now - base::TimeDelta::FromDays(5),
+ 10000u);
+ InsertIntoIndexFileReturn("key5",
+ now - base::TimeDelta::FromDays(6),
+ 100000u);
+
+ ReturnIndexFile();
+
+ EXPECT_FALSE(index()->Has(kKey1Hash));
+
+ EntryMetadata metadata;
+ EXPECT_TRUE(GetEntryForTesting("key2", &metadata));
+ EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+
+ EXPECT_FALSE(index()->Has(kKey3Hash));
+
+ EXPECT_TRUE(GetEntryForTesting("key4", &metadata));
+ EXPECT_LT(now - base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_GT(now + base::TimeDelta::FromMinutes(1), metadata.GetLastUsedTime());
+ EXPECT_EQ(0ul, metadata.GetEntrySize());
+
+ EXPECT_TRUE(GetEntryForTesting("key5", &metadata));
+ EXPECT_EQ(now - base::TimeDelta::FromDays(6), metadata.GetLastUsedTime());
+ EXPECT_EQ(100000u, metadata.GetEntrySize());
+}
+
+TEST_F(SimpleIndexTest, BasicEviction) {
+ base::Time now(base::Time::Now());
+ index()->SetMaxSize(1000);
+ InsertIntoIndexFileReturn("key1",
+ now - base::TimeDelta::FromDays(2),
+ 475u);
+ index()->Insert("key2");
+ index()->UpdateEntrySize("key2", 475);
+ ReturnIndexFile();
+
+ WaitForTimeChange();
+
+ index()->Insert("key3");
+ // Confirm index is as expected: No eviction, everything there.
+ EXPECT_EQ(3, index()->GetEntryCount());
+ EXPECT_EQ(0, index_file()->doom_entry_set_calls());
+ EXPECT_TRUE(index()->Has(kKey1Hash));
+ EXPECT_TRUE(index()->Has(kKey2Hash));
+ EXPECT_TRUE(index()->Has(kKey3Hash));
+
+ // Trigger an eviction, and make sure the right things are tossed.
+ // TODO(rdsmith): This is dependent on the innards of the implementation
+ // as to at exactly what point we trigger eviction. Not sure how to fix
+ // that.
+ index()->UpdateEntrySize("key3", 475);
+ EXPECT_EQ(1, index_file()->doom_entry_set_calls());
+ EXPECT_EQ(1, index()->GetEntryCount());
+ EXPECT_FALSE(index()->Has(kKey1Hash));
+ EXPECT_FALSE(index()->Has(kKey2Hash));
+ EXPECT_TRUE(index()->Has(kKey3Hash));
+ ASSERT_EQ(2u, index_file_->last_doom_entry_hashes().size());
+}
+
+// Confirm all the operations queue a disk write at some point in the
+// future.
+TEST_F(SimpleIndexTest, DiskWriteQueued) {
+ index()->SetMaxSize(1000);
+ ReturnIndexFile();
+
+ EXPECT_FALSE(index()->write_to_disk_timer_.IsRunning());
+
+ index()->Insert("key1");
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ index()->write_to_disk_timer_.Stop();
+ EXPECT_FALSE(index()->write_to_disk_timer_.IsRunning());
+
+ index()->UseIfExists("key1");
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ index()->write_to_disk_timer_.Stop();
+
+ index()->UpdateEntrySize("key1", 20);
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ index()->write_to_disk_timer_.Stop();
+
+ index()->Remove("key1");
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ index()->write_to_disk_timer_.Stop();
+}
+
+TEST_F(SimpleIndexTest, DiskWriteExecuted) {
+ index()->SetMaxSize(1000);
+ ReturnIndexFile();
+
+ EXPECT_FALSE(index()->write_to_disk_timer_.IsRunning());
+
+ index()->Insert("key1");
+ index()->UpdateEntrySize("key1", 20);
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ base::Closure user_task(index()->write_to_disk_timer_.user_task());
+ index()->write_to_disk_timer_.Stop();
+
+ EXPECT_EQ(0, index_file_->disk_writes());
+ user_task.Run();
+ EXPECT_EQ(1, index_file_->disk_writes());
+ SimpleIndex::EntrySet entry_set;
+ index_file_->GetAndResetDiskWriteEntrySet(&entry_set);
+
+ uint64 hash_key(simple_util::GetEntryHashKey("key1"));
+ base::Time now(base::Time::Now());
+ ASSERT_EQ(1u, entry_set.size());
+ EXPECT_EQ(hash_key, entry_set.begin()->first);
+ const EntryMetadata& entry1(entry_set.begin()->second);
+ EXPECT_LT(now - base::TimeDelta::FromMinutes(1), entry1.GetLastUsedTime());
+ EXPECT_GT(now + base::TimeDelta::FromMinutes(1), entry1.GetLastUsedTime());
+ EXPECT_EQ(20u, entry1.GetEntrySize());
+}
+
+TEST_F(SimpleIndexTest, DiskWritePostponed) {
+ index()->SetMaxSize(1000);
+ ReturnIndexFile();
+
+ EXPECT_FALSE(index()->write_to_disk_timer_.IsRunning());
+
+ index()->Insert("key1");
+ index()->UpdateEntrySize("key1", 20);
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ base::TimeTicks expected_trigger(
+ index()->write_to_disk_timer_.desired_run_time());
+
+ WaitForTimeChange();
+ EXPECT_EQ(expected_trigger, index()->write_to_disk_timer_.desired_run_time());
+ index()->Insert("key2");
+ index()->UpdateEntrySize("key2", 40);
+ EXPECT_TRUE(index()->write_to_disk_timer_.IsRunning());
+ EXPECT_LT(expected_trigger, index()->write_to_disk_timer_.desired_run_time());
+ index()->write_to_disk_timer_.Stop();
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_net_log_parameters.cc b/chromium/net/disk_cache/simple/simple_net_log_parameters.cc
new file mode 100644
index 00000000000..9acd66396a9
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_net_log_parameters.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_net_log_parameters.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/simple/simple_entry_impl.h"
+
+namespace {
+
+base::Value* NetLogSimpleEntryConstructionCallback(
+ const disk_cache::SimpleEntryImpl* entry,
+ net::NetLog::LogLevel log_level ALLOW_UNUSED) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ dict->SetString("entry_hash",
+ base::StringPrintf("%#016" PRIx64, entry->entry_hash()));
+ return dict;
+}
+
+base::Value* NetLogSimpleEntryCreationCallback(
+ const disk_cache::SimpleEntryImpl* entry,
+ int net_error,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ dict->SetInteger("net_error", net_error);
+ if (net_error == net::OK)
+ dict->SetString("key", entry->key());
+ return dict;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+net::NetLog::ParametersCallback CreateNetLogSimpleEntryConstructionCallback(
+ const SimpleEntryImpl* entry) {
+ DCHECK(entry);
+ return base::Bind(&NetLogSimpleEntryConstructionCallback, entry);
+}
+
+net::NetLog::ParametersCallback CreateNetLogSimpleEntryCreationCallback(
+ const SimpleEntryImpl* entry,
+ int net_error) {
+ DCHECK(entry);
+ return base::Bind(&NetLogSimpleEntryCreationCallback, entry, net_error);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_net_log_parameters.h b/chromium/net/disk_cache/simple/simple_net_log_parameters.h
new file mode 100644
index 00000000000..b6f386f4a99
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_net_log_parameters.h
@@ -0,0 +1,32 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_NET_LOG_PARAMETERS_H_
+#define NET_DISK_CACHE_SIMPLE_NET_LOG_PARAMETERS_H_
+
+#include "net/base/net_log.h"
+
+// This file augments the functions in net/disk_cache/net_log_parameters.h to
+// include ones that deal with specifics of the Simple Cache backend.
+namespace disk_cache {
+
+class SimpleEntryImpl;
+
+// Creates a NetLog callback that returns parameters for the construction of a
+// SimpleEntryImpl. Contains the entry's hash. |entry| can't be NULL and must
+// outlive the returned callback.
+net::NetLog::ParametersCallback CreateNetLogSimpleEntryConstructionCallback(
+ const SimpleEntryImpl* entry);
+
+// Creates a NetLog callback that returns parameters for the result of calling
+// |CreateEntry| or |OpenEntry| on a SimpleEntryImpl. Contains the |net_error|
+// and, if successful, the entry's key. |entry| can't be NULL and must outlive
+// the returned callback.
+net::NetLog::ParametersCallback CreateNetLogSimpleEntryCreationCallback(
+ const SimpleEntryImpl* entry,
+ int net_error);
+
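+// A hypothetical call-site sketch (the NetLog event type name below is an
+// assumption, not taken from this patch):
+//
+//   net_log.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
+//                      CreateNetLogSimpleEntryConstructionCallback(entry));
+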
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_NET_LOG_PARAMETERS_H_
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.cc b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
new file mode 100644
index 00000000000..e6f1eaa4f21
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.cc
@@ -0,0 +1,635 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_synchronous_entry.h"
+
+#include <algorithm>
+#include <cstring>
+#include <functional>
+#include <limits>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/file_util.h"
+#include "base/hash.h"
+#include "base/location.h"
+#include "base/metrics/histogram.h"
+#include "base/sha1.h"
+#include "base/strings/stringprintf.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "third_party/zlib/zlib.h"
+
+using base::kInvalidPlatformFileValue;
+using base::ClosePlatformFile;
+using base::FilePath;
+using base::GetPlatformFileInfo;
+using base::PlatformFileError;
+using base::PlatformFileInfo;
+using base::PLATFORM_FILE_CREATE;
+using base::PLATFORM_FILE_ERROR_EXISTS;
+using base::PLATFORM_FILE_OK;
+using base::PLATFORM_FILE_OPEN;
+using base::PLATFORM_FILE_READ;
+using base::PLATFORM_FILE_WRITE;
+using base::ReadPlatformFile;
+using base::Time;
+using base::TruncatePlatformFile;
+using base::WritePlatformFile;
+
+namespace {
+
+// Used in histograms, please only add entries at the end.
+enum OpenEntryResult {
+ OPEN_ENTRY_SUCCESS = 0,
+ OPEN_ENTRY_PLATFORM_FILE_ERROR = 1,
+ OPEN_ENTRY_CANT_READ_HEADER = 2,
+ OPEN_ENTRY_BAD_MAGIC_NUMBER = 3,
+ OPEN_ENTRY_BAD_VERSION = 4,
+ OPEN_ENTRY_CANT_READ_KEY = 5,
+ // OPEN_ENTRY_KEY_MISMATCH = 6, Deprecated.
+ OPEN_ENTRY_KEY_HASH_MISMATCH = 7,
+ OPEN_ENTRY_MAX = 8,
+};
+
+// Used in histograms, please only add entries at the end.
+enum CreateEntryResult {
+ CREATE_ENTRY_SUCCESS = 0,
+ CREATE_ENTRY_PLATFORM_FILE_ERROR = 1,
+ CREATE_ENTRY_CANT_WRITE_HEADER = 2,
+ CREATE_ENTRY_CANT_WRITE_KEY = 3,
+ CREATE_ENTRY_MAX = 4,
+};
+
+// Used in histograms, please only add entries at the end.
+enum WriteResult {
+ WRITE_RESULT_SUCCESS = 0,
+ WRITE_RESULT_PRETRUNCATE_FAILURE,
+ WRITE_RESULT_WRITE_FAILURE,
+ WRITE_RESULT_TRUNCATE_FAILURE,
+ WRITE_RESULT_MAX,
+};
+
+// Used in histograms, please only add entries at the end.
+enum CheckEOFResult {
+ CHECK_EOF_RESULT_SUCCESS,
+ CHECK_EOF_RESULT_READ_FAILURE,
+ CHECK_EOF_RESULT_MAGIC_NUMBER_MISMATCH,
+ CHECK_EOF_RESULT_CRC_MISMATCH,
+ CHECK_EOF_RESULT_MAX,
+};
+
+// Used in histograms, please only add entries at the end.
+enum CloseResult {
+ CLOSE_RESULT_SUCCESS,
+ CLOSE_RESULT_WRITE_FAILURE,
+};
+
+void RecordSyncOpenResult(OpenEntryResult result, bool had_index) {
+ DCHECK_GT(OPEN_ENTRY_MAX, result);
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncOpenResult", result, OPEN_ENTRY_MAX);
+ if (had_index) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncOpenResult_WithIndex", result, OPEN_ENTRY_MAX);
+ } else {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncOpenResult_WithoutIndex", result, OPEN_ENTRY_MAX);
+ }
+}
+
+void RecordSyncCreateResult(CreateEntryResult result, bool had_index) {
+ DCHECK_GT(CREATE_ENTRY_MAX, result);
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCreateResult", result, CREATE_ENTRY_MAX);
+ if (had_index) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCreateResult_WithIndex", result, CREATE_ENTRY_MAX);
+ } else {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCreateResult_WithoutIndex", result, CREATE_ENTRY_MAX);
+ }
+}
+
+void RecordWriteResult(WriteResult result) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncWriteResult", result, WRITE_RESULT_MAX);
+}
+
+void RecordCheckEOFResult(CheckEOFResult result) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCheckEOFResult", result, CHECK_EOF_RESULT_MAX);
+}
+
+void RecordCloseResult(CloseResult result) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCloseResult", result, WRITE_RESULT_MAX);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+using simple_util::ConvertEntryHashKeyToHexString;
+using simple_util::GetEntryHashKey;
+using simple_util::GetFilenameFromEntryHashAndIndex;
+using simple_util::GetDataSizeFromKeyAndFileSize;
+using simple_util::GetFileSizeFromKeyAndDataSize;
+using simple_util::GetFileOffsetFromKeyAndDataOffset;
+
+SimpleEntryStat::SimpleEntryStat() {}
+
+SimpleEntryStat::SimpleEntryStat(base::Time last_used_p,
+ base::Time last_modified_p,
+ const int32 data_size_p[])
+ : last_used(last_used_p),
+ last_modified(last_modified_p) {
+ memcpy(data_size, data_size_p, sizeof(data_size));
+}
+
+SimpleEntryCreationResults::SimpleEntryCreationResults(
+ SimpleEntryStat entry_stat)
+ : sync_entry(NULL),
+ entry_stat(entry_stat),
+ result(net::OK) {
+}
+
+SimpleEntryCreationResults::~SimpleEntryCreationResults() {
+}
+
+SimpleSynchronousEntry::CRCRecord::CRCRecord() : index(-1),
+ has_crc32(false),
+ data_crc32(0) {
+}
+
+SimpleSynchronousEntry::CRCRecord::CRCRecord(int index_p,
+ bool has_crc32_p,
+ uint32 data_crc32_p)
+ : index(index_p),
+ has_crc32(has_crc32_p),
+ data_crc32(data_crc32_p) {}
+
+SimpleSynchronousEntry::EntryOperationData::EntryOperationData(int index_p,
+ int offset_p,
+ int buf_len_p)
+ : index(index_p),
+ offset(offset_p),
+ buf_len(buf_len_p) {}
+
+SimpleSynchronousEntry::EntryOperationData::EntryOperationData(int index_p,
+ int offset_p,
+ int buf_len_p,
+ bool truncate_p)
+ : index(index_p),
+ offset(offset_p),
+ buf_len(buf_len_p),
+ truncate(truncate_p) {}
+
+// static
+void SimpleSynchronousEntry::OpenEntry(
+ const FilePath& path,
+ const uint64 entry_hash,
+ bool had_index,
+ SimpleEntryCreationResults *out_results) {
+ SimpleSynchronousEntry* sync_entry = new SimpleSynchronousEntry(path, "",
+ entry_hash);
+ out_results->result = sync_entry->InitializeForOpen(
+ had_index, &out_results->entry_stat);
+ if (out_results->result != net::OK) {
+ sync_entry->Doom();
+ delete sync_entry;
+ out_results->sync_entry = NULL;
+ return;
+ }
+ out_results->sync_entry = sync_entry;
+}
+
+// static
+void SimpleSynchronousEntry::CreateEntry(
+ const FilePath& path,
+ const std::string& key,
+ const uint64 entry_hash,
+ bool had_index,
+ SimpleEntryCreationResults *out_results) {
+ DCHECK_EQ(entry_hash, GetEntryHashKey(key));
+ SimpleSynchronousEntry* sync_entry = new SimpleSynchronousEntry(path, key,
+ entry_hash);
+ out_results->result = sync_entry->InitializeForCreate(
+ had_index, &out_results->entry_stat);
+ if (out_results->result != net::OK) {
+ if (out_results->result != net::ERR_FILE_EXISTS)
+ sync_entry->Doom();
+ delete sync_entry;
+ out_results->sync_entry = NULL;
+ return;
+ }
+ out_results->sync_entry = sync_entry;
+}
+
+// TODO(gavinp): Move this function to its correct location in this .cc file.
+// static
+bool SimpleSynchronousEntry::DeleteFilesForEntryHash(
+ const FilePath& path,
+ const uint64 entry_hash) {
+ bool result = true;
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ FilePath to_delete = path.AppendASCII(
+ GetFilenameFromEntryHashAndIndex(entry_hash, i));
+ if (!base::DeleteFile(to_delete, false)) {
+ result = false;
+ DLOG(ERROR) << "Could not delete " << to_delete.MaybeAsASCII();
+ }
+ }
+ return result;
+}
+
+// static
+void SimpleSynchronousEntry::DoomEntry(
+ const FilePath& path,
+ const std::string& key,
+ uint64 entry_hash,
+ int* out_result) {
+ DCHECK_EQ(entry_hash, GetEntryHashKey(key));
+ bool deleted_well = DeleteFilesForEntryHash(path, entry_hash);
+ *out_result = deleted_well ? net::OK : net::ERR_FAILED;
+}
+
+// static
+int SimpleSynchronousEntry::DoomEntrySet(
+ scoped_ptr<std::vector<uint64> > key_hashes,
+ const FilePath& path) {
+ const size_t did_delete_count = std::count_if(
+ key_hashes->begin(), key_hashes->end(), std::bind1st(
+ std::ptr_fun(SimpleSynchronousEntry::DeleteFilesForEntryHash), path));
+ return (did_delete_count == key_hashes->size()) ? net::OK : net::ERR_FAILED;
+}
+
+void SimpleSynchronousEntry::ReadData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* out_buf,
+ uint32* out_crc32,
+ base::Time* out_last_used,
+ int* out_result) const {
+ DCHECK(initialized_);
+ int64 file_offset =
+ GetFileOffsetFromKeyAndDataOffset(key_, in_entry_op.offset);
+ int bytes_read = ReadPlatformFile(files_[in_entry_op.index],
+ file_offset,
+ out_buf->data(),
+ in_entry_op.buf_len);
+ if (bytes_read > 0) {
+ *out_last_used = Time::Now();
+ *out_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(out_buf->data()),
+ bytes_read);
+ }
+ if (bytes_read >= 0) {
+ *out_result = bytes_read;
+ } else {
+ *out_result = net::ERR_CACHE_READ_FAILURE;
+ Doom();
+ }
+}
+
+void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* in_buf,
+ SimpleEntryStat* out_entry_stat,
+ int* out_result) const {
+ DCHECK(initialized_);
+ int index = in_entry_op.index;
+ int offset = in_entry_op.offset;
+ int buf_len = in_entry_op.buf_len;
+ int truncate = in_entry_op.truncate;
+
+ bool extending_by_write = offset + buf_len > out_entry_stat->data_size[index];
+ if (extending_by_write) {
+    // We are extending the file, and need to ensure the EOF record is zeroed.
+ const int64 file_eof_offset = GetFileOffsetFromKeyAndDataOffset(
+ key_, out_entry_stat->data_size[index]);
+ if (!TruncatePlatformFile(files_[index], file_eof_offset)) {
+ RecordWriteResult(WRITE_RESULT_PRETRUNCATE_FAILURE);
+ Doom();
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ }
+ const int64 file_offset = GetFileOffsetFromKeyAndDataOffset(key_, offset);
+ if (buf_len > 0) {
+ if (WritePlatformFile(
+ files_[index], file_offset, in_buf->data(), buf_len) != buf_len) {
+ RecordWriteResult(WRITE_RESULT_WRITE_FAILURE);
+ Doom();
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ }
+ if (!truncate && (buf_len > 0 || !extending_by_write)) {
+ out_entry_stat->data_size[index] =
+ std::max(out_entry_stat->data_size[index], offset + buf_len);
+ } else {
+ if (!TruncatePlatformFile(files_[index], file_offset + buf_len)) {
+ RecordWriteResult(WRITE_RESULT_TRUNCATE_FAILURE);
+ Doom();
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ out_entry_stat->data_size[index] = offset + buf_len;
+ }
+
+ RecordWriteResult(WRITE_RESULT_SUCCESS);
+ out_entry_stat->last_used = out_entry_stat->last_modified = Time::Now();
+ *out_result = buf_len;
+}
+
+void SimpleSynchronousEntry::CheckEOFRecord(int index,
+ int32 data_size,
+ uint32 expected_crc32,
+ int* out_result) const {
+ DCHECK(initialized_);
+
+ SimpleFileEOF eof_record;
+ int64 file_offset = GetFileOffsetFromKeyAndDataOffset(key_, data_size);
+ if (ReadPlatformFile(files_[index],
+ file_offset,
+ reinterpret_cast<char*>(&eof_record),
+ sizeof(eof_record)) != sizeof(eof_record)) {
+ RecordCheckEOFResult(CHECK_EOF_RESULT_READ_FAILURE);
+ Doom();
+ *out_result = net::ERR_CACHE_CHECKSUM_READ_FAILURE;
+ return;
+ }
+
+ if (eof_record.final_magic_number != kSimpleFinalMagicNumber) {
+ RecordCheckEOFResult(CHECK_EOF_RESULT_MAGIC_NUMBER_MISMATCH);
+ DLOG(INFO) << "eof record had bad magic number.";
+ Doom();
+ *out_result = net::ERR_CACHE_CHECKSUM_READ_FAILURE;
+ return;
+ }
+
+ const bool has_crc = (eof_record.flags & SimpleFileEOF::FLAG_HAS_CRC32) ==
+ SimpleFileEOF::FLAG_HAS_CRC32;
+ UMA_HISTOGRAM_BOOLEAN("SimpleCache.SyncCheckEOFHasCrc", has_crc);
+ if (has_crc && eof_record.data_crc32 != expected_crc32) {
+ RecordCheckEOFResult(CHECK_EOF_RESULT_CRC_MISMATCH);
+ DLOG(INFO) << "eof record had bad crc.";
+ Doom();
+ *out_result = net::ERR_CACHE_CHECKSUM_MISMATCH;
+ return;
+ }
+
+ RecordCheckEOFResult(CHECK_EOF_RESULT_SUCCESS);
+ *out_result = net::OK;
+}
+
+void SimpleSynchronousEntry::Close(
+ const SimpleEntryStat& entry_stat,
+ scoped_ptr<std::vector<CRCRecord> > crc32s_to_write) {
+ for (std::vector<CRCRecord>::const_iterator it = crc32s_to_write->begin();
+ it != crc32s_to_write->end(); ++it) {
+ SimpleFileEOF eof_record;
+ eof_record.final_magic_number = kSimpleFinalMagicNumber;
+ eof_record.flags = 0;
+ if (it->has_crc32)
+ eof_record.flags |= SimpleFileEOF::FLAG_HAS_CRC32;
+ eof_record.data_crc32 = it->data_crc32;
+ int64 file_offset = GetFileOffsetFromKeyAndDataOffset(
+ key_, entry_stat.data_size[it->index]);
+ if (WritePlatformFile(files_[it->index],
+ file_offset,
+ reinterpret_cast<const char*>(&eof_record),
+ sizeof(eof_record)) != sizeof(eof_record)) {
+ RecordCloseResult(CLOSE_RESULT_WRITE_FAILURE);
+ DLOG(INFO) << "Could not write eof record.";
+ Doom();
+ break;
+ }
+ const int64 file_size = file_offset + sizeof(eof_record);
+ UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.LastClusterSize",
+ file_size % 4096, 0, 4097, 50);
+ const int64 cluster_loss = file_size % 4096 ? 4096 - file_size % 4096 : 0;
+ UMA_HISTOGRAM_PERCENTAGE("SimpleCache.LastClusterLossPercent",
+ cluster_loss * 100 / (cluster_loss + file_size));
+ }
+
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ bool did_close_file = ClosePlatformFile(files_[i]);
+ CHECK(did_close_file);
+ }
+ RecordCloseResult(CLOSE_RESULT_SUCCESS);
+ have_open_files_ = false;
+ delete this;
+}
+
+SimpleSynchronousEntry::SimpleSynchronousEntry(const FilePath& path,
+ const std::string& key,
+ const uint64 entry_hash)
+ : path_(path),
+ entry_hash_(entry_hash),
+ key_(key),
+ have_open_files_(false),
+ initialized_(false) {
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ files_[i] = kInvalidPlatformFileValue;
+ }
+}
+
+SimpleSynchronousEntry::~SimpleSynchronousEntry() {
+ DCHECK(!(have_open_files_ && initialized_));
+ if (have_open_files_)
+ CloseFiles();
+}
+
+bool SimpleSynchronousEntry::OpenOrCreateFiles(
+ bool create,
+ bool had_index,
+ SimpleEntryStat* out_entry_stat) {
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ FilePath filename = path_.AppendASCII(
+ GetFilenameFromEntryHashAndIndex(entry_hash_, i));
+ int flags = PLATFORM_FILE_READ | PLATFORM_FILE_WRITE;
+ if (create)
+ flags |= PLATFORM_FILE_CREATE;
+ else
+ flags |= PLATFORM_FILE_OPEN;
+ PlatformFileError error;
+ files_[i] = CreatePlatformFile(filename, flags, NULL, &error);
+ if (error != PLATFORM_FILE_OK) {
+      // TODO(ttuttle,gavinp): Remove one histogram from each of these triplets;
+      // we can calculate the third as the sum or difference of the other two.
+ if (create) {
+ RecordSyncCreateResult(CREATE_ENTRY_PLATFORM_FILE_ERROR, had_index);
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.SyncCreatePlatformFileError",
+ -error, -base::PLATFORM_FILE_ERROR_MAX);
+ if (had_index) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCreatePlatformFileError_WithIndex",
+ -error, -base::PLATFORM_FILE_ERROR_MAX);
+ } else {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncCreatePlatformFileError_WithoutIndex",
+ -error, -base::PLATFORM_FILE_ERROR_MAX);
+ }
+ } else {
+ RecordSyncOpenResult(OPEN_ENTRY_PLATFORM_FILE_ERROR, had_index);
+ UMA_HISTOGRAM_ENUMERATION("SimpleCache.SyncOpenPlatformFileError",
+ -error, -base::PLATFORM_FILE_ERROR_MAX);
+ if (had_index) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncOpenPlatformFileError_WithIndex",
+ -error, -base::PLATFORM_FILE_ERROR_MAX);
+ } else {
+ UMA_HISTOGRAM_ENUMERATION(
+ "SimpleCache.SyncOpenPlatformFileError_WithoutIndex",
+ -error, -base::PLATFORM_FILE_ERROR_MAX);
+ }
+ }
+ while (--i >= 0) {
+ bool ALLOW_UNUSED did_close = ClosePlatformFile(files_[i]);
+ DLOG_IF(INFO, !did_close) << "Could not close file "
+ << filename.MaybeAsASCII();
+ }
+ return false;
+ }
+ }
+
+ have_open_files_ = true;
+ if (create) {
+ out_entry_stat->last_modified = out_entry_stat->last_used = Time::Now();
+ for (int i = 0; i < kSimpleEntryFileCount; ++i)
+ out_entry_stat->data_size[i] = 0;
+ } else {
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ PlatformFileInfo file_info;
+ bool success = GetPlatformFileInfo(files_[i], &file_info);
+ base::Time file_last_modified;
+ if (!success) {
+ DLOG(WARNING) << "Could not get platform file info.";
+ continue;
+ }
+ out_entry_stat->last_used = file_info.last_accessed;
+ if (simple_util::GetMTime(path_, &file_last_modified))
+ out_entry_stat->last_modified = file_last_modified;
+ else
+ out_entry_stat->last_modified = file_info.last_modified;
+
+      // Keep the file size in |data_size| briefly until the key is initialized
+      // properly.
+ out_entry_stat->data_size[i] = file_info.size;
+ }
+ }
+
+ return true;
+}
+
+void SimpleSynchronousEntry::CloseFiles() {
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ DCHECK_NE(kInvalidPlatformFileValue, files_[i]);
+ bool did_close = ClosePlatformFile(files_[i]);
+ DCHECK(did_close);
+ }
+}
+
+int SimpleSynchronousEntry::InitializeForOpen(bool had_index,
+ SimpleEntryStat* out_entry_stat) {
+ DCHECK(!initialized_);
+ if (!OpenOrCreateFiles(false, had_index, out_entry_stat))
+ return net::ERR_FAILED;
+
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ SimpleFileHeader header;
+ int header_read_result =
+ ReadPlatformFile(files_[i], 0, reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (header_read_result != sizeof(header)) {
+ DLOG(WARNING) << "Cannot read header from entry.";
+ RecordSyncOpenResult(OPEN_ENTRY_CANT_READ_HEADER, had_index);
+ return net::ERR_FAILED;
+ }
+
+ if (header.initial_magic_number != kSimpleInitialMagicNumber) {
+ // TODO(gavinp): This seems very bad; for now we log at WARNING, but we
+ // should give consideration to not saturating the log with these if that
+ // becomes a problem.
+ DLOG(WARNING) << "Magic number did not match.";
+ RecordSyncOpenResult(OPEN_ENTRY_BAD_MAGIC_NUMBER, had_index);
+ return net::ERR_FAILED;
+ }
+
+ if (header.version != kSimpleVersion) {
+ DLOG(WARNING) << "Unreadable version.";
+ RecordSyncOpenResult(OPEN_ENTRY_BAD_VERSION, had_index);
+ return net::ERR_FAILED;
+ }
+
+ scoped_ptr<char[]> key(new char[header.key_length]);
+ int key_read_result = ReadPlatformFile(files_[i], sizeof(header),
+ key.get(), header.key_length);
+ if (key_read_result != implicit_cast<int>(header.key_length)) {
+ DLOG(WARNING) << "Cannot read key from entry.";
+ RecordSyncOpenResult(OPEN_ENTRY_CANT_READ_KEY, had_index);
+ return net::ERR_FAILED;
+ }
+
+ key_ = std::string(key.get(), header.key_length);
+ out_entry_stat->data_size[i] =
+ GetDataSizeFromKeyAndFileSize(key_, out_entry_stat->data_size[i]);
+ if (out_entry_stat->data_size[i] < 0) {
+ // This entry can't possibly be valid, as it does not have enough space to
+ // store a valid SimpleFileEOF record.
+ return net::ERR_FAILED;
+ }
+
+ if (base::Hash(key.get(), header.key_length) != header.key_hash) {
+ DLOG(WARNING) << "Hash mismatch on key.";
+ RecordSyncOpenResult(OPEN_ENTRY_KEY_HASH_MISMATCH, had_index);
+ return net::ERR_FAILED;
+ }
+ }
+ RecordSyncOpenResult(OPEN_ENTRY_SUCCESS, had_index);
+ initialized_ = true;
+ return net::OK;
+}
+
+int SimpleSynchronousEntry::InitializeForCreate(
+ bool had_index,
+ SimpleEntryStat* out_entry_stat) {
+ DCHECK(!initialized_);
+ if (!OpenOrCreateFiles(true, had_index, out_entry_stat)) {
+ DLOG(WARNING) << "Could not create platform files.";
+ return net::ERR_FILE_EXISTS;
+ }
+ for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+ SimpleFileHeader header;
+ header.initial_magic_number = kSimpleInitialMagicNumber;
+ header.version = kSimpleVersion;
+
+ header.key_length = key_.size();
+ header.key_hash = base::Hash(key_);
+
+ if (WritePlatformFile(files_[i], 0, reinterpret_cast<char*>(&header),
+ sizeof(header)) != sizeof(header)) {
+ DLOG(WARNING) << "Could not write headers to new cache entry.";
+ RecordSyncCreateResult(CREATE_ENTRY_CANT_WRITE_HEADER, had_index);
+ return net::ERR_FAILED;
+ }
+
+ if (WritePlatformFile(files_[i], sizeof(header), key_.data(),
+ key_.size()) != implicit_cast<int>(key_.size())) {
+ DLOG(WARNING) << "Could not write keys to new cache entry.";
+ RecordSyncCreateResult(CREATE_ENTRY_CANT_WRITE_KEY, had_index);
+ return net::ERR_FAILED;
+ }
+ }
+ RecordSyncCreateResult(CREATE_ENTRY_SUCCESS, had_index);
+ initialized_ = true;
+ return net::OK;
+}
+
+void SimpleSynchronousEntry::Doom() const {
+ // TODO(gavinp): Consider if we should guard against redundant Doom() calls.
+ DeleteFilesForEntryHash(path_, entry_hash_);
+}
+
+} // namespace disk_cache
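
Editor's note on the checksum handling above: ReadData() returns the CRC-32 of just the chunk it read, seeded with crc32(0L, Z_NULL, 0), while CheckEOFRecord() compares a caller-supplied value against SimpleFileEOF::data_crc32. A minimal sketch of how a caller can accumulate a whole-stream CRC across sequential chunks with zlib (illustrative only, not part of the patch):

#include "base/basictypes.h"
#include "third_party/zlib/zlib.h"

// Illustrative helper, not upstream code. Seed with crc32(0L, Z_NULL, 0) and
// feed the chunks in stream order; the resulting value is the kind of CRC
// that CheckEOFRecord() compares against SimpleFileEOF::data_crc32.
uint32 AccumulateCrc32(uint32 running_crc, const char* chunk, int chunk_len) {
  return static_cast<uint32>(
      crc32(running_crc, reinterpret_cast<const Bytef*>(chunk), chunk_len));
}
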
diff --git a/chromium/net/disk_cache/simple/simple_synchronous_entry.h b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
new file mode 100644
index 00000000000..f591c9a0f93
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_synchronous_entry.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_SYNCHRONOUS_ENTRY_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_SYNCHRONOUS_ENTRY_H_
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/platform_file.h"
+#include "base/time/time.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+
+namespace net {
+class IOBuffer;
+}
+
+namespace disk_cache {
+
+class SimpleSynchronousEntry;
+
+struct SimpleEntryStat {
+ SimpleEntryStat();
+ SimpleEntryStat(base::Time last_used_p,
+ base::Time last_modified_p,
+ const int32 data_size_p[]);
+
+ base::Time last_used;
+ base::Time last_modified;
+ int32 data_size[kSimpleEntryFileCount];
+};
+
+struct SimpleEntryCreationResults {
+ SimpleEntryCreationResults(SimpleEntryStat entry_stat);
+ ~SimpleEntryCreationResults();
+
+ SimpleSynchronousEntry* sync_entry;
+ SimpleEntryStat entry_stat;
+ int result;
+};
+
+// Worker thread interface to the very simple cache. This interface is not
+// thread safe, and callers must ensure that it is only ever accessed from
+// a single thread between synchronization points.
+class SimpleSynchronousEntry {
+ public:
+ struct CRCRecord {
+ CRCRecord();
+ CRCRecord(int index_p, bool has_crc32_p, uint32 data_crc32_p);
+
+ int index;
+ bool has_crc32;
+ uint32 data_crc32;
+ };
+
+ struct EntryOperationData {
+ EntryOperationData(int index_p, int offset_p, int buf_len_p);
+ EntryOperationData(int index_p,
+ int offset_p,
+ int buf_len_p,
+ bool truncate_p);
+
+ int index;
+ int offset;
+ int buf_len;
+ bool truncate;
+ };
+
+ static void OpenEntry(const base::FilePath& path,
+ uint64 entry_hash,
+ bool had_index,
+ SimpleEntryCreationResults* out_results);
+
+ static void CreateEntry(const base::FilePath& path,
+ const std::string& key,
+ uint64 entry_hash,
+ bool had_index,
+ SimpleEntryCreationResults* out_results);
+
+  // Deletes an entry without first Opening it. Does not check if there is
+  // already an Entry object in memory holding the open files. Be careful! This
+  // is meant to be used by the Backend::DoomEntry() call. The result is
+  // returned through |out_result|.
+ static void DoomEntry(const base::FilePath& path,
+ const std::string& key,
+ uint64 entry_hash,
+ int* out_result);
+
+ // Like |DoomEntry()| above. Deletes all entries corresponding to the
+ // |key_hashes|. Succeeds only when all entries are deleted. Returns a net
+ // error code.
+ static int DoomEntrySet(scoped_ptr<std::vector<uint64> > key_hashes,
+ const base::FilePath& path);
+
+ // N.B. ReadData(), WriteData(), CheckEOFRecord() and Close() may block on IO.
+ void ReadData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* out_buf,
+ uint32* out_crc32,
+ base::Time* out_last_used,
+ int* out_result) const;
+ void WriteData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* in_buf,
+ SimpleEntryStat* out_entry_stat,
+ int* out_result) const;
+ void CheckEOFRecord(int index,
+ int data_size,
+ uint32 expected_crc32,
+ int* out_result) const;
+
+  // Close all streams, and write EOF records to the streams indicated by the
+  // CRCRecord entries in |crc32s_to_write|.
+ void Close(const SimpleEntryStat& entry_stat,
+ scoped_ptr<std::vector<CRCRecord> > crc32s_to_write);
+
+ const base::FilePath& path() const { return path_; }
+ std::string key() const { return key_; }
+
+ private:
+ SimpleSynchronousEntry(
+ const base::FilePath& path,
+ const std::string& key,
+ uint64 entry_hash);
+
+  // Like Entry, the SimpleSynchronousEntry self-releases when Close() is
+ // called.
+ ~SimpleSynchronousEntry();
+
+ bool OpenOrCreateFiles(bool create,
+ bool had_index,
+ SimpleEntryStat* out_entry_stat);
+ void CloseFiles();
+
+ // Returns a net error, i.e. net::OK on success. |had_index| is passed
+ // from the main entry for metrics purposes, and is true if the index was
+ // initialized when the open operation began.
+ int InitializeForOpen(bool had_index, SimpleEntryStat* out_entry_stat);
+
+  // Returns a net error, including net::OK on success and net::ERR_FILE_EXISTS
+ // when the entry already exists. |had_index| is passed from the main entry
+ // for metrics purposes, and is true if the index was initialized when the
+ // create operation began.
+ int InitializeForCreate(bool had_index, SimpleEntryStat* out_entry_stat);
+
+ void Doom() const;
+
+ static bool DeleteFilesForEntryHash(const base::FilePath& path,
+ uint64 entry_hash);
+
+ const base::FilePath path_;
+ const uint64 entry_hash_;
+ std::string key_;
+
+ bool have_open_files_;
+ bool initialized_;
+
+ base::PlatformFile files_[kSimpleEntryFileCount];
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_SYNCHRONOUS_ENTRY_H_
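
For readers of the format declared above: each stream of an entry lives in its own file, laid out as [SimpleFileHeader][key bytes][stream data][SimpleFileEOF]. The sketch below simply restates the offset arithmetic used by ReadData(), WriteData() and CheckEOFRecord() (via simple_util::GetFileOffsetFromKeyAndDataOffset()); it is illustrative, not part of the patch.

#include <string>

#include "base/basictypes.h"
#include "net/disk_cache/simple/simple_entry_format.h"

namespace {

// Illustrative only: file offset of byte |data_offset| of the stream, given
// the entry |key|.
int64 StreamOffsetToFileOffset(const std::string& key, int data_offset) {
  return sizeof(disk_cache::SimpleFileHeader) + key.size() + data_offset;
}

// The SimpleFileEOF trailer starts immediately after the last data byte.
int64 EOFRecordOffset(const std::string& key, int32 data_size) {
  return StreamOffsetToFileOffset(key, data_size);
}

}  // namespace
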
diff --git a/chromium/net/disk_cache/simple/simple_test_util.cc b/chromium/net/disk_cache/simple/simple_test_util.cc
new file mode 100644
index 00000000000..483cbec1cdd
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_test_util.cc
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_test_util.h"
+
+#include "base/file_util.h"
+#include "net/disk_cache/simple/simple_util.h"
+
+namespace disk_cache {
+
+namespace simple_util {
+
+bool CreateCorruptFileForTests(const std::string& key,
+ const base::FilePath& cache_path) {
+ base::FilePath entry_file_path = cache_path.AppendASCII(
+ disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
+ int flags = base::PLATFORM_FILE_CREATE_ALWAYS | base::PLATFORM_FILE_WRITE;
+ base::PlatformFile entry_file =
+ base::CreatePlatformFile(entry_file_path, flags, NULL, NULL);
+
+ if (base::kInvalidPlatformFileValue == entry_file)
+ return false;
+ if (base::WritePlatformFile(entry_file, 0, "dummy", 1) != 1)
+ return false;
+ if (!base::ClosePlatformFile(entry_file))
+ return false;
+
+ return true;
+}
+
+}  // namespace simple_util
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_test_util.h b/chromium/net/disk_cache/simple/simple_test_util.h
new file mode 100644
index 00000000000..98c140b1f14
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_test_util.h
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_TEST_UTIL_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_TEST_UTIL_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace disk_cache {
+
+namespace simple_util {
+
+// Creates a corrupt file to be used in tests.
+bool CreateCorruptFileForTests(const std::string& key,
+ const base::FilePath& cache_path);
+
+}  // namespace simple_util
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_TEST_UTIL_H_
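
A sketch of how this helper might be exercised (illustrative; DemoCorruptFile() is not part of the patch): the corrupt file is written under the name that stream 0 of |key| would use, and holds a single byte, far too small for a valid SimpleFileHeader.

#include <string>

#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/files/scoped_temp_dir.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"

// Illustrative helper (not upstream code). Returns true if the corrupt
// stream-0 file exists and is exactly one byte long.
bool DemoCorruptFile(const std::string& key) {
  base::ScopedTempDir temp_dir;
  if (!temp_dir.CreateUniqueTempDir())
    return false;
  if (!disk_cache::simple_util::CreateCorruptFileForTests(key, temp_dir.path()))
    return false;
  base::FilePath corrupt_file = temp_dir.path().AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
  int64 size = 0;
  return file_util::GetFileSize(corrupt_file, &size) && size == 1;
}
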
diff --git a/chromium/net/disk_cache/simple/simple_util.cc b/chromium/net/disk_cache/simple/simple_util.cc
new file mode 100644
index 00000000000..72a4612271f
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_util.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/simple/simple_util.h"
+
+#include <limits>
+
+#include "base/file_util.h"
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/sha1.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "net/disk_cache/simple/simple_entry_format.h"
+
+namespace {
+
+// Length of the hex string representation of a uint64 hash_key.
+const size_t kEntryHashKeyAsHexStringSize = 2 * sizeof(uint64);
+
+} // namespace
+
+namespace disk_cache {
+
+namespace simple_util {
+
+std::string ConvertEntryHashKeyToHexString(uint64 hash_key) {
+ const std::string hash_key_str = base::StringPrintf("%016" PRIx64, hash_key);
+ DCHECK_EQ(kEntryHashKeyAsHexStringSize, hash_key_str.size());
+ return hash_key_str;
+}
+
+std::string GetEntryHashKeyAsHexString(const std::string& key) {
+ std::string hash_key_str =
+ ConvertEntryHashKeyToHexString(GetEntryHashKey(key));
+ DCHECK_EQ(kEntryHashKeyAsHexStringSize, hash_key_str.size());
+ return hash_key_str;
+}
+
+bool GetEntryHashKeyFromHexString(const std::string& hash_key,
+ uint64* hash_key_out) {
+ if (hash_key.size() != kEntryHashKeyAsHexStringSize) {
+ return false;
+ }
+ return base::HexStringToUInt64(hash_key, hash_key_out);
+}
+
+uint64 GetEntryHashKey(const std::string& key) {
+ union {
+ unsigned char sha_hash[base::kSHA1Length];
+ uint64 key_hash;
+ } u;
+ base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(key.data()),
+ key.size(), u.sha_hash);
+ return u.key_hash;
+}
+
+std::string GetFilenameFromEntryHashAndIndex(uint64 entry_hash,
+ int index) {
+ return base::StringPrintf("%016" PRIx64 "_%1d", entry_hash, index);
+}
+
+std::string GetFilenameFromKeyAndIndex(const std::string& key, int index) {
+ return GetEntryHashKeyAsHexString(key) + base::StringPrintf("_%1d", index);
+}
+
+int32 GetDataSizeFromKeyAndFileSize(const std::string& key, int64 file_size) {
+ int64 data_size = file_size - key.size() - sizeof(SimpleFileHeader) -
+ sizeof(SimpleFileEOF);
+ DCHECK_GE(implicit_cast<int64>(std::numeric_limits<int32>::max()), data_size);
+ return data_size;
+}
+
+int64 GetFileSizeFromKeyAndDataSize(const std::string& key, int32 data_size) {
+ return data_size + key.size() + sizeof(SimpleFileHeader) +
+ sizeof(SimpleFileEOF);
+}
+
+int64 GetFileOffsetFromKeyAndDataOffset(const std::string& key,
+ int data_offset) {
+ const int64 headers_size = sizeof(disk_cache::SimpleFileHeader) + key.size();
+ return headers_size + data_offset;
+}
+
+// TODO(clamy, gavinp): this should go in base
+bool GetMTime(const base::FilePath& path, base::Time* out_mtime) {
+ DCHECK(out_mtime);
+ base::PlatformFileInfo file_info;
+ if (!file_util::GetFileInfo(path, &file_info))
+ return false;
+ *out_mtime = file_info.last_modified;
+ return true;
+}
+
+}  // namespace simple_util
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/simple/simple_util.h b/chromium/net/disk_cache/simple/simple_util.h
new file mode 100644
index 00000000000..2e92b4a049d
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_util.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_UTIL_H_
+#define NET_DISK_CACHE_SIMPLE_SIMPLE_UTIL_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+
+namespace base {
+class FilePath;
+class Time;
+}
+
+namespace disk_cache {
+
+namespace simple_util {
+
+NET_EXPORT_PRIVATE std::string ConvertEntryHashKeyToHexString(uint64 hash_key);
+
+// |key| is the regular cache key, such as a URL.
+// Returns the hex ASCII representation of the uint64 hash_key.
+NET_EXPORT_PRIVATE std::string GetEntryHashKeyAsHexString(
+ const std::string& key);
+
+// |key| is the regular HTTP Cache key, which is a URL.
+// Returns the hash of the key as uint64.
+NET_EXPORT_PRIVATE uint64 GetEntryHashKey(const std::string& key);
+
+// Parses the |hash_key| string into a uint64 buffer.
+// |hash_key| string must be of the form: FFFFFFFFFFFFFFFF.
+NET_EXPORT_PRIVATE bool GetEntryHashKeyFromHexString(
+ const std::string& hash_key,
+ uint64* hash_key_out);
+
+// Given a |key| for a (potential) entry in the simple backend and the |index|
+// of a stream on that entry, returns the filename in which that stream would be
+// stored.
+NET_EXPORT_PRIVATE std::string GetFilenameFromKeyAndIndex(
+ const std::string& key,
+ int index);
+
+// Same as |GetFilenameFromKeyAndIndex()| above, but takes the hash of the key.
+std::string GetFilenameFromEntryHashAndIndex(uint64 entry_hash, int index);
+
+// Given the size of a file holding a stream in the simple backend and the key
+// to an entry, returns the number of bytes in the stream.
+NET_EXPORT_PRIVATE int32 GetDataSizeFromKeyAndFileSize(const std::string& key,
+ int64 file_size);
+
+// Given the size of a stream in the simple backend and the key to an entry,
+// returns the number of bytes in the file.
+NET_EXPORT_PRIVATE int64 GetFileSizeFromKeyAndDataSize(const std::string& key,
+ int32 data_size);
+
+// Given the key to an entry, and an offset into a stream on that entry, returns
+// the file offset corresponding to |data_offset|.
+NET_EXPORT_PRIVATE int64 GetFileOffsetFromKeyAndDataOffset(
+ const std::string& key,
+ int data_offset);
+
+// Fills |out_mtime| with the file's last modified time.
+// TODO(gavinp): Remove this function.
+NET_EXPORT_PRIVATE bool GetMTime(const base::FilePath& path,
+ base::Time* out_mtime);
+}  // namespace simple_util
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_UTIL_H_
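
Tying the declarations above together: a key is hashed to a uint64, printed as 16 hex digits, and suffixed with the stream index to form the file name. A short sketch (illustrative only; the hash value is taken from simple_util_unittest.cc below):

#include <string>

#include "base/logging.h"
#include "net/disk_cache/simple/simple_util.h"

// Illustrative example (not upstream code).
void FilenameExample() {
  const std::string key("http://www.amazon.com/");
  // GetEntryHashKey(key) == 0x7ac408c1dff9c84b, so stream 0 of this entry is
  // stored in a file named "7ac408c1dff9c84b_0".
  const uint64 hash = disk_cache::simple_util::GetEntryHashKey(key);
  const std::string from_key =
      disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0);
  const std::string from_hash =
      disk_cache::simple_util::GetFilenameFromEntryHashAndIndex(hash, 0);
  DCHECK_EQ(from_key, from_hash);
}
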
diff --git a/chromium/net/disk_cache/simple/simple_util_unittest.cc b/chromium/net/disk_cache/simple/simple_util_unittest.cc
new file mode 100644
index 00000000000..35388e9f172
--- /dev/null
+++ b/chromium/net/disk_cache/simple/simple_util_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "net/disk_cache/simple/simple_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using disk_cache::simple_util::ConvertEntryHashKeyToHexString;
+using disk_cache::simple_util::GetEntryHashKeyAsHexString;
+using disk_cache::simple_util::GetEntryHashKeyFromHexString;
+using disk_cache::simple_util::GetEntryHashKey;
+using disk_cache::simple_util::GetFileSizeFromKeyAndDataSize;
+using disk_cache::simple_util::GetDataSizeFromKeyAndFileSize;
+
+class SimpleUtilTest : public testing::Test {};
+
+TEST_F(SimpleUtilTest, ConvertEntryHashKeyToHexString) {
+ EXPECT_EQ("0000000005f5e0ff",
+ ConvertEntryHashKeyToHexString(GG_UINT64_C(99999999)));
+ EXPECT_EQ("7fffffffffffffff",
+ ConvertEntryHashKeyToHexString(GG_UINT64_C(9223372036854775807)));
+ EXPECT_EQ("8000000000000000",
+ ConvertEntryHashKeyToHexString(GG_UINT64_C(9223372036854775808)));
+ EXPECT_EQ("ffffffffffffffff",
+ ConvertEntryHashKeyToHexString(GG_UINT64_C(18446744073709551615)));
+}
+
+TEST_F(SimpleUtilTest, GetEntryHashKey) {
+ EXPECT_EQ("7ac408c1dff9c84b",
+ GetEntryHashKeyAsHexString("http://www.amazon.com/"));
+ EXPECT_EQ(GG_UINT64_C(0x7ac408c1dff9c84b), GetEntryHashKey("http://www.amazon.com/"));
+
+ EXPECT_EQ("9fe947998c2ccf47",
+ GetEntryHashKeyAsHexString("www.amazon.com"));
+ EXPECT_EQ(GG_UINT64_C(0x9fe947998c2ccf47), GetEntryHashKey("www.amazon.com"));
+
+ EXPECT_EQ("0d4b6b5eeea339da", GetEntryHashKeyAsHexString(""));
+ EXPECT_EQ(GG_UINT64_C(0x0d4b6b5eeea339da), GetEntryHashKey(""));
+
+ EXPECT_EQ("a68ac2ecc87dfd04", GetEntryHashKeyAsHexString("http://www.domain.com/uoQ76Kb2QL5hzaVOSAKWeX0W9LfDLqphmRXpsfHN8tgF5lCsfTxlOVWY8vFwzhsRzoNYKhUIOTc5TnUlT0vpdQflPyk2nh7vurXOj60cDnkG3nsrXMhFCsPjhcZAic2jKpF9F9TYRYQwJo81IMi6gY01RK3ZcNl8WGfqcvoZ702UIdetvR7kiaqo1czwSJCMjRFdG6EgMzgXrwE8DYMz4fWqoa1F1c1qwTCBk3yOcmGTbxsPSJK5QRyNea9IFLrBTjfE7ZlN2vZiI7adcDYJef.htm"));
+
+ EXPECT_EQ(GG_UINT64_C(0xa68ac2ecc87dfd04), GetEntryHashKey("http://www.domain.com/uoQ76Kb2QL5hzaVOSAKWeX0W9LfDLqphmRXpsfHN8tgF5lCsfTxlOVWY8vFwzhsRzoNYKhUIOTc5TnUlT0vpdQflPyk2nh7vurXOj60cDnkG3nsrXMhFCsPjhcZAic2jKpF9F9TYRYQwJo81IMi6gY01RK3ZcNl8WGfqcvoZ702UIdetvR7kiaqo1czwSJCMjRFdG6EgMzgXrwE8DYMz4fWqoa1F1c1qwTCBk3yOcmGTbxsPSJK5QRyNea9IFLrBTjfE7ZlN2vZiI7adcDYJef.htm"));
+}
+
+TEST_F(SimpleUtilTest, GetEntryHashKeyFromHexString) {
+ uint64 hash_key = 0;
+ EXPECT_TRUE(GetEntryHashKeyFromHexString("0000000005f5e0ff", &hash_key));
+ EXPECT_EQ(GG_UINT64_C(99999999), hash_key);
+
+ EXPECT_TRUE(GetEntryHashKeyFromHexString("7ffffffffffffffF", &hash_key));
+ EXPECT_EQ(GG_UINT64_C(9223372036854775807), hash_key);
+
+ EXPECT_TRUE(GetEntryHashKeyFromHexString("8000000000000000", &hash_key));
+ EXPECT_EQ(GG_UINT64_C(9223372036854775808), hash_key);
+
+ EXPECT_TRUE(GetEntryHashKeyFromHexString("FFFFFFFFFFFFFFFF", &hash_key));
+ EXPECT_EQ(GG_UINT64_C(18446744073709551615), hash_key);
+
+ // Wrong hash string size.
+ EXPECT_FALSE(GetEntryHashKeyFromHexString("FFFFFFFFFFFFFFF", &hash_key));
+
+ // Wrong hash string size.
+ EXPECT_FALSE(GetEntryHashKeyFromHexString("FFFFFFFFFFFFFFFFF", &hash_key));
+
+ EXPECT_FALSE(GetEntryHashKeyFromHexString("iwr8wglhg8*(&1231((", &hash_key));
+}
+
+TEST_F(SimpleUtilTest, SizesAndOffsets) {
+ const char key[] = "This is an example key";
+ const int data_size = 1000;
+ const int file_size = GetFileSizeFromKeyAndDataSize(key, data_size);
+ EXPECT_EQ(data_size, GetDataSizeFromKeyAndFileSize(key, file_size));
+}
diff --git a/chromium/net/disk_cache/sparse_control.cc b/chromium/net/disk_cache/sparse_control.cc
new file mode 100644
index 00000000000..b96ccc9faff
--- /dev/null
+++ b/chromium/net/disk_cache/sparse_control.cc
@@ -0,0 +1,884 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/sparse_control.h"
+
+#include "base/bind.h"
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/file.h"
+#include "net/disk_cache/net_log_parameters.h"
+
+using base::Time;
+
+namespace {
+
+// Stream of the sparse data index.
+const int kSparseIndex = 2;
+
+// Stream of the sparse data.
+const int kSparseData = 1;
+
+// We can have up to 64k children.
+const int kMaxMapSize = 8 * 1024;
+
+// The maximum number of bytes that a child can store.
+const int kMaxEntrySize = 0x100000;
+
+// The size of each data block (tracked by the child allocation bitmap).
+const int kBlockSize = 1024;
+
+// Returns the name of a child entry given the base_name and signature of the
+// parent and the child_id.
+// If the entry is called entry_name, child entries will be named something
+// like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the
+// number of the particular child.
+std::string GenerateChildName(const std::string& base_name, int64 signature,
+ int64 child_id) {
+ return base::StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(),
+ signature, child_id);
+}
+
+// This class deletes the children of a sparse entry.
+class ChildrenDeleter
+ : public base::RefCounted<ChildrenDeleter>,
+ public disk_cache::FileIOCallback {
+ public:
+ ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name)
+ : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {}
+
+ virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
+
+ // Two ways of deleting the children: if we have the children map, use Start()
+ // directly, otherwise pass the data address to ReadData().
+ void Start(char* buffer, int len);
+ void ReadData(disk_cache::Addr address, int len);
+
+ private:
+ friend class base::RefCounted<ChildrenDeleter>;
+ virtual ~ChildrenDeleter() {}
+
+ void DeleteChildren();
+
+ base::WeakPtr<disk_cache::BackendImpl> backend_;
+ std::string name_;
+ disk_cache::Bitmap children_map_;
+ int64 signature_;
+ scoped_ptr<char[]> buffer_;
+ DISALLOW_COPY_AND_ASSIGN(ChildrenDeleter);
+};
+
+// This is the callback of the file operation.
+void ChildrenDeleter::OnFileIOComplete(int bytes_copied) {
+ char* buffer = buffer_.release();
+ Start(buffer, bytes_copied);
+}
+
+void ChildrenDeleter::Start(char* buffer, int len) {
+ buffer_.reset(buffer);
+ if (len < static_cast<int>(sizeof(disk_cache::SparseData)))
+ return Release();
+
+ // Just copy the information from |buffer|, delete |buffer| and start deleting
+ // the child entries.
+ disk_cache::SparseData* data =
+ reinterpret_cast<disk_cache::SparseData*>(buffer);
+ signature_ = data->header.signature;
+
+ int num_bits = (len - sizeof(disk_cache::SparseHeader)) * 8;
+ children_map_.Resize(num_bits, false);
+ children_map_.SetMap(data->bitmap, num_bits / 32);
+ buffer_.reset();
+
+ DeleteChildren();
+}
+
+void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
+ DCHECK(address.is_block_file());
+ if (!backend_.get())
+ return Release();
+
+ disk_cache::File* file(backend_->File(address));
+ if (!file)
+ return Release();
+
+ size_t file_offset = address.start_block() * address.BlockSize() +
+ disk_cache::kBlockHeaderSize;
+
+ buffer_.reset(new char[len]);
+ bool completed;
+ if (!file->Read(buffer_.get(), len, file_offset, this, &completed))
+ return Release();
+
+ if (completed)
+ OnFileIOComplete(len);
+
+ // And wait until OnFileIOComplete gets called.
+}
+
+void ChildrenDeleter::DeleteChildren() {
+ int child_id = 0;
+ if (!children_map_.FindNextSetBit(&child_id) || !backend_.get()) {
+ // We are done. Just delete this object.
+ return Release();
+ }
+ std::string child_name = GenerateChildName(name_, signature_, child_id);
+ backend_->SyncDoomEntry(child_name);
+ children_map_.Set(child_id, false);
+
+ // Post a task to delete the next child.
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&ChildrenDeleter::DeleteChildren, this));
+}
+
+// Returns the NetLog event type corresponding to a SparseOperation.
+net::NetLog::EventType GetSparseEventType(
+ disk_cache::SparseControl::SparseOperation operation) {
+ switch (operation) {
+ case disk_cache::SparseControl::kReadOperation:
+ return net::NetLog::TYPE_SPARSE_READ;
+ case disk_cache::SparseControl::kWriteOperation:
+ return net::NetLog::TYPE_SPARSE_WRITE;
+ case disk_cache::SparseControl::kGetRangeOperation:
+ return net::NetLog::TYPE_SPARSE_GET_RANGE;
+ default:
+ NOTREACHED();
+ return net::NetLog::TYPE_CANCELLED;
+ }
+}
+
+// Logs the end event for |operation| on a child entry. Range operations log
+// no events for each child they search through.
+void LogChildOperationEnd(const net::BoundNetLog& net_log,
+ disk_cache::SparseControl::SparseOperation operation,
+ int result) {
+ if (net_log.IsLoggingAllEvents()) {
+ net::NetLog::EventType event_type;
+ switch (operation) {
+ case disk_cache::SparseControl::kReadOperation:
+ event_type = net::NetLog::TYPE_SPARSE_READ_CHILD_DATA;
+ break;
+ case disk_cache::SparseControl::kWriteOperation:
+ event_type = net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA;
+ break;
+ case disk_cache::SparseControl::kGetRangeOperation:
+ return;
+ default:
+ NOTREACHED();
+ return;
+ }
+ net_log.EndEventWithNetErrorCode(event_type, result);
+ }
+}
+
+}  // namespace
+
+namespace disk_cache {
+
+SparseControl::SparseControl(EntryImpl* entry)
+ : entry_(entry),
+ child_(NULL),
+ operation_(kNoOperation),
+ pending_(false),
+ finished_(false),
+ init_(false),
+ range_found_(false),
+ abort_(false),
+ child_map_(child_data_.bitmap, kNumSparseBits, kNumSparseBits / 32),
+ offset_(0),
+ buf_len_(0),
+ child_offset_(0),
+ child_len_(0),
+ result_(0) {
+ memset(&sparse_header_, 0, sizeof(sparse_header_));
+ memset(&child_data_, 0, sizeof(child_data_));
+}
+
+SparseControl::~SparseControl() {
+ if (child_)
+ CloseChild();
+ if (init_)
+ WriteSparseData();
+}
+
+int SparseControl::Init() {
+ DCHECK(!init_);
+
+ // We should not have sparse data for the exposed entry.
+ if (entry_->GetDataSize(kSparseData))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Now see if there is something where we store our data.
+ int rv = net::OK;
+ int data_len = entry_->GetDataSize(kSparseIndex);
+ if (!data_len) {
+ rv = CreateSparseEntry();
+ } else {
+ rv = OpenSparseEntry(data_len);
+ }
+
+ if (rv == net::OK)
+ init_ = true;
+ return rv;
+}
+
+bool SparseControl::CouldBeSparse() const {
+ DCHECK(!init_);
+
+ if (entry_->GetDataSize(kSparseData))
+ return false;
+
+ // We don't verify the data, just see if it could be there.
+ return (entry_->GetDataSize(kSparseIndex) != 0);
+}
+
+int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback) {
+ DCHECK(init_);
+ // We don't support simultaneous IO for sparse data.
+ if (operation_ != kNoOperation)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ // We only support up to 64 GB.
+ if (offset + buf_len >= 0x1000000000LL || offset + buf_len < 0)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ DCHECK(!user_buf_.get());
+ DCHECK(user_callback_.is_null());
+
+ if (!buf && (op == kReadOperation || op == kWriteOperation))
+ return 0;
+
+ // Copy the operation parameters.
+ operation_ = op;
+ offset_ = offset;
+ user_buf_ = buf ? new net::DrainableIOBuffer(buf, buf_len) : NULL;
+ buf_len_ = buf_len;
+ user_callback_ = callback;
+
+ result_ = 0;
+ pending_ = false;
+ finished_ = false;
+ abort_ = false;
+
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().BeginEvent(
+ GetSparseEventType(operation_),
+ CreateNetLogSparseOperationCallback(offset_, buf_len_));
+ }
+ DoChildrenIO();
+
+ if (!pending_) {
+ // Everything was done synchronously.
+ operation_ = kNoOperation;
+ user_buf_ = NULL;
+ user_callback_.Reset();
+ return result_;
+ }
+
+ return net::ERR_IO_PENDING;
+}
+
+int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
+ DCHECK(init_);
+ // We don't support simultaneous IO for sparse data.
+ if (operation_ != kNoOperation)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ DCHECK(start);
+
+ range_found_ = false;
+ int result = StartIO(
+ kGetRangeOperation, offset, NULL, len, CompletionCallback());
+ if (range_found_) {
+ *start = offset_;
+ return result;
+ }
+
+ // This is a failure. We want to return a valid start value in any case.
+ *start = offset;
+ return result < 0 ? result : 0; // Don't mask error codes to the caller.
+}
+
+void SparseControl::CancelIO() {
+ if (operation_ == kNoOperation)
+ return;
+ abort_ = true;
+}
+
+int SparseControl::ReadyToUse(const CompletionCallback& callback) {
+ if (!abort_)
+ return net::OK;
+
+ // We'll grab another reference to keep this object alive because we just have
+ // one extra reference due to the pending IO operation itself, but we'll
+ // release that one before invoking user_callback_.
+ entry_->AddRef(); // Balanced in DoAbortCallbacks.
+ abort_callbacks_.push_back(callback);
+ return net::ERR_IO_PENDING;
+}
+
+// Static
+void SparseControl::DeleteChildren(EntryImpl* entry) {
+ DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
+ int data_len = entry->GetDataSize(kSparseIndex);
+ if (data_len < static_cast<int>(sizeof(SparseData)) ||
+ entry->GetDataSize(kSparseData))
+ return;
+
+ int map_len = data_len - sizeof(SparseHeader);
+ if (map_len > kMaxMapSize || map_len % 4)
+ return;
+
+ char* buffer;
+ Addr address;
+ entry->GetData(kSparseIndex, &buffer, &address);
+ if (!buffer && !address.is_initialized())
+ return;
+
+ entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN);
+
+ DCHECK(entry->backend_.get());
+ ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_.get(),
+ entry->GetKey());
+ // The object will self destruct when finished.
+ deleter->AddRef();
+
+ if (buffer) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&ChildrenDeleter::Start, deleter, buffer, data_len));
+ } else {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&ChildrenDeleter::ReadData, deleter, address, data_len));
+ }
+}
+
+// We are going to start using this entry to store sparse data, so we have to
+// initialize our control info.
+int SparseControl::CreateSparseEntry() {
+ if (CHILD_ENTRY & entry_->GetEntryFlags())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ memset(&sparse_header_, 0, sizeof(sparse_header_));
+ sparse_header_.signature = Time::Now().ToInternalValue();
+ sparse_header_.magic = kIndexMagic;
+ sparse_header_.parent_key_len = entry_->GetKey().size();
+ children_map_.Resize(kNumSparseBits, true);
+
+ // Save the header. The bitmap is saved in the destructor.
+ scoped_refptr<net::IOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
+
+ int rv = entry_->WriteData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
+ CompletionCallback(), false);
+ if (rv != sizeof(sparse_header_)) {
+ DLOG(ERROR) << "Unable to save sparse_header_";
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+ }
+
+ entry_->SetEntryFlags(PARENT_ENTRY);
+ return net::OK;
+}
+
+// We are opening an entry from disk. Make sure that our control data is there.
+int SparseControl::OpenSparseEntry(int data_len) {
+ if (data_len < static_cast<int>(sizeof(SparseData)))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (entry_->GetDataSize(kSparseData))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (!(PARENT_ENTRY & entry_->GetEntryFlags()))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+  // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB.
+ int map_len = data_len - sizeof(sparse_header_);
+ if (map_len > kMaxMapSize || map_len % 4)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ scoped_refptr<net::IOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
+
+ // Read header.
+ int rv = entry_->ReadData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
+ CompletionCallback());
+ if (rv != static_cast<int>(sizeof(sparse_header_)))
+ return net::ERR_CACHE_READ_FAILURE;
+
+ // The real validation should be performed by the caller. This is just to
+ // double check.
+ if (sparse_header_.magic != kIndexMagic ||
+ sparse_header_.parent_key_len !=
+ static_cast<int>(entry_->GetKey().size()))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Read the actual bitmap.
+ buf = new net::IOBuffer(map_len);
+ rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf.get(),
+ map_len, CompletionCallback());
+ if (rv != map_len)
+ return net::ERR_CACHE_READ_FAILURE;
+
+ // Grow the bitmap to the current size and copy the bits.
+ children_map_.Resize(map_len * 8, false);
+ children_map_.SetMap(reinterpret_cast<uint32*>(buf->data()), map_len);
+ return net::OK;
+}
+
+bool SparseControl::OpenChild() {
+ DCHECK_GE(result_, 0);
+
+ std::string key = GenerateChildKey();
+ if (child_) {
+    // Keep using the same child, or open another one?
+ if (key == child_->GetKey())
+ return true;
+ CloseChild();
+ }
+
+ // See if we are tracking this child.
+ if (!ChildPresent())
+ return ContinueWithoutChild(key);
+
+ if (!entry_->backend_.get())
+ return false;
+
+ child_ = entry_->backend_->OpenEntryImpl(key);
+ if (!child_)
+ return ContinueWithoutChild(key);
+
+ EntryImpl* child = static_cast<EntryImpl*>(child_);
+ if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
+ child->GetDataSize(kSparseIndex) <
+ static_cast<int>(sizeof(child_data_)))
+ return KillChildAndContinue(key, false);
+
+ scoped_refptr<net::WrappedIOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
+
+ // Read signature.
+ int rv = child_->ReadData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
+ CompletionCallback());
+ if (rv != sizeof(child_data_))
+ return KillChildAndContinue(key, true); // This is a fatal failure.
+
+ if (child_data_.header.signature != sparse_header_.signature ||
+ child_data_.header.magic != kIndexMagic)
+ return KillChildAndContinue(key, false);
+
+ if (child_data_.header.last_block_len < 0 ||
+ child_data_.header.last_block_len > kBlockSize) {
+ // Make sure these values are always within range.
+ child_data_.header.last_block_len = 0;
+ child_data_.header.last_block = -1;
+ }
+
+ return true;
+}
+
+void SparseControl::CloseChild() {
+ scoped_refptr<net::WrappedIOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
+
+ // Save the allocation bitmap before closing the child entry.
+ int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
+ CompletionCallback(), false);
+ if (rv != sizeof(child_data_))
+ DLOG(ERROR) << "Failed to save child data";
+ child_->Release();
+ child_ = NULL;
+}
+
+std::string SparseControl::GenerateChildKey() {
+ return GenerateChildName(entry_->GetKey(), sparse_header_.signature,
+ offset_ >> 20);
+}
+
+// We are deleting the child because something went wrong.
+bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
+ SetChildBit(false);
+ child_->DoomImpl();
+ child_->Release();
+ child_ = NULL;
+ if (fatal) {
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ return false;
+ }
+ return ContinueWithoutChild(key);
+}
+
+// We were not able to open this child; see what we can do.
+bool SparseControl::ContinueWithoutChild(const std::string& key) {
+ if (kReadOperation == operation_)
+ return false;
+ if (kGetRangeOperation == operation_)
+ return true;
+
+ if (!entry_->backend_.get())
+ return false;
+
+ child_ = entry_->backend_->CreateEntryImpl(key);
+ if (!child_) {
+ child_ = NULL;
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ return false;
+ }
+ // Write signature.
+ InitChildData();
+ return true;
+}
+
+bool SparseControl::ChildPresent() {
+ int child_bit = static_cast<int>(offset_ >> 20);
+ if (children_map_.Size() <= child_bit)
+ return false;
+
+ return children_map_.Get(child_bit);
+}
+
+void SparseControl::SetChildBit(bool value) {
+ int child_bit = static_cast<int>(offset_ >> 20);
+
+ // We may have to increase the bitmap of child entries.
+ if (children_map_.Size() <= child_bit)
+ children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);
+
+ children_map_.Set(child_bit, value);
+}
+
+void SparseControl::WriteSparseData() {
+ scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer(
+ reinterpret_cast<const char*>(children_map_.GetMap())));
+
+ int len = children_map_.ArraySize() * 4;
+ int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf.get(),
+ len, CompletionCallback(), false);
+ if (rv != len) {
+ DLOG(ERROR) << "Unable to save sparse map";
+ }
+}
+
+bool SparseControl::VerifyRange() {
+ DCHECK_GE(result_, 0);
+
+ child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
+ child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);
+
+ // We can write to (or get info from) anywhere in this child.
+ if (operation_ != kReadOperation)
+ return true;
+
+ // Check that there are no holes in this range.
+ int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ int start = child_offset_ >> 10;
+ if (child_map_.FindNextBit(&start, last_bit, false)) {
+ // Something is not here.
+ DCHECK_GE(child_data_.header.last_block_len, 0);
+ DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+ int partial_block_len = PartialBlockLength(start);
+ if (start == child_offset_ >> 10) {
+ // It looks like we don't have anything.
+ if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
+ return false;
+ }
+
+ // We have the first part.
+ child_len_ = (start << 10) - child_offset_;
+ if (partial_block_len) {
+ // We may have a few extra bytes.
+ child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
+ }
+ // There is no need to read more after this one.
+ buf_len_ = child_len_;
+ }
+ return true;
+}
+
+void SparseControl::UpdateRange(int result) {
+ if (result <= 0 || operation_ != kWriteOperation)
+ return;
+
+ DCHECK_GE(child_data_.header.last_block_len, 0);
+ DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+
+ // Write the bitmap.
+ int first_bit = child_offset_ >> 10;
+ int block_offset = child_offset_ & (kBlockSize - 1);
+ if (block_offset && (child_data_.header.last_block != first_bit ||
+ child_data_.header.last_block_len < block_offset)) {
+ // The first block is not completely filled; ignore it.
+ first_bit++;
+ }
+
+ int last_bit = (child_offset_ + result) >> 10;
+ block_offset = (child_offset_ + result) & (kBlockSize - 1);
+
+ // This condition will hit with the following criteria:
+ // 1. The first byte doesn't follow the last write.
+ // 2. The first byte is in the middle of a block.
+ // 3. The first byte and the last byte are in the same block.
+ if (first_bit > last_bit)
+ return;
+
+ if (block_offset && !child_map_.Get(last_bit)) {
+ // The last block is not completely filled; save it for later.
+ child_data_.header.last_block = last_bit;
+ child_data_.header.last_block_len = block_offset;
+ } else {
+ child_data_.header.last_block = -1;
+ }
+
+ child_map_.SetRange(first_bit, last_bit, true);
+}
+
+int SparseControl::PartialBlockLength(int block_index) const {
+ if (block_index == child_data_.header.last_block)
+ return child_data_.header.last_block_len;
+
+ // This may be the last stored index.
+ int entry_len = child_->GetDataSize(kSparseData);
+ if (block_index == entry_len >> 10)
+ return entry_len & (kBlockSize - 1);
+
+ // This is really empty.
+ return 0;
+}
+
+void SparseControl::InitChildData() {
+ // We know the real type of child_.
+ EntryImpl* child = static_cast<EntryImpl*>(child_);
+ child->SetEntryFlags(CHILD_ENTRY);
+
+ memset(&child_data_, 0, sizeof(child_data_));
+ child_data_.header = sparse_header_;
+
+ scoped_refptr<net::WrappedIOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
+
+ int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
+ CompletionCallback(), false);
+ if (rv != sizeof(child_data_))
+ DLOG(ERROR) << "Failed to save child data";
+ SetChildBit(true);
+}
+
+void SparseControl::DoChildrenIO() {
+ while (DoChildIO()) continue;
+
+ // Range operations are finished synchronously, often without setting
+ // |finished_| to true.
+ if (kGetRangeOperation == operation_ &&
+ entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().EndEvent(
+ net::NetLog::TYPE_SPARSE_GET_RANGE,
+ CreateNetLogGetAvailableRangeResultCallback(offset_, result_));
+ }
+ if (finished_) {
+ if (kGetRangeOperation != operation_ &&
+ entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().EndEvent(GetSparseEventType(operation_));
+ }
+ if (pending_)
+ DoUserCallback(); // Don't touch this object after this point.
+ }
+}
+
+bool SparseControl::DoChildIO() {
+ finished_ = true;
+ if (!buf_len_ || result_ < 0)
+ return false;
+
+ if (!OpenChild())
+ return false;
+
+ if (!VerifyRange())
+ return false;
+
+ // We have more work to do. Let's not trigger a callback to the caller.
+ finished_ = false;
+ CompletionCallback callback;
+ if (!user_callback_.is_null()) {
+ callback =
+ base::Bind(&SparseControl::OnChildIOCompleted, base::Unretained(this));
+ }
+
+ int rv = 0;
+ switch (operation_) {
+ case kReadOperation:
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().BeginEvent(
+ net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
+ child_len_));
+ }
+ rv = child_->ReadDataImpl(kSparseData, child_offset_, user_buf_.get(),
+ child_len_, callback);
+ break;
+ case kWriteOperation:
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
+ child_len_));
+ }
+ rv = child_->WriteDataImpl(kSparseData, child_offset_, user_buf_.get(),
+ child_len_, callback, false);
+ break;
+ case kGetRangeOperation:
+ rv = DoGetAvailableRange();
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ if (rv == net::ERR_IO_PENDING) {
+ if (!pending_) {
+ pending_ = true;
+      // The child will protect itself against closing the entry while IO is in
+ // progress. However, this entry can still be closed, and that would not
+ // be a good thing for us, so we increase the refcount until we're
+ // finished doing sparse stuff.
+ entry_->AddRef(); // Balanced in DoUserCallback.
+ }
+ return false;
+ }
+ if (!rv)
+ return false;
+
+ DoChildIOCompleted(rv);
+ return true;
+}
+
+int SparseControl::DoGetAvailableRange() {
+ if (!child_)
+ return child_len_; // Move on to the next child.
+
+ // Check that there are no holes in this range.
+ int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ int start = child_offset_ >> 10;
+ int partial_start_bytes = PartialBlockLength(start);
+ int found = start;
+ int bits_found = child_map_.FindBits(&found, last_bit, true);
+
+ // We don't care if there is a partial block in the middle of the range.
+ int block_offset = child_offset_ & (kBlockSize - 1);
+ if (!bits_found && partial_start_bytes <= block_offset)
+ return child_len_;
+
+ // We are done. Just break the loop and reset result_ to our real result.
+ range_found_ = true;
+
+  // |found| now points to the first 1. Let's see if we have zeros before it.
+ int empty_start = std::max((found << 10) - child_offset_, 0);
+
+ int bytes_found = bits_found << 10;
+ bytes_found += PartialBlockLength(found + bits_found);
+
+ if (start == found)
+ bytes_found -= block_offset;
+
+ // If the user is searching past the end of this child, bits_found is the
+ // right result; otherwise, we have some empty space at the start of this
+ // query that we have to subtract from the range that we searched.
+ result_ = std::min(bytes_found, child_len_ - empty_start);
+
+ if (!bits_found) {
+ result_ = std::min(partial_start_bytes - block_offset, child_len_);
+ empty_start = 0;
+ }
+
+ // Only update offset_ when this query found zeros at the start.
+ if (empty_start)
+ offset_ += empty_start;
+
+ // This will actually break the loop.
+ buf_len_ = 0;
+ return 0;
+}
+
+void SparseControl::DoChildIOCompleted(int result) {
+ LogChildOperationEnd(entry_->net_log(), operation_, result);
+ if (result < 0) {
+ // We fail the whole operation if we encounter an error.
+ result_ = result;
+ return;
+ }
+
+ UpdateRange(result);
+
+ result_ += result;
+ offset_ += result;
+ buf_len_ -= result;
+
+  // We'll be reusing the user-provided buffer for the next chunk.
+ if (buf_len_ && user_buf_.get())
+ user_buf_->DidConsume(result);
+}
+
+void SparseControl::OnChildIOCompleted(int result) {
+ DCHECK_NE(net::ERR_IO_PENDING, result);
+ DoChildIOCompleted(result);
+
+ if (abort_) {
+ // We'll return the current result of the operation, which may be less than
+ // the bytes to read or write, but the user cancelled the operation.
+ abort_ = false;
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED);
+ entry_->net_log().EndEvent(GetSparseEventType(operation_));
+ }
+ // We have an indirect reference to this object for every callback so if
+ // there is only one callback, we may delete this object before reaching
+ // DoAbortCallbacks.
+ bool has_abort_callbacks = !abort_callbacks_.empty();
+ DoUserCallback();
+ if (has_abort_callbacks)
+ DoAbortCallbacks();
+ return;
+ }
+
+ // We are running a callback from the message loop. It's time to restart what
+ // we were doing before.
+ DoChildrenIO();
+}
+
+void SparseControl::DoUserCallback() {
+ DCHECK(!user_callback_.is_null());
+ CompletionCallback cb = user_callback_;
+ user_callback_.Reset();
+ user_buf_ = NULL;
+ pending_ = false;
+ operation_ = kNoOperation;
+ int rv = result_;
+ entry_->Release(); // Don't touch object after this line.
+ cb.Run(rv);
+}
+
+void SparseControl::DoAbortCallbacks() {
+ for (size_t i = 0; i < abort_callbacks_.size(); i++) {
+ // Releasing all references to entry_ may result in the destruction of this
+ // object so we should not be touching it after the last Release().
+ CompletionCallback cb = abort_callbacks_[i];
+ if (i == abort_callbacks_.size() - 1)
+ abort_callbacks_.clear();
+
+ entry_->Release(); // Don't touch object after this line.
+ cb.Run(net::OK);
+ }
+}
+
+} // namespace disk_cache
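
Editor's sketch of the offset arithmetic used throughout SparseControl above (illustrative, not part of the patch): each child entry covers kMaxEntrySize (1 MB) of the sparse address space, and tracks its own data in kBlockSize (1 KB) blocks.

#include "base/basictypes.h"

namespace {

// Not upstream code; e.g. sparse offset 0x543210 -> child 5,
// child offset 0x43210, bitmap bit 0x10c.
void MapSparseOffset(int64 sparse_offset) {
  // Child id, as used by GenerateChildKey() to build "Range_<key>:<sig>:<id>".
  int64 child_id = sparse_offset >> 20;                        // 1 << 20 == kMaxEntrySize.
  // Offset of this byte within the child's data stream (see VerifyRange()).
  int child_offset = static_cast<int>(sparse_offset) & (0x100000 - 1);
  // Bit in the child's allocation bitmap covering this byte.
  int child_bit = child_offset >> 10;                          // 1 << 10 == kBlockSize.
  (void)child_id;
  (void)child_offset;
  (void)child_bit;
}

}  // namespace
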
diff --git a/chromium/net/disk_cache/sparse_control.h b/chromium/net/disk_cache/sparse_control.h
new file mode 100644
index 00000000000..a018e18742e
--- /dev/null
+++ b/chromium/net/disk_cache/sparse_control.h
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SPARSE_CONTROL_H_
+#define NET_DISK_CACHE_SPARSE_CONTROL_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/base/completion_callback.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/disk_format.h"
+
+namespace net {
+class IOBuffer;
+class DrainableIOBuffer;
+}
+
+namespace disk_cache {
+
+class Entry;
+class EntryImpl;
+
+// This class provides support for the sparse capabilities of the disk cache.
+// Basically, sparse IO is directed from EntryImpl to this class, and we split
+// the operation into multiple small pieces, sending each one to the
+// appropriate entry. An instance of this class is associated with each entry
+// used directly for sparse operations (the entry passed to the constructor).
+class SparseControl {
+ public:
+ typedef net::CompletionCallback CompletionCallback;
+
+ // The operation to perform.
+ enum SparseOperation {
+ kNoOperation,
+ kReadOperation,
+ kWriteOperation,
+ kGetRangeOperation
+ };
+
+ explicit SparseControl(EntryImpl* entry);
+ ~SparseControl();
+
+ // Initializes the object for the current entry. If this entry already stores
+ // sparse data, or can be used to do it, it updates the relevant information
+ // on disk and returns net::OK. Otherwise it returns a net error code.
+ int Init();
+
+ // Performs a quick test to see if the entry is sparse or not, without
+ // generating disk IO (so the answer provided is only a best effort).
+ bool CouldBeSparse() const;
+
+ // Performs an actual sparse read or write operation for this entry. |op| is
+ // the operation to perform, |offset| is the desired sparse offset, |buf| and
+ // |buf_len| specify the actual data to use and |callback| is the callback
+  // to use for asynchronous operations. See the description of ReadSparseData /
+ // WriteSparseData for details about the arguments. The return value is the
+ // number of bytes read or written, or a net error code.
+ int StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback);
+
+ // Implements Entry::GetAvailableRange().
+ int GetAvailableRange(int64 offset, int len, int64* start);
+
+ // Cancels the current sparse operation (if any).
+ void CancelIO();
+
+ // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
+ // busy. If the entry is busy, we'll invoke the callback when we are ready
+ // again. See disk_cache::Entry::ReadyToUse() for more info.
+ int ReadyToUse(const CompletionCallback& completion_callback);
+
+ // Deletes the children entries of |entry|.
+ static void DeleteChildren(EntryImpl* entry);
+
+ private:
+  // Creates a new sparse entry or opens an already created entry from disk.
+ // These methods just read / write the required info from disk for the current
+ // entry, and verify that everything is correct. The return value is a net
+ // error code.
+ int CreateSparseEntry();
+ int OpenSparseEntry(int data_len);
+
+ // Opens and closes a child entry. A child entry is a regular EntryImpl object
+ // with a key derived from the key of the resource to store and the range
+ // stored by that child.
+ bool OpenChild();
+ void CloseChild();
+ std::string GenerateChildKey();
+
+ // Deletes the current child and continues the current operation (open).
+ bool KillChildAndContinue(const std::string& key, bool fatal);
+
+ // Continues the current operation (open) without a current child.
+ bool ContinueWithoutChild(const std::string& key);
+
+ // Returns true if the required child is tracked by the parent entry, i.e. it
+ // was already created.
+ bool ChildPresent();
+
+ // Sets the bit for the current child to the provided |value|. In other words,
+ // starts or stops tracking this child.
+ void SetChildBit(bool value);
+
+ // Writes to disk the tracking information for this entry.
+ void WriteSparseData();
+
+ // Verify that the range to be accessed for the current child is appropriate.
+ // Returns false if an error is detected or there is no need to perform the
+ // current IO operation (for instance if the required range is not stored by
+ // the child).
+ bool VerifyRange();
+
+ // Updates the contents bitmap for the current range, based on the result of
+ // the current operation.
+ void UpdateRange(int result);
+
+ // Returns the number of bytes stored at |block_index|, if its allocation-bit
+ // is off (because it is not completely filled).
+ int PartialBlockLength(int block_index) const;
+
+ // Initializes the sparse info for the current child.
+ void InitChildData();
+
+ // Iterates through all the children needed to complete the current operation.
+ void DoChildrenIO();
+
+ // Performs a single operation with the current child. Returns true when we
+ // should move on to the next child and false when we should interrupt our
+ // work.
+ bool DoChildIO();
+
+ // Performs the required work for GetAvailableRange for one child.
+ int DoGetAvailableRange();
+
+  // Performs the required work after a single IO operation finishes.
+ void DoChildIOCompleted(int result);
+
+ // Invoked by the callback of asynchronous operations.
+ void OnChildIOCompleted(int result);
+
+ // Reports to the user that we are done.
+ void DoUserCallback();
+ void DoAbortCallbacks();
+
+ EntryImpl* entry_; // The sparse entry.
+ EntryImpl* child_; // The current child entry.
+ SparseOperation operation_;
+ bool pending_; // True if any child IO operation returned pending.
+ bool finished_;
+ bool init_;
+ bool range_found_; // True if GetAvailableRange found something.
+ bool abort_; // True if we should abort the current operation ASAP.
+
+ SparseHeader sparse_header_; // Data about the children of entry_.
+ Bitmap children_map_; // The actual bitmap of children.
+ SparseData child_data_; // Parent and allocation map of child_.
+ Bitmap child_map_; // The allocation map as a bitmap.
+
+ CompletionCallback user_callback_;
+ std::vector<CompletionCallback> abort_callbacks_;
+ int64 offset_; // Current sparse offset.
+ scoped_refptr<net::DrainableIOBuffer> user_buf_;
+ int buf_len_; // Bytes to read or write.
+ int child_offset_; // Offset to use for the current child.
+ int child_len_; // Bytes to read or write for this child.
+ int result_;
+
+ DISALLOW_COPY_AND_ASSIGN(SparseControl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SPARSE_CONTROL_H_
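
The header above describes sparse IO as being split into multiple small pieces, each routed to a child entry covering part of the requested range (see child_offset_ / child_len_). Below is a minimal standalone sketch of that splitting step; it assumes a hypothetical fixed child size chosen purely for illustration, not the constant the real cache uses.

// Minimal sketch: split a sparse (offset, len) request into per-child chunks.
#include <algorithm>
#include <cstdint>
#include <cstdio>

namespace {

const int64_t kChildSizeBytes = 1 << 20;  // 1 MB per child, assumed.

void SplitSparseRange(int64_t offset, int len) {
  while (len > 0) {
    int64_t child_index = offset / kChildSizeBytes;
    int child_offset = static_cast<int>(offset % kChildSizeBytes);
    int child_len = static_cast<int>(
        std::min<int64_t>(len, kChildSizeBytes - child_offset));
    std::printf("child %lld: offset %d, len %d\n",
                static_cast<long long>(child_index), child_offset, child_len);
    offset += child_len;
    len -= child_len;
  }
}

}  // namespace

int main() {
  // A 3 MB read starting halfway into the first child spans four children.
  SplitSparseRange(512 * 1024, 3 * 1024 * 1024);
  return 0;
}
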
diff --git a/chromium/net/disk_cache/stats.cc b/chromium/net/disk_cache/stats.cc
new file mode 100644
index 00000000000..33d9d1c534a
--- /dev/null
+++ b/chromium/net/disk_cache/stats.cc
@@ -0,0 +1,309 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/stats.h"
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace {
+
+const int32 kDiskSignature = 0xF01427E0;
+
+struct OnDiskStats {
+ int32 signature;
+ int size;
+ int data_sizes[disk_cache::Stats::kDataSizesLength];
+ int64 counters[disk_cache::Stats::MAX_COUNTER];
+};
+COMPILE_ASSERT(sizeof(OnDiskStats) < 512, needs_more_than_2_blocks);
+
+// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
+int LogBase2(int32 number) {
+ unsigned int value = static_cast<unsigned int>(number);
+ const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
+ const unsigned int s[] = {1, 2, 4, 8, 16};
+
+ unsigned int result = 0;
+ for (int i = 4; i >= 0; i--) {
+ if (value & mask[i]) {
+ value >>= s[i];
+ result |= s[i];
+ }
+ }
+ return static_cast<int>(result);
+}
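
LogBase2() above computes floor(log2(n)) with the classic mask-and-shift trick instead of a loop. The following standalone check restates the same trick and compares it against a naive highest-set-bit scan; it is an illustration only, not the cache's code path.

#include <cassert>
#include <cstdint>

// Same mask-and-shift floor(log2) as above, restated standalone.
int FloorLog2(uint32_t value) {
  const uint32_t mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  const uint32_t shift[] = {1, 2, 4, 8, 16};
  uint32_t result = 0;
  for (int i = 4; i >= 0; i--) {
    if (value & mask[i]) {
      value >>= shift[i];
      result |= shift[i];
    }
  }
  return static_cast<int>(result);
}

// Naive reference: position of the highest set bit.
int NaiveLog2(uint32_t value) {
  int r = 0;
  while (value >>= 1)
    ++r;
  return r;
}

int main() {
  for (uint32_t n = 1; n < 1000000; ++n)
    assert(FloorLog2(n) == NaiveLog2(n));
  return 0;
}
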
+
+// WARNING: Add new stats only at the end, or change LoadStats().
+static const char* kCounterNames[] = {
+ "Open miss",
+ "Open hit",
+ "Create miss",
+ "Create hit",
+ "Resurrect hit",
+ "Create error",
+ "Trim entry",
+ "Doom entry",
+ "Doom cache",
+ "Invalid entry",
+ "Open entries",
+ "Max entries",
+ "Timer",
+ "Read data",
+ "Write data",
+ "Open rankings",
+ "Get rankings",
+ "Fatal error",
+ "Last report",
+ "Last report timer",
+ "Doom recent entries",
+ "unused"
+};
+COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
+ update_the_names);
+
+} // namespace
+
+namespace disk_cache {
+
+bool VerifyStats(OnDiskStats* stats) {
+ if (stats->signature != kDiskSignature)
+ return false;
+
+ // We don't want to discard the whole cache every time we have one extra
+ // counter; we keep old data if we can.
+ if (static_cast<unsigned int>(stats->size) > sizeof(*stats)) {
+ memset(stats, 0, sizeof(*stats));
+ stats->signature = kDiskSignature;
+ } else if (static_cast<unsigned int>(stats->size) != sizeof(*stats)) {
+ size_t delta = sizeof(*stats) - static_cast<unsigned int>(stats->size);
+ memset(reinterpret_cast<char*>(stats) + stats->size, 0, delta);
+ stats->size = sizeof(*stats);
+ }
+
+ return true;
+}
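
VerifyStats() above keeps old data when the stats struct has grown between versions: it zero-fills only the newly added tail rather than discarding the whole record. Here is a standalone sketch of that grow-migration step, using a hypothetical struct purely for illustration.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical on-disk layout: an older build wrote only the first two
// counters, a newer build adds a third.
struct OnDiskCounters {
  int32_t signature;
  int32_t size;  // Size of the struct as written by the previous build.
  int64_t counters[3];
};

// Mirrors the "grow" branch of VerifyStats(): keep the old counters and zero
// only the newly added tail, instead of discarding everything.
void MigrateGrownStruct(OnDiskCounters* stats) {
  if (static_cast<size_t>(stats->size) >= sizeof(*stats))
    return;  // Nothing to migrate in this sketch.
  size_t delta = sizeof(*stats) - static_cast<size_t>(stats->size);
  std::memset(reinterpret_cast<char*>(stats) + stats->size, 0, delta);
  stats->size = sizeof(*stats);
}

int main() {
  OnDiskCounters stats = {};
  stats.signature = 0x1234;
  stats.size = sizeof(stats) - sizeof(int64_t);  // Old layout: two counters.
  stats.counters[0] = 7;
  stats.counters[1] = 9;
  stats.counters[2] = 42;  // Stale bytes the old writer never owned.

  MigrateGrownStruct(&stats);
  std::printf("%d %d %d\n",
              static_cast<int>(stats.counters[0]),
              static_cast<int>(stats.counters[1]),
              static_cast<int>(stats.counters[2]));  // Prints: 7 9 0
  return 0;
}
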
+
+Stats::Stats() : size_histogram_(NULL) {
+}
+
+Stats::~Stats() {
+}
+
+bool Stats::Init(void* data, int num_bytes, Addr address) {
+ OnDiskStats local_stats;
+ OnDiskStats* stats = &local_stats;
+ if (!num_bytes) {
+ memset(stats, 0, sizeof(local_stats));
+ local_stats.signature = kDiskSignature;
+ local_stats.size = sizeof(local_stats);
+ } else if (num_bytes >= static_cast<int>(sizeof(*stats))) {
+ stats = reinterpret_cast<OnDiskStats*>(data);
+ if (!VerifyStats(stats))
+ return false;
+ } else {
+ return false;
+ }
+
+ storage_addr_ = address;
+
+ memcpy(data_sizes_, stats->data_sizes, sizeof(data_sizes_));
+ memcpy(counters_, stats->counters, sizeof(counters_));
+
+ // Clean up old value.
+ SetCounter(UNUSED, 0);
+ return true;
+}
+
+void Stats::InitSizeHistogram() {
+ // It seems impossible to support this histogram for more than one
+  // simultaneous object with the current infrastructure.
+ static bool first_time = true;
+ if (first_time) {
+ first_time = false;
+ if (!size_histogram_) {
+ // Stats may be reused when the cache is re-created, but we want only one
+ // histogram at any given time.
+ size_histogram_ = StatsHistogram::FactoryGet("DiskCache.SizeStats", this);
+ }
+ }
+}
+
+int Stats::StorageSize() {
+ // If we have more than 512 bytes of counters, change kDiskSignature so we
+ // don't overwrite something else (LoadStats must fail).
+ COMPILE_ASSERT(sizeof(OnDiskStats) <= 256 * 2, use_more_blocks);
+ return 256 * 2;
+}
+
+void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
+  // We keep a counter of the data block size on an array where each entry is
+  // the adjusted log base 2 of the size. The first bucket counts sizes below
+  // 1 KB, the second sizes below 2 KB, and so on. With 28 buckets, the last
+  // one counts entries of more than 64 MB.
+ int new_index = GetStatsBucket(new_size);
+ int old_index = GetStatsBucket(old_size);
+
+ if (new_size)
+ data_sizes_[new_index]++;
+
+ if (old_size)
+ data_sizes_[old_index]--;
+}
+
+void Stats::OnEvent(Counters an_event) {
+ DCHECK(an_event >= MIN_COUNTER && an_event < MAX_COUNTER);
+ counters_[an_event]++;
+}
+
+void Stats::SetCounter(Counters counter, int64 value) {
+ DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
+ counters_[counter] = value;
+}
+
+int64 Stats::GetCounter(Counters counter) const {
+ DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
+ return counters_[counter];
+}
+
+void Stats::GetItems(StatsItems* items) {
+ std::pair<std::string, std::string> item;
+ for (int i = 0; i < kDataSizesLength; i++) {
+ item.first = base::StringPrintf("Size%02d", i);
+ item.second = base::StringPrintf("0x%08x", data_sizes_[i]);
+ items->push_back(item);
+ }
+
+ for (int i = MIN_COUNTER; i < MAX_COUNTER; i++) {
+ item.first = kCounterNames[i];
+ item.second = base::StringPrintf("0x%" PRIx64, counters_[i]);
+ items->push_back(item);
+ }
+}
+
+int Stats::GetHitRatio() const {
+ return GetRatio(OPEN_HIT, OPEN_MISS);
+}
+
+int Stats::GetResurrectRatio() const {
+ return GetRatio(RESURRECT_HIT, CREATE_HIT);
+}
+
+void Stats::ResetRatios() {
+ SetCounter(OPEN_HIT, 0);
+ SetCounter(OPEN_MISS, 0);
+ SetCounter(RESURRECT_HIT, 0);
+ SetCounter(CREATE_HIT, 0);
+}
+
+int Stats::GetLargeEntriesSize() {
+ int total = 0;
+ // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
+ // GetStatsBucket()).
+ for (int bucket = 20; bucket < kDataSizesLength; bucket++)
+ total += data_sizes_[bucket] * GetBucketRange(bucket);
+
+ return total;
+}
+
+int Stats::SerializeStats(void* data, int num_bytes, Addr* address) {
+ OnDiskStats* stats = reinterpret_cast<OnDiskStats*>(data);
+ if (num_bytes < static_cast<int>(sizeof(*stats)))
+ return 0;
+
+ stats->signature = kDiskSignature;
+ stats->size = sizeof(*stats);
+ memcpy(stats->data_sizes, data_sizes_, sizeof(data_sizes_));
+ memcpy(stats->counters, counters_, sizeof(counters_));
+
+ *address = storage_addr_;
+ return sizeof(*stats);
+}
+
+int Stats::GetBucketRange(size_t i) const {
+ if (i < 2)
+ return static_cast<int>(1024 * i);
+
+ if (i < 12)
+ return static_cast<int>(2048 * (i - 1));
+
+ if (i < 17)
+ return static_cast<int>(4096 * (i - 11)) + 20 * 1024;
+
+ int n = 64 * 1024;
+ if (i > static_cast<size_t>(kDataSizesLength)) {
+ NOTREACHED();
+ i = kDataSizesLength;
+ }
+
+ i -= 17;
+ n <<= i;
+ return n;
+}
+
+void Stats::Snapshot(base::HistogramSamples* samples) const {
+ for (int i = 0; i < kDataSizesLength; i++) {
+ int count = data_sizes_[i];
+ if (count < 0)
+ count = 0;
+ samples->Accumulate(GetBucketRange(i), count);
+ }
+}
+
+// The array will be filled this way:
+// index size
+// 0 [0, 1024)
+// 1 [1024, 2048)
+// 2 [2048, 4096)
+// 3 [4K, 6K)
+// ...
+// 10 [18K, 20K)
+// 11 [20K, 24K)
+// 12 [24k, 28K)
+// ...
+// 15 [36k, 40K)
+// 16 [40k, 64K)
+// 17 [64K, 128K)
+// 18 [128K, 256K)
+// ...
+// 23 [4M, 8M)
+// 24 [8M, 16M)
+// 25 [16M, 32M)
+// 26 [32M, 64M)
+// 27 [64M, ...)
+int Stats::GetStatsBucket(int32 size) {
+ if (size < 1024)
+ return 0;
+
+ // 10 slots more, until 20K.
+ if (size < 20 * 1024)
+ return size / 2048 + 1;
+
+ // 5 slots more, from 20K to 40K.
+ if (size < 40 * 1024)
+ return (size - 20 * 1024) / 4096 + 11;
+
+ // From this point on, use a logarithmic scale.
+ int result = LogBase2(size) + 1;
+
+ COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
+ if (result >= kDataSizesLength)
+ result = kDataSizesLength - 1;
+
+ return result;
+}
+
+int Stats::GetRatio(Counters hit, Counters miss) const {
+ int64 ratio = GetCounter(hit) * 100;
+ if (!ratio)
+ return 0;
+
+ ratio /= (GetCounter(hit) + GetCounter(miss));
+ return static_cast<int>(ratio);
+}
+
+} // namespace disk_cache
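
GetStatsBucket() and GetBucketRange() above are the two halves of the size histogram: one maps a byte size to a bucket index, the other maps an index back to that bucket's lower bound, as laid out in the index table before GetStatsBucket(). The standalone sketch below restates both mappings (with kDataSizesLength = 28, as declared in stats.h) and prints each lower bound together with the bucket it maps back to, which should always be its own index.

#include <cstdint>
#include <cstdio>

namespace {

const int kDataSizesLength = 28;  // Matches Stats::kDataSizesLength.

int FloorLog2(uint32_t value) {
  int r = 0;
  while (value >>= 1)
    ++r;
  return r;
}

// Same piecewise mapping as Stats::GetStatsBucket().
int GetStatsBucket(int32_t size) {
  if (size < 1024)
    return 0;
  if (size < 20 * 1024)
    return size / 2048 + 1;
  if (size < 40 * 1024)
    return (size - 20 * 1024) / 4096 + 11;
  int result = FloorLog2(static_cast<uint32_t>(size)) + 1;
  return result >= kDataSizesLength ? kDataSizesLength - 1 : result;
}

// Same lower bounds as Stats::GetBucketRange().
int GetBucketRange(int i) {
  if (i < 2)
    return 1024 * i;
  if (i < 12)
    return 2048 * (i - 1);
  if (i < 17)
    return 4096 * (i - 11) + 20 * 1024;
  return (64 * 1024) << (i - 17);
}

}  // namespace

int main() {
  // Every bucket's lower bound should map back to that same bucket.
  for (int i = 0; i < kDataSizesLength; ++i)
    std::printf("bucket %2d starts at %9d -> maps to %2d\n",
                i, GetBucketRange(i), GetStatsBucket(GetBucketRange(i)));
  return 0;
}
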
diff --git a/chromium/net/disk_cache/stats.h b/chromium/net/disk_cache/stats.h
new file mode 100644
index 00000000000..440334af207
--- /dev/null
+++ b/chromium/net/disk_cache/stats.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STATS_H_
+#define NET_DISK_CACHE_STATS_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/stats_histogram.h"
+
+namespace base {
+class HistogramSamples;
+} // namespace base
+
+namespace disk_cache {
+
+typedef std::vector<std::pair<std::string, std::string> > StatsItems;
+
+// This class stores cache-specific usage information, for tuning purposes.
+class Stats {
+ public:
+ static const int kDataSizesLength = 28;
+ enum Counters {
+ MIN_COUNTER = 0,
+ OPEN_MISS = MIN_COUNTER,
+ OPEN_HIT,
+ CREATE_MISS,
+ CREATE_HIT,
+ RESURRECT_HIT,
+ CREATE_ERROR,
+ TRIM_ENTRY,
+ DOOM_ENTRY,
+ DOOM_CACHE,
+ INVALID_ENTRY,
+ OPEN_ENTRIES, // Average number of open entries.
+ MAX_ENTRIES, // Maximum number of open entries.
+ TIMER,
+ READ_DATA,
+ WRITE_DATA,
+ OPEN_RANKINGS, // An entry has to be read just to modify rankings.
+ GET_RANKINGS, // We got the ranking info without reading the whole entry.
+ FATAL_ERROR,
+ LAST_REPORT, // Time of the last time we sent a report.
+ LAST_REPORT_TIMER, // Timer count of the last time we sent a report.
+ DOOM_RECENT, // The cache was partially cleared.
+ UNUSED, // Was: ga.js was evicted from the cache.
+ MAX_COUNTER
+ };
+
+ Stats();
+ ~Stats();
+
+ // Initializes this object with |data| from disk.
+ bool Init(void* data, int num_bytes, Addr address);
+
+ // Generates a size distribution histogram.
+ void InitSizeHistogram();
+
+ // Returns the number of bytes needed to store the stats on disk.
+ int StorageSize();
+
+  // Tracks changes to the storage space used by an entry.
+ void ModifyStorageStats(int32 old_size, int32 new_size);
+
+ // Tracks general events.
+ void OnEvent(Counters an_event);
+ void SetCounter(Counters counter, int64 value);
+ int64 GetCounter(Counters counter) const;
+
+ void GetItems(StatsItems* items);
+ int GetHitRatio() const;
+ int GetResurrectRatio() const;
+ void ResetRatios();
+
+ // Returns the lower bound of the space used by entries bigger than 512 KB.
+ int GetLargeEntriesSize();
+
+ // Writes the stats into |data|, to be stored at the given cache address.
+ // Returns the number of bytes copied.
+ int SerializeStats(void* data, int num_bytes, Addr* address);
+
+ // Support for StatsHistograms. Together, these methods allow StatsHistograms
+ // to take a snapshot of the data_sizes_ as the histogram data.
+ int GetBucketRange(size_t i) const;
+ void Snapshot(base::HistogramSamples* samples) const;
+
+ private:
+ int GetStatsBucket(int32 size);
+ int GetRatio(Counters hit, Counters miss) const;
+
+ Addr storage_addr_;
+ int data_sizes_[kDataSizesLength];
+ int64 counters_[MAX_COUNTER];
+ StatsHistogram* size_histogram_;
+
+ DISALLOW_COPY_AND_ASSIGN(Stats);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STATS_H_
diff --git a/chromium/net/disk_cache/stats_histogram.cc b/chromium/net/disk_cache/stats_histogram.cc
new file mode 100644
index 00000000000..33adfeaae49
--- /dev/null
+++ b/chromium/net/disk_cache/stats_histogram.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/stats_histogram.h"
+
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "net/disk_cache/stats.h"
+
+namespace disk_cache {
+
+using base::BucketRanges;
+using base::Histogram;
+using base::HistogramSamples;
+using base::SampleVector;
+using base::StatisticsRecorder;
+
+StatsHistogram::StatsHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ const Stats* stats)
+ : Histogram(name, minimum, maximum, ranges),
+ stats_(stats) {}
+
+StatsHistogram::~StatsHistogram() {}
+
+// static
+void StatsHistogram::InitializeBucketRanges(const Stats* stats,
+ BucketRanges* ranges) {
+ for (size_t i = 0; i < ranges->size(); ++i) {
+ ranges->set_range(i, stats->GetBucketRange(i));
+ }
+ ranges->ResetChecksum();
+}
+
+StatsHistogram* StatsHistogram::FactoryGet(const std::string& name,
+ const Stats* stats) {
+ Sample minimum = 1;
+ Sample maximum = disk_cache::Stats::kDataSizesLength - 1;
+ size_t bucket_count = disk_cache::Stats::kDataSizesLength;
+ HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+ if (!histogram) {
+ DCHECK(stats);
+
+ // To avoid racy destruction at shutdown, the following will be leaked.
+ BucketRanges* ranges = new BucketRanges(bucket_count + 1);
+ InitializeBucketRanges(stats, ranges);
+ const BucketRanges* registered_ranges =
+ StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
+
+ // To avoid racy destruction at shutdown, the following will be leaked.
+ StatsHistogram* stats_histogram =
+ new StatsHistogram(name, minimum, maximum, registered_ranges, stats);
+ stats_histogram->SetFlags(kUmaTargetedHistogramFlag);
+ histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(stats_histogram);
+ }
+
+ DCHECK(base::HISTOGRAM == histogram->GetHistogramType());
+ DCHECK(histogram->HasConstructionArguments(minimum, maximum, bucket_count));
+
+ // We're preparing for an otherwise unsafe upcast by ensuring we have the
+ // proper class type.
+ StatsHistogram* return_histogram = static_cast<StatsHistogram*>(histogram);
+ return return_histogram;
+}
+
+scoped_ptr<HistogramSamples> StatsHistogram::SnapshotSamples() const {
+ scoped_ptr<SampleVector> samples(new SampleVector(bucket_ranges()));
+ stats_->Snapshot(samples.get());
+
+ // Only report UMA data once.
+ StatsHistogram* mutable_me = const_cast<StatsHistogram*>(this);
+ mutable_me->ClearFlags(kUmaTargetedHistogramFlag);
+
+ return samples.PassAs<HistogramSamples>();
+}
+
+int StatsHistogram::FindCorruption(const HistogramSamples& samples) const {
+ // This class won't monitor inconsistencies.
+ return HistogramBase::NO_INCONSISTENCIES;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/stats_histogram.h b/chromium/net/disk_cache/stats_histogram.h
new file mode 100644
index 00000000000..279a1c3c71c
--- /dev/null
+++ b/chromium/net/disk_cache/stats_histogram.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STATS_HISTOGRAM_H_
+#define NET_DISK_CACHE_STATS_HISTOGRAM_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
+
+namespace base {
+class BucketRanges;
+class HistogramSamples;
+class SampleVector;
+} // namespace base
+
+namespace disk_cache {
+
+class Stats;
+
+// This class provides support for sending the disk cache size stats as a UMA
+// histogram. We'll provide our own storage and management for the data, and a
+// SampleVector with a copy of our data.
+//
+// Deriving from Histogram is "deprecated"; this class should not be copied
+// and may eventually go away.
+//
+class StatsHistogram : public base::Histogram {
+ public:
+ StatsHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const base::BucketRanges* ranges,
+ const Stats* stats);
+ virtual ~StatsHistogram();
+
+ static void InitializeBucketRanges(const Stats* stats,
+ base::BucketRanges* ranges);
+ static StatsHistogram* FactoryGet(const std::string& name,
+ const Stats* stats);
+
+ virtual scoped_ptr<base::HistogramSamples> SnapshotSamples() const OVERRIDE;
+ virtual int FindCorruption(
+ const base::HistogramSamples& samples) const OVERRIDE;
+
+ private:
+ const Stats* stats_;
+ DISALLOW_COPY_AND_ASSIGN(StatsHistogram);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STATS_HISTOGRAM_H_
diff --git a/chromium/net/disk_cache/storage_block-inl.h b/chromium/net/disk_cache/storage_block-inl.h
new file mode 100644
index 00000000000..098cd74afa7
--- /dev/null
+++ b/chromium/net/disk_cache/storage_block-inl.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STORAGE_BLOCK_INL_H_
+#define NET_DISK_CACHE_STORAGE_BLOCK_INL_H_
+
+#include "net/disk_cache/storage_block.h"
+
+#include "base/hash.h"
+#include "base/logging.h"
+#include "net/disk_cache/trace.h"
+
+namespace disk_cache {
+
+template<typename T> StorageBlock<T>::StorageBlock(MappedFile* file,
+ Addr address)
+ : data_(NULL), file_(file), address_(address), modified_(false),
+ own_data_(false), extended_(false) {
+ if (address.num_blocks() > 1)
+ extended_ = true;
+ DCHECK(!address.is_initialized() || sizeof(*data_) == address.BlockSize());
+}
+
+template<typename T> StorageBlock<T>::~StorageBlock() {
+ if (modified_)
+ Store();
+ DeleteData();
+}
+
+template<typename T> void* StorageBlock<T>::buffer() const {
+ return data_;
+}
+
+template<typename T> size_t StorageBlock<T>::size() const {
+ if (!extended_)
+ return sizeof(*data_);
+ return address_.num_blocks() * sizeof(*data_);
+}
+
+template<typename T> int StorageBlock<T>::offset() const {
+ return address_.start_block() * address_.BlockSize();
+}
+
+template<typename T> bool StorageBlock<T>::LazyInit(MappedFile* file,
+ Addr address) {
+ if (file_ || address_.is_initialized()) {
+ NOTREACHED();
+ return false;
+ }
+ file_ = file;
+ address_.set_value(address.value());
+ if (address.num_blocks() > 1)
+ extended_ = true;
+
+ DCHECK(sizeof(*data_) == address.BlockSize());
+ return true;
+}
+
+template<typename T> void StorageBlock<T>::SetData(T* other) {
+ DCHECK(!modified_);
+ DeleteData();
+ data_ = other;
+}
+
+template<typename T> void StorageBlock<T>::Discard() {
+ if (!data_)
+ return;
+ if (!own_data_) {
+ NOTREACHED();
+ return;
+ }
+ DeleteData();
+ data_ = NULL;
+ modified_ = false;
+ extended_ = false;
+}
+
+template<typename T> void StorageBlock<T>::StopSharingData() {
+ if (!data_ || own_data_)
+ return;
+ DCHECK(!modified_);
+ data_ = NULL;
+}
+
+template<typename T> void StorageBlock<T>::set_modified() {
+ DCHECK(data_);
+ modified_ = true;
+}
+
+template<typename T> void StorageBlock<T>::clear_modified() {
+ modified_ = false;
+}
+
+template<typename T> T* StorageBlock<T>::Data() {
+ if (!data_)
+ AllocateData();
+ return data_;
+}
+
+template<typename T> bool StorageBlock<T>::HasData() const {
+ return (NULL != data_);
+}
+
+template<typename T> bool StorageBlock<T>::VerifyHash() const {
+ uint32 hash = CalculateHash();
+ return (!data_->self_hash || data_->self_hash == hash);
+}
+
+template<typename T> bool StorageBlock<T>::own_data() const {
+ return own_data_;
+}
+
+template<typename T> const Addr StorageBlock<T>::address() const {
+ return address_;
+}
+
+template<typename T> bool StorageBlock<T>::Load() {
+ if (file_) {
+ if (!data_)
+ AllocateData();
+
+ if (file_->Load(this)) {
+ modified_ = false;
+ return true;
+ }
+ }
+ LOG(WARNING) << "Failed data load.";
+ Trace("Failed data load.");
+ return false;
+}
+
+template<typename T> bool StorageBlock<T>::Store() {
+ if (file_ && data_) {
+ data_->self_hash = CalculateHash();
+ if (file_->Store(this)) {
+ modified_ = false;
+ return true;
+ }
+ }
+ LOG(ERROR) << "Failed data store.";
+ Trace("Failed data store.");
+ return false;
+}
+
+template<typename T> void StorageBlock<T>::AllocateData() {
+ DCHECK(!data_);
+ if (!extended_) {
+ data_ = new T;
+ } else {
+ void* buffer = new char[address_.num_blocks() * sizeof(*data_)];
+ data_ = new(buffer) T;
+ }
+ own_data_ = true;
+}
+
+template<typename T> void StorageBlock<T>::DeleteData() {
+ if (own_data_) {
+ if (!extended_) {
+ delete data_;
+ } else {
+ data_->~T();
+ delete[] reinterpret_cast<char*>(data_);
+ }
+ own_data_ = false;
+ }
+}
+
+template<typename T> uint32 StorageBlock<T>::CalculateHash() const {
+ return base::Hash(reinterpret_cast<char*>(data_), offsetof(T, self_hash));
+}
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STORAGE_BLOCK_INL_H_
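
Two details from storage_block-inl.h are worth calling out: AllocateData() placement-news the type into a raw multi-block char buffer when the entry spans more than one block, and CalculateHash() hashes only the bytes before self_hash, so the stored hash never covers itself. Below is a standalone sketch of both ideas, using FNV-1a as a stand-in for base::Hash() (an assumption for illustration only).

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <new>

namespace {

struct Record {
  char payload[48];
  uint32_t self_hash;  // Must stay last so it is excluded from the hash.
};

// FNV-1a, standing in for base::Hash() purely for illustration.
uint32_t Hash(const char* data, size_t len) {
  uint32_t h = 2166136261u;
  for (size_t i = 0; i < len; ++i) {
    h ^= static_cast<unsigned char>(data[i]);
    h *= 16777619u;
  }
  return h;
}

uint32_t CalculateHash(const Record* r) {
  // Hash everything up to, but not including, self_hash.
  return Hash(reinterpret_cast<const char*>(r), offsetof(Record, self_hash));
}

}  // namespace

int main() {
  // "Extended" allocation: raw storage for several blocks, with the struct
  // placement-newed at the front, mirroring StorageBlock<T>::AllocateData().
  const int kNumBlocks = 3;
  char* buffer = new char[kNumBlocks * sizeof(Record)];
  Record* record = new (buffer) Record();
  std::memset(record->payload, 'x', sizeof(record->payload));

  record->self_hash = CalculateHash(record);
  std::printf("stored hash: 0x%08x, verifies: %d\n",
              static_cast<unsigned>(record->self_hash),
              record->self_hash == CalculateHash(record));

  // Mirror of DeleteData(): explicit destructor, then raw delete[].
  record->~Record();
  delete[] buffer;
  return 0;
}
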
diff --git a/chromium/net/disk_cache/storage_block.h b/chromium/net/disk_cache/storage_block.h
new file mode 100644
index 00000000000..65c67fc4b44
--- /dev/null
+++ b/chromium/net/disk_cache/storage_block.h
@@ -0,0 +1,95 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_STORAGE_BLOCK_H_
+#define NET_DISK_CACHE_STORAGE_BLOCK_H_
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+
+namespace disk_cache {
+
+// This class encapsulates common behavior of a single "block" of data that is
+// stored on a block-file. It implements the FileBlock interface, so it can be
+// serialized directly to the backing file.
+// This object provides a memory buffer for the related data, and it can be used
+// to actually share that memory with another instance of the class.
+//
+// The following example shows how to share storage with another object:
+// StorageBlock<TypeA> a(file, address);
+// StorageBlock<TypeB> b(file, address);
+// a.Load();
+// DoSomething(a.Data());
+// b.SetData(a.Data());
+// ModifySomething(b.Data());
+// // Data modified on the previous call will be saved by b's destructor.
+// b.set_modified();
+template<typename T>
+class StorageBlock : public FileBlock {
+ public:
+ StorageBlock(MappedFile* file, Addr address);
+ virtual ~StorageBlock();
+
+ // FileBlock interface.
+ virtual void* buffer() const;
+ virtual size_t size() const;
+ virtual int offset() const;
+
+  // Allows the override of dummy values passed to the constructor.
+ bool LazyInit(MappedFile* file, Addr address);
+
+ // Sets the internal storage to share the memory provided by other instance.
+ void SetData(T* other);
+
+ // Deletes the data, even if it was modified and not saved. This object must
+ // own the memory buffer (it cannot be shared).
+ void Discard();
+
+ // Stops sharing the data with another object.
+ void StopSharingData();
+
+ // Sets the object to lazily save the in-memory data on destruction.
+ void set_modified();
+
+ // Forgets that the data was modified, so it's not lazily saved.
+ void clear_modified();
+
+ // Gets a pointer to the internal storage (allocates storage if needed).
+ T* Data();
+
+ // Returns true if there is data associated with this object.
+ bool HasData() const;
+
+ // Returns true if the internal hash is correct.
+ bool VerifyHash() const;
+
+ // Returns true if this object owns the data buffer, false if it is shared.
+ bool own_data() const;
+
+ const Addr address() const;
+
+ // Loads and store the data.
+ bool Load();
+ bool Store();
+
+ private:
+ void AllocateData();
+ void DeleteData();
+ uint32 CalculateHash() const;
+
+ T* data_;
+ MappedFile* file_;
+ Addr address_;
+ bool modified_;
+ bool own_data_; // Is data_ owned by this object or shared with someone else.
+ bool extended_; // Used to store an entry of more than one block.
+
+ DISALLOW_COPY_AND_ASSIGN(StorageBlock);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STORAGE_BLOCK_H_
diff --git a/chromium/net/disk_cache/storage_block_unittest.cc b/chromium/net/disk_cache/storage_block_unittest.cc
new file mode 100644
index 00000000000..f91b2b92ff2
--- /dev/null
+++ b/chromium/net/disk_cache/storage_block_unittest.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+typedef disk_cache::StorageBlock<disk_cache::EntryStore> CacheEntryBlock;
+
+TEST_F(DiskCacheTest, StorageBlock_LoadStore) {
+ base::FilePath filename = cache_path_.AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ CacheEntryBlock entry1(file.get(), disk_cache::Addr(0xa0010001));
+ memset(entry1.Data(), 0, sizeof(disk_cache::EntryStore));
+ entry1.Data()->hash = 0xaa5555aa;
+ entry1.Data()->rankings_node = 0xa0010002;
+
+ EXPECT_TRUE(entry1.Store());
+ entry1.Data()->hash = 0x88118811;
+ entry1.Data()->rankings_node = 0xa0040009;
+
+ EXPECT_TRUE(entry1.Load());
+ EXPECT_EQ(0xaa5555aa, entry1.Data()->hash);
+ EXPECT_EQ(0xa0010002, entry1.Data()->rankings_node);
+}
+
+TEST_F(DiskCacheTest, StorageBlock_SetData) {
+ base::FilePath filename = cache_path_.AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ CacheEntryBlock entry1(file.get(), disk_cache::Addr(0xa0010001));
+ entry1.Data()->hash = 0xaa5555aa;
+
+ CacheEntryBlock entry2(file.get(), disk_cache::Addr(0xa0010002));
+ EXPECT_TRUE(entry2.Load());
+ EXPECT_TRUE(entry2.Data() != NULL);
+ EXPECT_TRUE(0 == entry2.Data()->hash);
+
+ EXPECT_TRUE(entry2.Data() != entry1.Data());
+ entry2.SetData(entry1.Data());
+ EXPECT_EQ(0xaa5555aa, entry2.Data()->hash);
+ EXPECT_TRUE(entry2.Data() == entry1.Data());
+}
+
+TEST_F(DiskCacheTest, StorageBlock_SetModified) {
+ base::FilePath filename = cache_path_.AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ CacheEntryBlock* entry1 =
+ new CacheEntryBlock(file.get(), disk_cache::Addr(0xa0010003));
+ EXPECT_TRUE(entry1->Load());
+ EXPECT_TRUE(0 == entry1->Data()->hash);
+ entry1->Data()->hash = 0x45687912;
+ entry1->set_modified();
+ delete entry1;
+
+ CacheEntryBlock entry2(file.get(), disk_cache::Addr(0xa0010003));
+ EXPECT_TRUE(entry2.Load());
+ EXPECT_TRUE(0x45687912 == entry2.Data()->hash);
+}
diff --git a/chromium/net/disk_cache/stress_cache.cc b/chromium/net/disk_cache/stress_cache.cc
new file mode 100644
index 00000000000..9c3c5a6333e
--- /dev/null
+++ b/chromium/net/disk_cache/stress_cache.cc
@@ -0,0 +1,294 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a simple application that stress-tests the crash recovery of the disk
+// cache. The main application starts a copy of itself on a loop, checking the
+// exit code of the child process. When the child dies in an unexpected way,
+// the main application quits.
+
+// The child application has two threads: one to exercise the cache in an
+// infinite loop, and another one to asynchronously kill the process.
+
+// A regular build should never crash.
+// To test that the disk cache doesn't generate critical errors with regular
+// application level crashes, edit stress_support.h.
+
+#include <string>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/debugger.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/process/kill.h"
+#include "base/process/launch.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/stress_support.h"
+#include "net/disk_cache/trace.h"
+
+#if defined(OS_WIN)
+#include "base/logging_win.h"
+#endif
+
+using base::Time;
+
+const int kError = -1;
+const int kExpectedCrash = 100;
+
+// Starts a new process.
+int RunSlave(int iteration) {
+ base::FilePath exe;
+ PathService::Get(base::FILE_EXE, &exe);
+
+ CommandLine cmdline(exe);
+ cmdline.AppendArg(base::IntToString(iteration));
+
+ base::ProcessHandle handle;
+ if (!base::LaunchProcess(cmdline, base::LaunchOptions(), &handle)) {
+ printf("Unable to run test\n");
+ return kError;
+ }
+
+ int exit_code;
+ if (!base::WaitForExitCode(handle, &exit_code)) {
+ printf("Unable to get return code\n");
+ return kError;
+ }
+ return exit_code;
+}
+
+// Main loop for the master process.
+int MasterCode() {
+ for (int i = 0; i < 100000; i++) {
+ int ret = RunSlave(i);
+ if (kExpectedCrash != ret)
+ return ret;
+ }
+
+ printf("More than enough...\n");
+
+ return 0;
+}
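
MasterCode() above keeps relaunching the child and stops as soon as the child exits with anything other than the expected crash code. The standalone POSIX sketch below shows the same relaunch-until-unexpected-exit loop; the child binary name and the use of std::system() are assumptions made for illustration, not what this file does.

#include <cstdio>
#include <cstdlib>
#include <string>
#include <sys/wait.h>

namespace {

const int kExpectedCrash = 100;  // Matches the constant above.

// Runs one child iteration and returns its exit code, or -1 on failure.
// "stress_child" is a placeholder binary name.
int RunChild(int iteration) {
  std::string cmd = "./stress_child " + std::to_string(iteration);
  int status = std::system(cmd.c_str());
  if (status == -1 || !WIFEXITED(status))
    return -1;
  return WEXITSTATUS(status);
}

}  // namespace

int main() {
  for (int i = 0; i < 100000; ++i) {
    int ret = RunChild(i);
    if (ret != kExpectedCrash) {
      // Either a clean pass or a real failure; stop and report it.
      std::printf("child stopped with code %d at iteration %d\n", ret, i);
      return ret == 0 ? 0 : 1;
    }
  }
  std::printf("More than enough...\n");
  return 0;
}
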
+
+// -----------------------------------------------------------------------
+
+std::string GenerateStressKey() {
+ char key[20 * 1024];
+ size_t size = 50 + rand() % 20000;
+ CacheTestFillBuffer(key, size, true);
+
+ key[size - 1] = '\0';
+ return std::string(key);
+}
+
+// This thread will loop forever, adding and removing entries from the cache.
+// iteration is the current crash cycle, so the entries on the cache are marked
+// to know which instance of the application wrote them.
+void StressTheCache(int iteration) {
+ int cache_size = 0x2000000; // 32MB.
+ uint32 mask = 0xfff; // 4096 entries.
+
+ base::FilePath path;
+ PathService::Get(base::DIR_TEMP, &path);
+ path = path.AppendASCII("cache_test_stress");
+
+ base::Thread cache_thread("CacheThread");
+ if (!cache_thread.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)))
+ return;
+
+ disk_cache::BackendImpl* cache =
+ new disk_cache::BackendImpl(path, mask,
+ cache_thread.message_loop_proxy().get(),
+ NULL);
+ cache->SetMaxSize(cache_size);
+ cache->SetFlags(disk_cache::kNoLoadProtection);
+
+ net::TestCompletionCallback cb;
+ int rv = cache->Init(cb.callback());
+
+ if (cb.GetResult(rv) != net::OK) {
+ printf("Unable to initialize cache.\n");
+ return;
+ }
+ printf("Iteration %d, initial entries: %d\n", iteration,
+ cache->GetEntryCount());
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ // kNumKeys is meant to be enough to have about 3x or 4x iterations before
+ // the process crashes.
+#ifdef NDEBUG
+ const int kNumKeys = 4000;
+#else
+ const int kNumKeys = 1200;
+#endif
+ const int kNumEntries = 30;
+ std::string keys[kNumKeys];
+ disk_cache::Entry* entries[kNumEntries] = {0};
+
+ for (int i = 0; i < kNumKeys; i++) {
+ keys[i] = GenerateStressKey();
+ }
+
+ const int kSize = 20000;
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ memset(buffer->data(), 'k', kSize);
+
+ for (int i = 0;; i++) {
+ int slot = rand() % kNumEntries;
+ int key = rand() % kNumKeys;
+ bool truncate = (rand() % 2 == 0);
+ int size = kSize - (rand() % 20) * kSize / 20;
+
+ if (entries[slot])
+ entries[slot]->Close();
+
+ net::TestCompletionCallback cb;
+ rv = cache->OpenEntry(keys[key], &entries[slot], cb.callback());
+ if (cb.GetResult(rv) != net::OK) {
+ rv = cache->CreateEntry(keys[key], &entries[slot], cb.callback());
+ CHECK_EQ(net::OK, cb.GetResult(rv));
+ }
+
+ base::snprintf(buffer->data(), kSize,
+ "i: %d iter: %d, size: %d, truncate: %d ", i, iteration,
+ size, truncate ? 1 : 0);
+ rv = entries[slot]->WriteData(0, 0, buffer.get(), size, cb.callback(),
+ truncate);
+ CHECK_EQ(size, cb.GetResult(rv));
+
+ if (rand() % 100 > 80) {
+ key = rand() % kNumKeys;
+ net::TestCompletionCallback cb2;
+ rv = cache->DoomEntry(keys[key], cb2.callback());
+ cb2.GetResult(rv);
+ }
+
+ if (!(i % 100))
+ printf("Entries: %d \r", i);
+ }
+}
+
+// We want to prevent the timer thread from killing the process while we are
+// waiting for the debugger to attach.
+bool g_crashing = false;
+
+// RunSoon() and CrashCallback() reference each other, unfortunately.
+void RunSoon(base::MessageLoop* target_loop);
+
+void CrashCallback() {
+ // Keep trying to run.
+ RunSoon(base::MessageLoop::current());
+
+ if (g_crashing)
+ return;
+
+ if (rand() % 100 > 30) {
+ printf("sweet death...\n");
+#if defined(OS_WIN)
+    // Windows does more work on _exit() than we would like, so we use Kill.
+ base::KillProcessById(base::GetCurrentProcId(), kExpectedCrash, false);
+#elif defined(OS_POSIX)
+ // On POSIX, _exit() will terminate the process with minimal cleanup,
+ // and it is cleaner than killing.
+ _exit(kExpectedCrash);
+#endif
+ }
+}
+
+void RunSoon(base::MessageLoop* target_loop) {
+ const base::TimeDelta kTaskDelay = base::TimeDelta::FromSeconds(10);
+ target_loop->PostDelayedTask(
+ FROM_HERE, base::Bind(&CrashCallback), kTaskDelay);
+}
+
+// We leak everything here :)
+bool StartCrashThread() {
+ base::Thread* thread = new base::Thread("party_crasher");
+ if (!thread->Start())
+ return false;
+
+ RunSoon(thread->message_loop());
+ return true;
+}
+
+void CrashHandler(const std::string& str) {
+ g_crashing = true;
+ base::debug::BreakDebugger();
+}
+
+bool MessageHandler(int severity, const char* file, int line,
+ size_t message_start, const std::string& str) {
+ const size_t kMaxMessageLen = 48;
+ char message[kMaxMessageLen];
+ size_t len = std::min(str.length() - message_start, kMaxMessageLen - 1);
+
+ memcpy(message, str.c_str() + message_start, len);
+ message[len] = '\0';
+#if !defined(DISK_CACHE_TRACE_TO_LOG)
+ disk_cache::Trace("%s", message);
+#endif
+ return false;
+}
+
+// -----------------------------------------------------------------------
+
+#if defined(OS_WIN)
+// {B9A153D4-31C3-48e4-9ABF-D54383F14A0D}
+const GUID kStressCacheTraceProviderName = {
+ 0xb9a153d4, 0x31c3, 0x48e4,
+ { 0x9a, 0xbf, 0xd5, 0x43, 0x83, 0xf1, 0x4a, 0xd } };
+#endif
+
+int main(int argc, const char* argv[]) {
+ // Setup an AtExitManager so Singleton objects will be destructed.
+ base::AtExitManager at_exit_manager;
+
+ if (argc < 2)
+ return MasterCode();
+
+ logging::SetLogAssertHandler(CrashHandler);
+ logging::SetLogMessageHandler(MessageHandler);
+
+#if defined(OS_WIN)
+ logging::LogEventProvider::Initialize(kStressCacheTraceProviderName);
+#else
+ CommandLine::Init(argc, argv);
+ logging::LoggingSettings settings;
+ settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+ logging::InitLogging(settings);
+#endif
+
+ // Some time for the memory manager to flush stuff.
+ base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(3));
+ base::MessageLoop message_loop(base::MessageLoop::TYPE_IO);
+
+ char* end;
+ long int iteration = strtol(argv[1], &end, 0);
+
+ if (!StartCrashThread()) {
+ printf("failed to start thread\n");
+ return kError;
+ }
+
+ StressTheCache(iteration);
+ return 0;
+}
diff --git a/chromium/net/disk_cache/stress_support.h b/chromium/net/disk_cache/stress_support.h
new file mode 100644
index 00000000000..48d7b5b7df2
--- /dev/null
+++ b/chromium/net/disk_cache/stress_support.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STRESS_SUPPORT_H_
+#define NET_DISK_CACHE_STRESS_SUPPORT_H_
+
+#include "base/logging.h"
+
+namespace disk_cache {
+
+// Uncomment this line to generate a debug build of stress_cache with checks
+// to ensure that we are not producing corrupt entries.
+// #define NET_BUILD_STRESS_CACHE 1
+
+// Uncomment this line to direct the in-memory disk cache tracing to the base
+// logging system. On Windows this option will enable ETW (Event Tracing for
+// Windows) so logs across multiple runs can be collected.
+// #define DISK_CACHE_TRACE_TO_LOG 1
+
+// Uncomment this line to perform extended integrity checks during init. It is
+// not recommended to enable this option unless some corruption is being tracked
+// down.
+// #define STRESS_CACHE_EXTENDED_VALIDATION 1
+
+#if defined(NET_BUILD_STRESS_CACHE)
+#define STRESS_NOTREACHED() NOTREACHED()
+#define STRESS_DCHECK(a) DCHECK(a)
+#else
+// We don't support streams with these macros, but that's a small price to pay
+// to keep the logic here straightforward. Please don't add something like
+// LogMessageVoidify.
+#define STRESS_NOTREACHED() {}
+#define STRESS_DCHECK(a) {}
+#endif
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STRESS_SUPPORT_H_
diff --git a/chromium/net/disk_cache/trace.cc b/chromium/net/disk_cache/trace.cc
new file mode 100644
index 00000000000..56ebe9bb7ea
--- /dev/null
+++ b/chromium/net/disk_cache/trace.cc
@@ -0,0 +1,192 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/trace.h"
+
+#include <stdio.h>
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "net/disk_cache/stress_support.h"
+
+// Change this value to 1 to enable tracing on a release build. By default,
+// tracing is enabled only on debug builds.
+#define ENABLE_TRACING 0
+
+#ifndef NDEBUG
+#undef ENABLE_TRACING
+#define ENABLE_TRACING 1
+#endif
+
+namespace {
+
+const int kEntrySize = 12 * sizeof(size_t);
+#if defined(NET_BUILD_STRESS_CACHE)
+const int kNumberOfEntries = 500000;
+#else
+const int kNumberOfEntries = 5000; // 240 KB on 32bit, 480 KB on 64bit
+#endif
+
+bool s_trace_enabled = false;
+base::LazyInstance<base::Lock>::Leaky s_lock = LAZY_INSTANCE_INITIALIZER;
+
+struct TraceBuffer {
+ int num_traces;
+ int current;
+ char buffer[kNumberOfEntries][kEntrySize];
+};
+
+#if ENABLE_TRACING
+void DebugOutput(const char* msg) {
+#if defined(OS_WIN)
+ OutputDebugStringA(msg);
+#else
+ NOTIMPLEMENTED();
+#endif
+}
+#endif // ENABLE_TRACING
+
+} // namespace
+
+namespace disk_cache {
+
+// s_trace_buffer and s_trace_object are not singletons because I want the
+// buffer to be destroyed and re-created when the last user goes away, and it
+// must be straightforward to access the buffer from the debugger.
+static TraceObject* s_trace_object = NULL;
+
+// Static.
+TraceObject* TraceObject::GetTraceObject() {
+ base::AutoLock lock(s_lock.Get());
+
+ if (s_trace_object)
+ return s_trace_object;
+
+ s_trace_object = new TraceObject();
+ return s_trace_object;
+}
+
+TraceObject::TraceObject() {
+ InitTrace();
+}
+
+TraceObject::~TraceObject() {
+ DestroyTrace();
+}
+
+void TraceObject::EnableTracing(bool enable) {
+ base::AutoLock lock(s_lock.Get());
+ s_trace_enabled = enable;
+}
+
+#if ENABLE_TRACING
+
+static TraceBuffer* s_trace_buffer = NULL;
+
+void InitTrace(void) {
+ s_trace_enabled = true;
+ if (s_trace_buffer)
+ return;
+
+ s_trace_buffer = new TraceBuffer;
+ memset(s_trace_buffer, 0, sizeof(*s_trace_buffer));
+}
+
+void DestroyTrace(void) {
+ base::AutoLock lock(s_lock.Get());
+
+ delete s_trace_buffer;
+ s_trace_buffer = NULL;
+ s_trace_object = NULL;
+}
+
+void Trace(const char* format, ...) {
+ if (!s_trace_buffer || !s_trace_enabled)
+ return;
+
+ va_list ap;
+ va_start(ap, format);
+ char line[kEntrySize + 2];
+
+#if defined(OS_WIN)
+ vsprintf_s(line, format, ap);
+#else
+ vsnprintf(line, kEntrySize, format, ap);
+#endif
+
+#if defined(DISK_CACHE_TRACE_TO_LOG)
+ line[kEntrySize] = '\0';
+ LOG(INFO) << line;
+#endif
+
+ va_end(ap);
+
+ {
+ base::AutoLock lock(s_lock.Get());
+ if (!s_trace_buffer || !s_trace_enabled)
+ return;
+
+ memcpy(s_trace_buffer->buffer[s_trace_buffer->current], line, kEntrySize);
+
+ s_trace_buffer->num_traces++;
+ s_trace_buffer->current++;
+ if (s_trace_buffer->current == kNumberOfEntries)
+ s_trace_buffer->current = 0;
+ }
+}
+
+// Writes the last num_traces to the debugger output.
+void DumpTrace(int num_traces) {
+ DCHECK(s_trace_buffer);
+ DebugOutput("Last traces:\n");
+
+ if (num_traces > kNumberOfEntries || num_traces < 0)
+ num_traces = kNumberOfEntries;
+
+ if (s_trace_buffer->num_traces) {
+ char line[kEntrySize + 2];
+
+ int current = s_trace_buffer->current - num_traces;
+ if (current < 0)
+ current += kNumberOfEntries;
+
+ for (int i = 0; i < num_traces; i++) {
+ memcpy(line, s_trace_buffer->buffer[current], kEntrySize);
+ line[kEntrySize] = '\0';
+ size_t length = strlen(line);
+ if (length) {
+ line[length] = '\n';
+ line[length + 1] = '\0';
+ DebugOutput(line);
+ }
+
+ current++;
+ if (current == kNumberOfEntries)
+ current = 0;
+ }
+ }
+
+ DebugOutput("End of Traces\n");
+}
+
+#else // ENABLE_TRACING
+
+void InitTrace(void) {
+ return;
+}
+
+void DestroyTrace(void) {
+ s_trace_object = NULL;
+}
+
+void Trace(const char* format, ...) {
+}
+
+#endif // ENABLE_TRACING
+
+} // namespace disk_cache
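
Trace() above formats a short line and drops it into a fixed-size circular buffer, so DumpTrace() (or a debugger inspecting the buffer) can always recover the most recent events. The following is a minimal single-threaded standalone sketch of the same ring-buffer idea; the sizes are illustrative, not the constants used above, and locking is deliberately omitted.

#include <cstdarg>
#include <cstdio>
#include <cstring>

namespace {

const int kEntrySize = 48;
const int kNumberOfEntries = 8;

struct TraceBuffer {
  int num_traces;
  int current;
  char buffer[kNumberOfEntries][kEntrySize];
};

TraceBuffer g_trace = {};

void Trace(const char* format, ...) {
  char line[kEntrySize] = {};
  va_list ap;
  va_start(ap, format);
  vsnprintf(line, kEntrySize, format, ap);
  va_end(ap);

  std::memcpy(g_trace.buffer[g_trace.current], line, kEntrySize);
  g_trace.num_traces++;
  g_trace.current = (g_trace.current + 1) % kNumberOfEntries;
}

// Prints the last |num_traces| entries, oldest first.
void DumpTrace(int num_traces) {
  if (num_traces > kNumberOfEntries)
    num_traces = kNumberOfEntries;
  int current = (g_trace.current - num_traces + kNumberOfEntries) %
                kNumberOfEntries;
  for (int i = 0; i < num_traces; ++i) {
    std::printf("%s\n", g_trace.buffer[current]);
    current = (current + 1) % kNumberOfEntries;
  }
}

}  // namespace

int main() {
  for (int i = 0; i < 20; ++i)
    Trace("event %d", i);
  DumpTrace(4);  // Prints events 16..19, the most recent four.
  return 0;
}
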
diff --git a/chromium/net/disk_cache/trace.h b/chromium/net/disk_cache/trace.h
new file mode 100644
index 00000000000..b0bf1ad7284
--- /dev/null
+++ b/chromium/net/disk_cache/trace.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides support for basic in-memory tracing of short events. We
+// keep a static circular buffer where we store the last traced events, so we
+// can review the cache's recent behavior should we need it.
+
+#ifndef NET_DISK_CACHE_TRACE_H__
+#define NET_DISK_CACHE_TRACE_H__
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+
+// Create and destroy the tracing buffer.
+void InitTrace(void);
+void DestroyTrace(void);
+
+// Simple class to handle the trace buffer lifetime. Any object interested in
+// tracing should keep a reference to the object returned by GetTraceObject().
+class TraceObject : public base::RefCounted<TraceObject> {
+ friend class base::RefCounted<TraceObject>;
+ public:
+ static TraceObject* GetTraceObject();
+ void EnableTracing(bool enable);
+
+ private:
+ TraceObject();
+ ~TraceObject();
+ DISALLOW_COPY_AND_ASSIGN(TraceObject);
+};
+
+// Traces to the internal buffer.
+NET_EXPORT_PRIVATE void Trace(const char* format, ...);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_TRACE_H__
diff --git a/chromium/net/disk_cache/tracing_cache_backend.cc b/chromium/net/disk_cache/tracing_cache_backend.cc
new file mode 100644
index 00000000000..4966133f00d
--- /dev/null
+++ b/chromium/net/disk_cache/tracing_cache_backend.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/tracing_cache_backend.h"
+
+#include "net/base/net_errors.h"
+
+namespace disk_cache {
+
+// Proxies entry objects created by the real underlying backend. Backend users
+// will only see the proxy entries. This is necessary for recording backend
+// operations, since non-trivial work is often invoked directly on entries.
+class EntryProxy : public Entry, public base::RefCountedThreadSafe<EntryProxy> {
+ public:
+ EntryProxy(Entry *entry, TracingCacheBackend* backend);
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ friend class base::RefCountedThreadSafe<EntryProxy>;
+ typedef TracingCacheBackend::Operation Operation;
+ virtual ~EntryProxy();
+
+ struct RwOpExtra {
+ int index;
+ int offset;
+ int buf_len;
+ bool truncate;
+ };
+
+ void RecordEvent(base::TimeTicks start_time, Operation op, RwOpExtra extra,
+ int result_to_record);
+ void EntryOpComplete(base::TimeTicks start_time, Operation op,
+ RwOpExtra extra, const CompletionCallback& cb,
+ int result);
+ Entry* entry_;
+ base::WeakPtr<TracingCacheBackend> backend_;
+
+ DISALLOW_COPY_AND_ASSIGN(EntryProxy);
+};
+
+EntryProxy::EntryProxy(Entry *entry, TracingCacheBackend* backend)
+ : entry_(entry),
+ backend_(backend->AsWeakPtr()) {
+}
+
+void EntryProxy::Doom() {
+ // TODO(pasko): Record the event.
+ entry_->Doom();
+}
+
+void EntryProxy::Close() {
+ // TODO(pasko): Record the event.
+ entry_->Close();
+ Release();
+}
+
+std::string EntryProxy::GetKey() const {
+ return entry_->GetKey();
+}
+
+base::Time EntryProxy::GetLastUsed() const {
+ return entry_->GetLastUsed();
+}
+
+base::Time EntryProxy::GetLastModified() const {
+ return entry_->GetLastModified();
+}
+
+int32 EntryProxy::GetDataSize(int index) const {
+ return entry_->GetDataSize(index);
+}
+
+int EntryProxy::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ RwOpExtra extra;
+ extra.index = index;
+ extra.offset = offset;
+ extra.buf_len = buf_len;
+ extra.truncate = false;
+ int rv = entry_->ReadData(
+ index, offset, buf, buf_len,
+ base::Bind(&EntryProxy::EntryOpComplete, this, start_time,
+ TracingCacheBackend::OP_READ, extra, callback));
+ if (rv != net::ERR_IO_PENDING) {
+ RecordEvent(start_time, TracingCacheBackend::OP_READ, extra, rv);
+ }
+ return rv;
+}
+
+int EntryProxy::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ RwOpExtra extra;
+ extra.index = index;
+ extra.offset = offset;
+ extra.buf_len = buf_len;
+ extra.truncate = truncate;
+ int rv = entry_->WriteData(index, offset, buf, buf_len,
+ base::Bind(&EntryProxy::EntryOpComplete, this, start_time,
+ TracingCacheBackend::OP_WRITE, extra, callback),
+ truncate);
+ if (rv != net::ERR_IO_PENDING) {
+ RecordEvent(start_time, TracingCacheBackend::OP_WRITE, extra, rv);
+ }
+ return rv;
+}
+
+int EntryProxy::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ // TODO(pasko): Record the event.
+ return entry_->ReadSparseData(offset, buf, buf_len, callback);
+}
+
+int EntryProxy::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ // TODO(pasko): Record the event.
+ return entry_->WriteSparseData(offset, buf, buf_len, callback);
+}
+
+int EntryProxy::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ return entry_->GetAvailableRange(offset, len, start, callback);
+}
+
+bool EntryProxy::CouldBeSparse() const {
+ return entry_->CouldBeSparse();
+}
+
+void EntryProxy::CancelSparseIO() {
+ return entry_->CancelSparseIO();
+}
+
+int EntryProxy::ReadyForSparseIO(const CompletionCallback& callback) {
+ return entry_->ReadyForSparseIO(callback);
+}
+
+void EntryProxy::RecordEvent(base::TimeTicks start_time, Operation op,
+ RwOpExtra extra, int result_to_record) {
+ // TODO(pasko): Implement.
+}
+
+void EntryProxy::EntryOpComplete(base::TimeTicks start_time, Operation op,
+ RwOpExtra extra, const CompletionCallback& cb,
+ int result) {
+ RecordEvent(start_time, op, extra, result);
+ if (!cb.is_null()) {
+ cb.Run(result);
+ }
+}
+
+EntryProxy::~EntryProxy() {
+ if (backend_.get()) {
+ backend_->OnDeleteEntry(entry_);
+ }
+}
+
+TracingCacheBackend::TracingCacheBackend(scoped_ptr<Backend> backend)
+ : backend_(backend.Pass()) {
+}
+
+TracingCacheBackend::~TracingCacheBackend() {
+}
+
+net::CacheType TracingCacheBackend::GetCacheType() const {
+ return backend_->GetCacheType();
+}
+
+int32 TracingCacheBackend::GetEntryCount() const {
+ return backend_->GetEntryCount();
+}
+
+void TracingCacheBackend::RecordEvent(base::TimeTicks start_time, Operation op,
+ std::string key, Entry* entry, int rv) {
+ // TODO(pasko): Implement.
+}
+
+EntryProxy* TracingCacheBackend::FindOrCreateEntryProxy(Entry* entry) {
+ EntryProxy* entry_proxy;
+ EntryToProxyMap::iterator it = open_entries_.find(entry);
+ if (it != open_entries_.end()) {
+ entry_proxy = it->second;
+ entry_proxy->AddRef();
+ return entry_proxy;
+ }
+ entry_proxy = new EntryProxy(entry, this);
+ entry_proxy->AddRef();
+ open_entries_[entry] = entry_proxy;
+ return entry_proxy;
+}
+
+void TracingCacheBackend::OnDeleteEntry(Entry* entry) {
+ EntryToProxyMap::iterator it = open_entries_.find(entry);
+ if (it != open_entries_.end()) {
+ open_entries_.erase(it);
+ }
+}
+
+void TracingCacheBackend::BackendOpComplete(base::TimeTicks start_time,
+ Operation op,
+ std::string key,
+ Entry** entry,
+ const CompletionCallback& callback,
+ int result) {
+ RecordEvent(start_time, op, key, *entry, result);
+ if (*entry) {
+ *entry = FindOrCreateEntryProxy(*entry);
+ }
+ if (!callback.is_null()) {
+ callback.Run(result);
+ }
+}
+
+net::CompletionCallback TracingCacheBackend::BindCompletion(
+ Operation op, base::TimeTicks start_time, const std::string& key,
+ Entry **entry, const net::CompletionCallback& cb) {
+ return base::Bind(&TracingCacheBackend::BackendOpComplete,
+ AsWeakPtr(), start_time, op, key, entry, cb);
+}
+
+int TracingCacheBackend::OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ DCHECK(*entry == NULL);
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ int rv = backend_->OpenEntry(key, entry,
+ BindCompletion(OP_OPEN, start_time, key, entry,
+ callback));
+ if (rv != net::ERR_IO_PENDING) {
+ RecordEvent(start_time, OP_OPEN, key, *entry, rv);
+ if (*entry) {
+ *entry = FindOrCreateEntryProxy(*entry);
+ }
+ }
+ return rv;
+}
+
+int TracingCacheBackend::CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ int rv = backend_->CreateEntry(key, entry,
+ BindCompletion(OP_CREATE, start_time, key,
+ entry, callback));
+ if (rv != net::ERR_IO_PENDING) {
+ RecordEvent(start_time, OP_CREATE, key, *entry, rv);
+ if (*entry) {
+ *entry = FindOrCreateEntryProxy(*entry);
+ }
+ }
+ return rv;
+}
+
+int TracingCacheBackend::DoomEntry(const std::string& key,
+ const CompletionCallback& callback) {
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ int rv = backend_->DoomEntry(key, BindCompletion(OP_DOOM_ENTRY,
+ start_time, key, NULL,
+ callback));
+ if (rv != net::ERR_IO_PENDING) {
+ RecordEvent(start_time, OP_DOOM_ENTRY, key, NULL, rv);
+ }
+ return rv;
+}
+
+int TracingCacheBackend::DoomAllEntries(const CompletionCallback& callback) {
+ return backend_->DoomAllEntries(callback);
+}
+
+int TracingCacheBackend::DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& cb) {
+ return backend_->DoomEntriesBetween(initial_time, end_time, cb);
+}
+
+int TracingCacheBackend::DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) {
+ return backend_->DoomEntriesSince(initial_time, callback);
+}
+
+int TracingCacheBackend::OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) {
+ return backend_->OpenNextEntry(iter, next_entry, callback);
+}
+
+void TracingCacheBackend::EndEnumeration(void** iter) {
+ return backend_->EndEnumeration(iter);
+}
+
+void TracingCacheBackend::GetStats(StatsItems* stats) {
+ return backend_->GetStats(stats);
+}
+
+void TracingCacheBackend::OnExternalCacheHit(const std::string& key) {
+ return backend_->OnExternalCacheHit(key);
+}
+
+} // namespace disk_cache
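
FindOrCreateEntryProxy() above guarantees one proxy per underlying entry: repeated opens of the same entry return the same proxy with an extra reference, and the mapping disappears when the last user closes it (Close() drops a reference, the destructor calls OnDeleteEntry()). Here is a simplified standalone sketch of that bookkeeping, with a plain manual reference count standing in for base::RefCountedThreadSafe.

#include <cassert>
#include <map>

namespace {

struct RealEntry {};  // Stand-in for the wrapped disk_cache::Entry.

struct Proxy {
  RealEntry* entry;
  int ref_count;
};

class ProxyMap {
 public:
  // Returns the existing proxy for |entry| (with an extra reference) or
  // creates one, mirroring FindOrCreateEntryProxy().
  Proxy* FindOrCreate(RealEntry* entry) {
    std::map<RealEntry*, Proxy*>::iterator it = open_.find(entry);
    if (it != open_.end()) {
      it->second->ref_count++;
      return it->second;
    }
    Proxy* proxy = new Proxy();
    proxy->entry = entry;
    proxy->ref_count = 1;
    open_[entry] = proxy;
    return proxy;
  }

  // Mirrors EntryProxy::Close() plus OnDeleteEntry(): drop one reference and
  // forget the proxy once the last user is gone.
  void Release(Proxy* proxy) {
    if (--proxy->ref_count == 0) {
      open_.erase(proxy->entry);
      delete proxy;
    }
  }

  size_t size() const { return open_.size(); }

 private:
  std::map<RealEntry*, Proxy*> open_;
};

}  // namespace

int main() {
  RealEntry real;
  ProxyMap map;

  Proxy* a = map.FindOrCreate(&real);
  Proxy* b = map.FindOrCreate(&real);
  assert(a == b);           // Same underlying entry, same proxy.
  assert(map.size() == 1);

  map.Release(a);
  assert(map.size() == 1);  // Still one open user.
  map.Release(b);
  assert(map.size() == 0);  // Last close removes the mapping.
  return 0;
}
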
diff --git a/chromium/net/disk_cache/tracing_cache_backend.h b/chromium/net/disk_cache/tracing_cache_backend.h
new file mode 100644
index 00000000000..304a7334d83
--- /dev/null
+++ b/chromium/net/disk_cache/tracing_cache_backend.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_TRACING_CACHE_BACKEND_H_
+#define NET_DISK_CACHE_TRACING_CACHE_BACKEND_H_
+
+#include "base/memory/weak_ptr.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/stats.h"
+
+namespace disk_cache {
+
+class EntryProxy;
+
+// The TracingCacheBackend implements the Backend interface. It intercepts all
+// backend operations coming from the IO thread and records the time from the
+// start of each operation until its result is delivered.
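+//
+// A minimal usage sketch (illustrative only; the wrapped backend, |key|,
+// |entry| and callback are assumptions, not part of this header):
+//
+//   scoped_ptr<Backend> real_backend = ...;  // any concrete Backend
+//   scoped_ptr<Backend> traced(new TracingCacheBackend(real_backend.Pass()));
+//   traced->OpenEntry(key, &entry, callback);  // the call is timed/recorded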
+class NET_EXPORT TracingCacheBackend : public Backend,
+ public base::SupportsWeakPtr<TracingCacheBackend> {
+ public:
+ explicit TracingCacheBackend(scoped_ptr<Backend> backend);
+
+ virtual net::CacheType GetCacheType() const OVERRIDE;
+ virtual int32 GetEntryCount() const OVERRIDE;
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual void EndEnumeration(void** iter) OVERRIDE;
+ virtual void GetStats(StatsItems* stats) OVERRIDE;
+ virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
+
+ private:
+ friend class EntryProxy;
+ enum Operation {
+ OP_OPEN,
+ OP_CREATE,
+ OP_DOOM_ENTRY,
+ OP_READ,
+ OP_WRITE
+ };
+
+ virtual ~TracingCacheBackend();
+
+ EntryProxy* FindOrCreateEntryProxy(Entry* entry);
+
+ void OnDeleteEntry(Entry* e);
+
+ void RecordEvent(base::TimeTicks start_time, Operation op, std::string key,
+ Entry* entry, int result);
+
+ void BackendOpComplete(base::TimeTicks start_time, Operation op,
+ std::string key, Entry** entry,
+ const CompletionCallback& callback, int result);
+
+ net::CompletionCallback BindCompletion(Operation op,
+ base::TimeTicks start_time,
+ const std::string& key, Entry **entry,
+ const net::CompletionCallback& cb);
+
+ scoped_ptr<Backend> backend_;
+ typedef std::map<Entry*, EntryProxy*> EntryToProxyMap;
+ EntryToProxyMap open_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(TracingCacheBackend);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_TRACING_CACHE_BACKEND_H_
diff --git a/chromium/net/disk_cache/v3/backend_impl_v3.cc b/chromium/net/disk_cache/v3/backend_impl_v3.cc
new file mode 100644
index 00000000000..92ea272226b
--- /dev/null
+++ b/chromium/net/disk_cache/v3/backend_impl_v3.cc
@@ -0,0 +1,1640 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/backend_impl.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/hash.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/stats_counters.h"
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/file.h"
+
+// This has to be defined before including histogram_macros.h from this file.
+#define NET_DISK_CACHE_BACKEND_IMPL_CC_
+#include "net/disk_cache/histogram_macros.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+const char* kIndexName = "index";
+
+// Roughly 240 MB corresponds to fewer than 50k entries for 99% of users.
+// Note that the actual target is to keep the index table load factor under 55%
+// for most users.
+const int k64kEntriesStore = 240 * 1000 * 1000;
+const int kBaseTableLen = 64 * 1024;
+const int kDefaultCacheSize = 80 * 1024 * 1024;
+
+// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
+const int kTrimDelay = 10;
+
+int DesiredIndexTableLen(int32 storage_size) {
+ if (storage_size <= k64kEntriesStore)
+ return kBaseTableLen;
+ if (storage_size <= k64kEntriesStore * 2)
+ return kBaseTableLen * 2;
+ if (storage_size <= k64kEntriesStore * 4)
+ return kBaseTableLen * 4;
+ if (storage_size <= k64kEntriesStore * 8)
+ return kBaseTableLen * 8;
+
+ // The biggest storage_size for int32 requires a 4 MB table.
+ return kBaseTableLen * 16;
+}
+
+int MaxStorageSizeForTable(int table_len) {
+ return table_len * (k64kEntriesStore / kBaseTableLen);
+}
+
+size_t GetIndexSize(int table_len) {
+ size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
+ return sizeof(disk_cache::IndexHeader) + table_size;
+}
+
+} // namespace
+
+// ------------------------------------------------------------------------
+
+namespace disk_cache {
+
+BackendImpl::BackendImpl(const base::FilePath& path,
+ base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log)
+ : background_queue_(this, cache_thread),
+ path_(path),
+ block_files_(path),
+ mask_(0),
+ max_size_(0),
+ up_ticks_(0),
+ cache_type_(net::DISK_CACHE),
+ uma_report_(0),
+ user_flags_(0),
+ init_(false),
+ restarted_(false),
+ unit_test_(false),
+ read_only_(false),
+ disabled_(false),
+ new_eviction_(false),
+ first_timer_(true),
+ user_load_(false),
+ net_log_(net_log),
+ done_(true, false),
+ ptr_factory_(this) {
+}
+
+BackendImpl::BackendImpl(const base::FilePath& path,
+ uint32 mask,
+ base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log)
+ : background_queue_(this, cache_thread),
+ path_(path),
+ block_files_(path),
+ mask_(mask),
+ max_size_(0),
+ up_ticks_(0),
+ cache_type_(net::DISK_CACHE),
+ uma_report_(0),
+ user_flags_(kMask),
+ init_(false),
+ restarted_(false),
+ unit_test_(false),
+ read_only_(false),
+ disabled_(false),
+ new_eviction_(false),
+ first_timer_(true),
+ user_load_(false),
+ net_log_(net_log),
+ done_(true, false),
+ ptr_factory_(this) {
+}
+
+BackendImpl::~BackendImpl() {
+ if (user_flags_ & kNoRandom) {
+ // This is a unit test, so we want to be strict about not leaking entries
+ // and completing all the work.
+ background_queue_.WaitForPendingIO();
+ } else {
+ // This is most likely not a test, so we want to do as little work as
+ // possible at this time, at the price of leaving dirty entries behind.
+ background_queue_.DropPendingIO();
+ }
+
+ if (background_queue_.BackgroundIsCurrentThread()) {
+ // Unit tests may use the same thread for everything.
+ CleanupCache();
+ } else {
+ background_queue_.background_thread()->PostTask(
+ FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
+ // http://crbug.com/74623
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ done_.Wait();
+ }
+}
+
+int BackendImpl::Init(const CompletionCallback& callback) {
+ background_queue_.Init(callback);
+ return net::ERR_IO_PENDING;
+}
+
+// ------------------------------------------------------------------------
+
+int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.OpenPrevEntry(iter, prev_entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+bool BackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ // Avoid a DCHECK later on.
+ if (max_bytes >= kint32max - kint32max / 10)
+ max_bytes = kint32max - kint32max / 10 - 1;
+
+ user_flags_ |= kMaxSize;
+ max_size_ = max_bytes;
+ return true;
+}
+
+void BackendImpl::SetType(net::CacheType type) {
+ DCHECK_NE(net::MEMORY_CACHE, type);
+ cache_type_ = type;
+}
+
+bool BackendImpl::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ return block_files_.CreateBlock(block_type, block_count, block_address);
+}
+
+void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
+ if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
+ return;
+ eviction_.UpdateRank(entry, modified);
+}
+
+void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
+ uint32 hash = entry->GetHash();
+ std::string key = entry->GetKey();
+ Addr entry_addr = entry->entry()->address();
+ bool error;
+ EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
+ CacheAddr child(entry->GetNextAddress());
+
+ Trace("Doom entry 0x%p", entry);
+
+ if (!entry->doomed()) {
+ // We may have doomed this entry from within MatchEntry.
+ eviction_.OnDoomEntry(entry);
+ entry->InternalDoom();
+ if (!new_eviction_) {
+ DecreaseNumEntries();
+ }
+ stats_.OnEvent(Stats::DOOM_ENTRY);
+ }
+
+ if (parent_entry) {
+ parent_entry->SetNextAddress(Addr(child));
+ parent_entry->Release();
+ } else if (!error) {
+ data_->table[hash & mask_] = child;
+ }
+
+ FlushIndex();
+}
+
+void BackendImpl::OnEntryDestroyBegin(Addr address) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end())
+ open_entries_.erase(it);
+}
+
+void BackendImpl::OnEntryDestroyEnd() {
+ DecreaseNumRefs();
+ if (data_->header.num_bytes > max_size_ && !read_only_ &&
+ (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
+ eviction_.TrimCache(false);
+}
+
+EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
+ DCHECK(rankings->HasData());
+ EntriesMap::const_iterator it =
+ open_entries_.find(rankings->Data()->contents);
+ if (it != open_entries_.end()) {
+ // We have this entry in memory.
+ return it->second;
+ }
+
+ return NULL;
+}
+
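+// Individual cache files are limited to 1/8 of the configured maximum size.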
+int BackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (disabled_ || old_size == new_size)
+ return;
+ if (old_size > new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+
+ FlushIndex();
+
+ // Update the usage statistics.
+ stats_.ModifyStorageStats(old_size, new_size);
+}
+
+void BackendImpl::TooMuchStorageRequested(int32 size) {
+ stats_.ModifyStorageStats(0, size);
+}
+
+bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
+ DCHECK_GT(new_size, current_size);
+ if (user_flags_ & kNoBuffering)
+ return false;
+
+ int to_add = new_size - current_size;
+ if (buffer_bytes_ + to_add > MaxBuffersSize())
+ return false;
+
+ buffer_bytes_ += to_add;
+ CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
+ return true;
+}
+
+void BackendImpl::BufferDeleted(int size) {
+ buffer_bytes_ -= size;
+ DCHECK_GE(size, 0);
+}
+
+bool BackendImpl::IsLoaded() const {
+ CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
+ if (user_flags_ & kNoLoadProtection)
+ return false;
+
+ return (num_pending_io_ > 5 || user_load_);
+}
+
+std::string BackendImpl::HistogramName(const char* name, int experiment) const {
+ if (!experiment)
+ return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
+ return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
+ name, experiment);
+}
+
+base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
+ return ptr_factory_.GetWeakPtr();
+}
+
+// We want to remove biases from some histograms so we only send data once per
+// week.
+bool BackendImpl::ShouldReportAgain() {
+ if (uma_report_)
+ return uma_report_ == 2;
+
+ uma_report_++;
+ int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+ Time last_time = Time::FromInternalValue(last_report);
+ if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
+ stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
+ uma_report_++;
+ return true;
+ }
+ return false;
+}
+
+void BackendImpl::FirstEviction() {
+ DCHECK(data_->header.create_time);
+ if (!GetEntryCount())
+ return; // This is just for unit tests.
+
+ Time create_time = Time::FromInternalValue(data_->header.create_time);
+ CACHE_UMA(AGE, "FillupAge", 0, create_time);
+
+ int64 use_time = stats_.GetCounter(Stats::TIMER);
+ CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
+ CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
+
+ if (!use_time)
+ use_time = 1;
+ CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
+ static_cast<int>(data_->header.num_entries / use_time));
+ CACHE_UMA(COUNTS, "FirstByteIORate", 0,
+ static_cast<int>((data_->header.num_bytes / 1024) / use_time));
+
+ int avg_size = data_->header.num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+ CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
+
+ if (new_eviction_) {
+ CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
+ data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
+ data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
+ data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
+ }
+
+ stats_.ResetRatios();
+}
+
+void BackendImpl::OnEvent(Stats::Counters an_event) {
+ stats_.OnEvent(an_event);
+}
+
+void BackendImpl::OnRead(int32 bytes) {
+ DCHECK_GE(bytes, 0);
+ byte_count_ += bytes;
+ if (byte_count_ < 0)
+ byte_count_ = kint32max;
+}
+
+void BackendImpl::OnWrite(int32 bytes) {
+ // We use the same implementation as OnRead... just log the number of bytes.
+ OnRead(bytes);
+}
+
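+// Invoked by |timer_| every 30 seconds (1 second in unit tests, see SyncInit),
+// so 120 ticks of Stats::TIMER correspond to roughly one hour of use; the
+// "/ 120" tick-to-hour conversions elsewhere (e.g. ReportStats) rely on that.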
+void BackendImpl::OnStatsTimer() {
+ stats_.OnEvent(Stats::TIMER);
+ int64 time = stats_.GetCounter(Stats::TIMER);
+ int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
+
+ // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
+ // the bias towards 0.
+ if (num_refs_ && (current != num_refs_)) {
+ int64 diff = (num_refs_ - current) / 50;
+ if (!diff)
+ diff = num_refs_ > current ? 1 : -1;
+ current = current + diff;
+ stats_.SetCounter(Stats::OPEN_ENTRIES, current);
+ stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
+ }
+
+ CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
+
+ CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
+ CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
+
+ // These values cover about 99.5% of the population (Oct 2011).
+ user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
+ entry_count_ = 0;
+ byte_count_ = 0;
+ up_ticks_++;
+
+ if (!data_)
+ first_timer_ = false;
+ if (first_timer_) {
+ first_timer_ = false;
+ if (ShouldReportAgain())
+ ReportStats();
+ }
+
+ // Save stats to disk at 5 min intervals.
+ if (time % 10 == 0)
+ StoreStats();
+}
+
+void BackendImpl::SetUnitTestMode() {
+ user_flags_ |= kUnitTestMode;
+ unit_test_ = true;
+}
+
+void BackendImpl::SetUpgradeMode() {
+ user_flags_ |= kUpgradeMode;
+ read_only_ = true;
+}
+
+void BackendImpl::SetNewEviction() {
+ user_flags_ |= kNewEviction;
+ new_eviction_ = true;
+}
+
+void BackendImpl::SetFlags(uint32 flags) {
+ user_flags_ |= flags;
+}
+
+int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
+ background_queue_.FlushQueue(callback);
+ return net::ERR_IO_PENDING;
+}
+
+void BackendImpl::TrimForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimCache(empty);
+}
+
+void BackendImpl::TrimDeletedListForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimDeletedList(empty);
+}
+
+int BackendImpl::SelfCheck() {
+ if (!init_) {
+ LOG(ERROR) << "Init failed";
+ return ERR_INIT_FAILED;
+ }
+
+ int num_entries = rankings_.SelfCheck();
+ if (num_entries < 0) {
+ LOG(ERROR) << "Invalid rankings list, error " << num_entries;
+#if !defined(NET_BUILD_STRESS_CACHE)
+ return num_entries;
+#endif
+ }
+
+ if (num_entries != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries mismatch";
+#if !defined(NET_BUILD_STRESS_CACHE)
+ return ERR_NUM_ENTRIES_MISMATCH;
+#endif
+ }
+
+ return CheckAllEntries();
+}
+
+// ------------------------------------------------------------------------
+
+net::CacheType BackendImpl::GetCacheType() const {
+ return cache_type_;
+}
+
+int32 BackendImpl::GetEntryCount() const {
+ if (!index_.get() || disabled_)
+ return 0;
+ // num_entries includes entries already evicted.
+ int32 not_deleted = data_->header.num_entries -
+ data_->header.lru.sizes[Rankings::DELETED];
+
+ if (not_deleted < 0) {
+ NOTREACHED();
+ not_deleted = 0;
+ }
+
+ return not_deleted;
+}
+
+EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
+ if (disabled_)
+ return NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ uint32 hash = base::Hash(key);
+ Trace("Open hash 0x%x", hash);
+
+ bool error;
+ EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
+ if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
+ // The entry was already evicted.
+ cache_entry->Release();
+ cache_entry = NULL;
+ }
+
+ int current_size = data_->header.num_bytes / (1024 * 1024);
+ int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+ int64 use_hours = total_hours - no_use_hours;
+
+ if (!cache_entry) {
+ CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
+ CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
+ CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
+ CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return NULL;
+ }
+
+ eviction_.OnOpenEntry(cache_entry);
+ entry_count_++;
+
+ Trace("Open hash 0x%x end: 0x%x", hash,
+ cache_entry->entry()->address().value());
+ CACHE_UMA(AGE_MS, "OpenTime", 0, start);
+ CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
+ CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
+ CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
+ stats_.OnEvent(Stats::OPEN_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.hit");
+ return cache_entry;
+}
+
+EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
+ if (disabled_ || key.empty())
+ return NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ uint32 hash = base::Hash(key);
+ Trace("Create hash 0x%x", hash);
+
+ scoped_refptr<EntryImpl> parent;
+ Addr entry_address(data_->table[hash & mask_]);
+ if (entry_address.is_initialized()) {
+ // We have an entry already. It could be the one we are looking for, or just
+ // a hash conflict.
+ bool error;
+ EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
+ if (old_entry)
+ return ResurrectEntry(old_entry);
+
+ EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
+ DCHECK(!error);
+ if (parent_entry) {
+ parent.swap(&parent_entry);
+ } else if (data_->table[hash & mask_]) {
+ // We should have corrected the problem.
+ NOTREACHED();
+ return NULL;
+ }
+ }
+
+ // The general flow is to allocate disk space and initialize the entry data,
+ // followed by saving that to disk, then linking the entry though the index
+ // and finally through the lists. If there is a crash in this process, we may
+ // end up with:
+ // a. Used, unreferenced empty blocks on disk (basically just garbage).
+ // b. Used, unreferenced but meaningful data on disk (more garbage).
+ // c. A fully formed entry, reachable only through the index.
+ // d. A fully formed entry, also reachable through the lists, but still dirty.
+ //
+ // Anything after (b) can be automatically cleaned up. We may consider saving
+ // the current operation (as we do while manipulating the lists) so that we
+ // can detect and cleanup (a) and (b).
+
+ int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
+ if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ Addr node_address(0);
+ if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
+ block_files_.DeleteBlock(entry_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(
+ new EntryImpl(this, entry_address, false));
+ IncreaseNumRefs();
+
+ if (!cache_entry->CreateEntry(node_address, key, hash)) {
+ block_files_.DeleteBlock(entry_address, false);
+ block_files_.DeleteBlock(node_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ cache_entry->BeginLogging(net_log_, true);
+
+ // We are not failing the operation; let's add this to the map.
+ open_entries_[entry_address.value()] = cache_entry.get();
+
+ // Save the entry.
+ cache_entry->entry()->Store();
+ cache_entry->rankings()->Store();
+ IncreaseNumEntries();
+ entry_count_++;
+
+ // Link this entry through the index.
+ if (parent.get()) {
+ parent->SetNextAddress(entry_address);
+ } else {
+ data_->table[hash & mask_] = entry_address.value();
+ }
+
+ // Link this entry through the lists.
+ eviction_.OnCreateEntry(cache_entry.get());
+
+ CACHE_UMA(AGE_MS, "CreateTime", 0, start);
+ stats_.OnEvent(Stats::CREATE_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.miss");
+ Trace("create entry hit ");
+ FlushIndex();
+ cache_entry->AddRef();
+ return cache_entry.get();
+}
+
+int BackendImpl::SyncDoomEntry(const std::string& key) {
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ EntryImpl* entry = OpenEntryImpl(key);
+ if (!entry)
+ return net::ERR_FAILED;
+
+ entry->DoomImpl();
+ entry->Release();
+ return net::OK;
+}
+
+int BackendImpl::SyncDoomAllEntries() {
+ // This is not really an error, but it is an interesting condition.
+ ReportError(ERR_CACHE_DOOMED);
+ stats_.OnEvent(Stats::DOOM_CACHE);
+ if (!num_refs_) {
+ RestartCache(false);
+ return disabled_ ? net::ERR_FAILED : net::OK;
+ } else {
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ eviction_.TrimCache(true);
+ return net::OK;
+ }
+}
+
+int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time) {
+ DCHECK_NE(net::APP_CACHE, cache_type_);
+ if (end_time.is_null())
+ return SyncDoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ EntryImpl* node;
+ void* iter = NULL;
+ EntryImpl* next = OpenNextEntryImpl(&iter);
+ if (!next)
+ return net::OK;
+
+ while (next) {
+ node = next;
+ next = OpenNextEntryImpl(&iter);
+
+ if (node->GetLastUsed() >= initial_time &&
+ node->GetLastUsed() < end_time) {
+ node->DoomImpl();
+ } else if (node->GetLastUsed() < initial_time) {
+ if (next)
+ next->Release();
+ next = NULL;
+ SyncEndEnumeration(iter);
+ }
+
+ node->Release();
+ }
+
+ return net::OK;
+}
+
+// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
+// entries that are too old.
+int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
+ DCHECK_NE(net::APP_CACHE, cache_type_);
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ stats_.OnEvent(Stats::DOOM_RECENT);
+ for (;;) {
+ void* iter = NULL;
+ EntryImpl* entry = OpenNextEntryImpl(&iter);
+ if (!entry)
+ return net::OK;
+
+ if (initial_time > entry->GetLastUsed()) {
+ entry->Release();
+ SyncEndEnumeration(iter);
+ return net::OK;
+ }
+
+ entry->DoomImpl();
+ entry->Release();
+ SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator.
+ }
+}
+
+int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ background_queue_.OpenNextEntry(iter, next_entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+void BackendImpl::EndEnumeration(void** iter) {
+ background_queue_.EndEnumeration(*iter);
+ *iter = NULL;
+}
+
+void BackendImpl::GetStats(StatsItems* stats) {
+ if (disabled_)
+ return;
+
+ std::pair<std::string, std::string> item;
+
+ item.first = "Entries";
+ item.second = base::StringPrintf("%d", data_->header.num_entries);
+ stats->push_back(item);
+
+ item.first = "Pending IO";
+ item.second = base::StringPrintf("%d", num_pending_io_);
+ stats->push_back(item);
+
+ item.first = "Max size";
+ item.second = base::StringPrintf("%d", max_size_);
+ stats->push_back(item);
+
+ item.first = "Current size";
+ item.second = base::StringPrintf("%d", data_->header.num_bytes);
+ stats->push_back(item);
+
+ item.first = "Cache type";
+ item.second = "Blockfile Cache";
+ stats->push_back(item);
+
+ stats_.GetItems(stats);
+}
+
+void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
+ if (disabled_)
+ return;
+
+ uint32 hash = base::Hash(key);
+ bool error;
+ EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
+ if (cache_entry) {
+ if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
+ UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
+ }
+ cache_entry->Release();
+ }
+}
+
+// ------------------------------------------------------------------------
+
+// The maximum cache size will be either set explicitly by the caller, or
+// calculated by this code.
+void BackendImpl::AdjustMaxCacheSize(int table_len) {
+ if (max_size_)
+ return;
+
+ // If table_len is provided, the index file exists.
+ DCHECK(!table_len || data_->header.magic);
+
+ // The user is not setting the size, let's figure it out.
+ int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
+ if (available < 0) {
+ max_size_ = kDefaultCacheSize;
+ return;
+ }
+
+ if (table_len)
+ available += data_->header.num_bytes;
+
+ max_size_ = PreferedCacheSize(available);
+
+ // Let's not use more than the default size while we tune up the performance
+ // of bigger caches. TODO(rvargas): remove this limit.
+ if (max_size_ > kDefaultCacheSize * 4)
+ max_size_ = kDefaultCacheSize * 4;
+
+ if (!table_len)
+ return;
+
+ // If we already have a table, adjust the size to it.
+ int current_max_size = MaxStorageSizeForTable(table_len);
+ if (max_size_ > current_max_size)
+ max_size_ = current_max_size;
+}
+
+bool BackendImpl::InitStats() {
+ Addr address(data_->header.stats);
+ int size = stats_.StorageSize();
+
+ if (!address.is_initialized()) {
+ FileType file_type = Addr::RequiredFileType(size);
+ DCHECK_NE(file_type, EXTERNAL);
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!CreateBlock(file_type, num_blocks, &address))
+ return false;
+ return stats_.Init(NULL, 0, address);
+ }
+
+ if (!address.is_block_file()) {
+ NOTREACHED();
+ return false;
+ }
+
+ // Load the required data.
+ size = address.num_blocks() * address.BlockSize();
+ MappedFile* file = File(address);
+ if (!file)
+ return false;
+
+ scoped_ptr<char[]> data(new char[size]);
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (!file->Read(data.get(), size, offset))
+ return false;
+
+ if (!stats_.Init(data.get(), size, address))
+ return false;
+ if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
+ stats_.InitSizeHistogram();
+ return true;
+}
+
+void BackendImpl::StoreStats() {
+ int size = stats_.StorageSize();
+ scoped_ptr<char[]> data(new char[size]);
+ Addr address;
+ size = stats_.SerializeStats(data.get(), size, &address);
+ DCHECK(size);
+ if (!address.is_initialized())
+ return;
+
+ MappedFile* file = File(address);
+ if (!file)
+ return;
+
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ file->Write(data.get(), size, offset); // ignore result.
+}
+
+void BackendImpl::RestartCache(bool failure) {
+ int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
+ int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
+ int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
+ int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+
+ PrepareForRestart();
+ if (failure) {
+ DCHECK(!num_refs_);
+ DCHECK(!open_entries_.size());
+ DelayedCacheCleanup(path_);
+ } else {
+ DeleteCache(path_, false);
+ }
+
+ // Don't call Init() if directed by the unit test: we are simulating a failure
+ // trying to re-enable the cache.
+ if (unit_test_)
+ init_ = true; // Let the destructor do proper cleanup.
+ else if (SyncInit() == net::OK) {
+ stats_.SetCounter(Stats::FATAL_ERROR, errors);
+ stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
+ stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
+ stats_.SetCounter(Stats::LAST_REPORT, last_report);
+ }
+}
+
+void BackendImpl::PrepareForRestart() {
+ // Reset the mask_ if it was not given by the user.
+ if (!(user_flags_ & kMask))
+ mask_ = 0;
+
+ if (!(user_flags_ & kNewEviction))
+ new_eviction_ = false;
+
+ disabled_ = true;
+ data_->header.crash = 0;
+ index_->Flush();
+ index_ = NULL;
+ data_ = NULL;
+ block_files_.CloseFiles();
+ rankings_.Reset();
+ init_ = false;
+ restarted_ = true;
+}
+
+void BackendImpl::CleanupCache() {
+ Trace("Backend Cleanup");
+ eviction_.Stop();
+ timer_.reset();
+
+ if (init_) {
+ StoreStats();
+ if (data_)
+ data_->header.crash = 0;
+
+ if (user_flags_ & kNoRandom) {
+ // This is a net_unittest, verify that we are not 'leaking' entries.
+ File::WaitForPendingIO(&num_pending_io_);
+ DCHECK(!num_refs_);
+ } else {
+ File::DropPendingIO();
+ }
+ }
+ block_files_.CloseFiles();
+ FlushIndex();
+ index_ = NULL;
+ ptr_factory_.InvalidateWeakPtrs();
+ done_.Signal();
+}
+
+int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end()) {
+ // Easy job. This entry is already in memory.
+ EntryImpl* this_entry = it->second;
+ this_entry->AddRef();
+ *entry = this_entry;
+ return 0;
+ }
+
+ STRESS_DCHECK(block_files_.IsValid(address));
+
+ if (!address.SanityCheckForEntry()) {
+ LOG(WARNING) << "Wrong entry address.";
+ STRESS_NOTREACHED();
+ return ERR_INVALID_ADDRESS;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(
+ new EntryImpl(this, address, read_only_));
+ IncreaseNumRefs();
+ *entry = NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ if (!cache_entry->entry()->Load())
+ return ERR_READ_FAILURE;
+
+ if (IsLoaded()) {
+ CACHE_UMA(AGE_MS, "LoadTime", 0, start);
+ }
+
+ if (!cache_entry->SanityCheck()) {
+ LOG(WARNING) << "Messed up entry found.";
+ STRESS_NOTREACHED();
+ return ERR_INVALID_ENTRY;
+ }
+
+ STRESS_DCHECK(block_files_.IsValid(
+ Addr(cache_entry->entry()->Data()->rankings_node)));
+
+ if (!cache_entry->LoadNodeAddress())
+ return ERR_READ_FAILURE;
+
+ if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
+ STRESS_NOTREACHED();
+ cache_entry->SetDirtyFlag(0);
+ // Don't remove this from the list (it is not linked properly). Instead,
+ // break the link back to the entry because it is going away, and leave the
+ // rankings node to be deleted if we find it through a list.
+ rankings_.SetContents(cache_entry->rankings(), 0);
+ } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
+ STRESS_NOTREACHED();
+ cache_entry->SetDirtyFlag(0);
+ rankings_.SetContents(cache_entry->rankings(), address.value());
+ }
+
+ if (!cache_entry->DataSanityCheck()) {
+ LOG(WARNING) << "Messed up entry found.";
+ cache_entry->SetDirtyFlag(0);
+ cache_entry->FixForDelete();
+ }
+
+ // Prevent overwriting the dirty flag on the destructor.
+ cache_entry->SetDirtyFlag(GetCurrentEntryId());
+
+ if (cache_entry->dirty()) {
+ Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
+ address.value());
+ }
+
+ open_entries_[address.value()] = cache_entry.get();
+
+ cache_entry->BeginLogging(net_log_, false);
+ cache_entry.swap(entry);
+ return 0;
+}
+
+// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
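+// The iterator keeps one position per rankings list; on every step the entries
+// at those positions are compared by last-used time and the newest one (for a
+// forward walk) or the oldest one (for a backward walk) is returned.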
+EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
+ if (disabled_)
+ return NULL;
+
+ DCHECK(iter);
+
+ const int kListsToSearch = 3;
+ scoped_refptr<EntryImpl> entries[kListsToSearch];
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(*iter));
+ *iter = NULL;
+
+ if (!iterator.get()) {
+ iterator.reset(new Rankings::Iterator(&rankings_));
+ bool ret = false;
+
+ // Get an entry from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
+ &iterator->nodes[i], &temp);
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ if (!ret)
+ return NULL;
+ } else {
+ // Get the next entry from the last list, and the actual entries for the
+ // elements on the other lists.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ if (iterator->list == i) {
+ OpenFollowingEntryFromList(forward, iterator->list,
+ &iterator->nodes[i], &temp);
+ } else {
+ temp = GetEnumeratedEntry(iterator->nodes[i],
+ static_cast<Rankings::List>(i));
+ }
+
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ }
+
+ int newest = -1;
+ int oldest = -1;
+ Time access_times[kListsToSearch];
+ for (int i = 0; i < kListsToSearch; i++) {
+ if (entries[i].get()) {
+ access_times[i] = entries[i]->GetLastUsed();
+ if (newest < 0) {
+ DCHECK_LT(oldest, 0);
+ newest = oldest = i;
+ continue;
+ }
+ if (access_times[i] > access_times[newest])
+ newest = i;
+ if (access_times[i] < access_times[oldest])
+ oldest = i;
+ }
+ }
+
+ if (newest < 0 || oldest < 0)
+ return NULL;
+
+ EntryImpl* next_entry;
+ if (forward) {
+ next_entry = entries[newest].get();
+ iterator->list = static_cast<Rankings::List>(newest);
+ } else {
+ next_entry = entries[oldest].get();
+ iterator->list = static_cast<Rankings::List>(oldest);
+ }
+
+ *iter = iterator.release();
+ next_entry->AddRef();
+ return next_entry;
+}
+
+void BackendImpl::AddStorageSize(int32 bytes) {
+ data_->header.num_bytes += bytes;
+ DCHECK_GE(data_->header.num_bytes, 0);
+}
+
+void BackendImpl::SubstractStorageSize(int32 bytes) {
+ data_->header.num_bytes -= bytes;
+ DCHECK_GE(data_->header.num_bytes, 0);
+}
+
+void BackendImpl::IncreaseNumRefs() {
+ num_refs_++;
+ if (max_refs_ < num_refs_)
+ max_refs_ = num_refs_;
+}
+
+void BackendImpl::DecreaseNumRefs() {
+ DCHECK(num_refs_);
+ num_refs_--;
+
+ if (!num_refs_ && disabled_)
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
+}
+
+void BackendImpl::IncreaseNumEntries() {
+ data_->header.num_entries++;
+ DCHECK_GT(data_->header.num_entries, 0);
+}
+
+void BackendImpl::DecreaseNumEntries() {
+ data_->header.num_entries--;
+ if (data_->header.num_entries < 0) {
+ NOTREACHED();
+ data_->header.num_entries = 0;
+ }
+}
+
+int BackendImpl::SyncInit() {
+#if defined(NET_BUILD_STRESS_CACHE)
+ // Start evictions right away.
+ up_ticks_ = kTrimDelay * 2;
+#endif
+ DCHECK(!init_);
+ if (init_)
+ return net::ERR_FAILED;
+
+ bool create_files = false;
+ if (!InitBackingStore(&create_files)) {
+ ReportError(ERR_STORAGE_ERROR);
+ return net::ERR_FAILED;
+ }
+
+ num_refs_ = num_pending_io_ = max_refs_ = 0;
+ entry_count_ = byte_count_ = 0;
+
+ if (!restarted_) {
+ buffer_bytes_ = 0;
+ trace_object_ = TraceObject::GetTraceObject();
+ // Create a recurrent timer of 30 secs.
+ int timer_delay = unit_test_ ? 1000 : 30000;
+ timer_.reset(new base::RepeatingTimer<BackendImpl>());
+ timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
+ &BackendImpl::OnStatsTimer);
+ }
+
+ init_ = true;
+ Trace("Init");
+
+ if (data_->header.experiment != NO_EXPERIMENT &&
+ cache_type_ != net::DISK_CACHE) {
+ // No experiment for other caches.
+ return net::ERR_FAILED;
+ }
+
+ if (!(user_flags_ & kNoRandom)) {
+ // The unit test controls directly what to test.
+ new_eviction_ = (cache_type_ == net::DISK_CACHE);
+ }
+
+ if (!CheckIndex()) {
+ ReportError(ERR_INIT_FAILED);
+ return net::ERR_FAILED;
+ }
+
+ if (!restarted_ && (create_files || !data_->header.num_entries))
+ ReportError(ERR_CACHE_CREATED);
+
+ if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
+ !InitExperiment(&data_->header, create_files)) {
+ return net::ERR_FAILED;
+ }
+
+ // We don't care if the value overflows. The only thing we care about is that
+ // the id cannot be zero, because that value is used as "not dirty".
+ // Increasing the value once per second gives us many years before we start
+ // having collisions.
+ data_->header.this_id++;
+ if (!data_->header.this_id)
+ data_->header.this_id++;
+
+ bool previous_crash = (data_->header.crash != 0);
+ data_->header.crash = 1;
+
+ if (!block_files_.Init(create_files))
+ return net::ERR_FAILED;
+
+ // We want to minimize the changes to cache for an AppCache.
+ if (cache_type() == net::APP_CACHE) {
+ DCHECK(!new_eviction_);
+ read_only_ = true;
+ } else if (cache_type() == net::SHADER_CACHE) {
+ DCHECK(!new_eviction_);
+ }
+
+ eviction_.Init(this);
+
+ // stats_ and rankings_ may end up calling back to us so we better be enabled.
+ disabled_ = false;
+ if (!InitStats())
+ return net::ERR_FAILED;
+
+ disabled_ = !rankings_.Init(this, new_eviction_);
+
+#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
+ trace_object_->EnableTracing(false);
+ int sc = SelfCheck();
+ if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
+ NOTREACHED();
+ trace_object_->EnableTracing(true);
+#endif
+
+ if (previous_crash) {
+ ReportError(ERR_PREVIOUS_CRASH);
+ } else if (!restarted_) {
+ ReportError(ERR_NO_ERROR);
+ }
+
+ FlushIndex();
+
+ return disabled_ ? net::ERR_FAILED : net::OK;
+}
+
+EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
+ if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
+ deleted_entry->Release();
+ stats_.OnEvent(Stats::CREATE_MISS);
+ Trace("create entry miss ");
+ return NULL;
+ }
+
+ // We are attempting to create an entry and found out that the entry was
+ // previously deleted.
+
+ eviction_.OnCreateEntry(deleted_entry);
+ entry_count_++;
+
+ stats_.OnEvent(Stats::RESURRECT_HIT);
+ Trace("Resurrect entry hit ");
+ return deleted_entry;
+}
+
+void BackendImpl::LogStats() {
+ StatsItems stats;
+ GetStats(&stats);
+
+ for (size_t index = 0; index < stats.size(); index++)
+ VLOG(1) << stats[index].first << ": " << stats[index].second;
+}
+
+void BackendImpl::ReportStats() {
+ CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
+
+ int current_size = data_->header.num_bytes / (1024 * 1024);
+ int max_size = max_size_ / (1024 * 1024);
+ int hit_ratio_as_percentage = stats_.GetHitRatio();
+
+ CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
+ // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
+ // ratio of that bin's total count to the count in the same bin in the Size2
+ // histogram.
+ if (base::RandInt(0, 99) < hit_ratio_as_percentage)
+ CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
+ CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
+ if (!max_size)
+ max_size++;
+ CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);
+
+ CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
+ static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
+ CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
+ static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
+ stats_.SetCounter(Stats::MAX_ENTRIES, 0);
+
+ CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
+ static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
+ CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
+ static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
+ CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
+ static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
+ stats_.SetCounter(Stats::FATAL_ERROR, 0);
+ stats_.SetCounter(Stats::DOOM_CACHE, 0);
+ stats_.SetCounter(Stats::DOOM_RECENT, 0);
+
+ int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ if (!data_->header.create_time || !data_->header.lru.filled) {
+ int cause = data_->header.create_time ? 0 : 1;
+ if (!data_->header.lru.filled)
+ cause |= 2;
+ CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
+ CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
+ return;
+ }
+
+ // This is an up to date client that will report FirstEviction() data. After
+ // that event, start reporting this:
+
+ CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
+ // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
+ // time is the ratio of that bin's total count to the count in the same bin in
+ // the TotalTime histogram.
+ if (base::RandInt(0, 99) < hit_ratio_as_percentage)
+ CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));
+
+ int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+ stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
+
+ // We may see users with no use_hours at this point if this is the first time
+ // we are running this code.
+ if (use_hours)
+ use_hours = total_hours - use_hours;
+
+ if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
+ return;
+
+ CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
+ // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
+ // is the ratio of that bin's total count to the count in the same bin in the
+ // UseTime histogram.
+ if (base::RandInt(0, 99) < hit_ratio_as_percentage)
+ CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
+ CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);
+
+ int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
+ CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
+
+ int avg_size = data_->header.num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
+ CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);
+
+ CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
+ data_->header.num_entries * 100 / (mask_ + 1));
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+ CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
+
+ if (new_eviction_) {
+ CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
+ data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
+ data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
+ data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
+ data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
+ }
+
+ stats_.ResetRatios();
+ stats_.SetCounter(Stats::TRIM_ENTRY, 0);
+
+ if (cache_type_ == net::DISK_CACHE)
+ block_files_.ReportStats();
+}
+
+void BackendImpl::ReportError(int error) {
+ STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
+ error == ERR_CACHE_CREATED);
+
+ // We transmit positive numbers, instead of direct error codes.
+ DCHECK_LE(error, 0);
+ CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
+}
+
+bool BackendImpl::CheckIndex() {
+ DCHECK(data_);
+
+ size_t current_size = index_->GetLength();
+ if (current_size < sizeof(Index)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ if (new_eviction_) {
+ // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion >> 16 != data_->header.version >> 16) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ if (kCurrentVersion == data_->header.version) {
+ // We need file version 2.1 for the new eviction algorithm.
+ UpgradeTo2_1();
+ }
+ } else {
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion != data_->header.version) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ }
+
+ if (!data_->header.table_len) {
+ LOG(ERROR) << "Invalid table size";
+ return false;
+ }
+
+ if (current_size < GetIndexSize(data_->header.table_len) ||
+ data_->header.table_len & (kBaseTableLen - 1)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ AdjustMaxCacheSize(data_->header.table_len);
+
+#if !defined(NET_BUILD_STRESS_CACHE)
+ if (data_->header.num_bytes < 0 ||
+ (max_size_ < kint32max - kDefaultCacheSize &&
+ data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
+ LOG(ERROR) << "Invalid cache (current) size";
+ return false;
+ }
+#endif
+
+ if (data_->header.num_entries < 0) {
+ LOG(ERROR) << "Invalid number of entries";
+ return false;
+ }
+
+ if (!mask_)
+ mask_ = data_->header.table_len - 1;
+
+ // Load the table into memory with a single read.
+ scoped_ptr<char[]> buf(new char[current_size]);
+ return index_->Read(buf.get(), current_size, 0);
+}
+
+int BackendImpl::CheckAllEntries() {
+ int num_dirty = 0;
+ int num_entries = 0;
+ DCHECK(mask_ < kuint32max);
+ for (unsigned int i = 0; i <= mask_; i++) {
+ Addr address(data_->table[i]);
+ if (!address.is_initialized())
+ continue;
+ for (;;) {
+ EntryImpl* tmp;
+ int ret = NewEntry(address, &tmp);
+ if (ret) {
+ STRESS_NOTREACHED();
+ return ret;
+ }
+ scoped_refptr<EntryImpl> cache_entry;
+ cache_entry.swap(&tmp);
+
+ if (cache_entry->dirty())
+ num_dirty++;
+ else if (CheckEntry(cache_entry.get()))
+ num_entries++;
+ else
+ return ERR_INVALID_ENTRY;
+
+ DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
+ address.set_value(cache_entry->GetNextAddress());
+ if (!address.is_initialized())
+ break;
+ }
+ }
+
+ Trace("CheckAllEntries End");
+ if (num_entries + num_dirty != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
+ " " << data_->header.num_entries;
+ DCHECK_LT(num_entries, data_->header.num_entries);
+ return ERR_NUM_ENTRIES_MISMATCH;
+ }
+
+ return num_dirty;
+}
+
+bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
+ bool ok = block_files_.IsValid(cache_entry->entry()->address());
+ ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
+ EntryStore* data = cache_entry->entry()->Data();
+ for (size_t i = 0; i < arraysize(data->data_addr); i++) {
+ if (data->data_addr[i]) {
+ Addr address(data->data_addr[i]);
+ if (address.is_block_file())
+ ok = ok && block_files_.IsValid(address);
+ }
+ }
+
+ return ok && cache_entry->rankings()->VerifyHash();
+}
+
+int BackendImpl::MaxBuffersSize() {
+ static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
+ static bool done = false;
+
+ if (!done) {
+ const int kMaxBuffersSize = 30 * 1024 * 1024;
+
+ // We want to use up to 2% of the computer's memory.
+ total_memory = total_memory * 2 / 100;
+ if (total_memory > kMaxBuffersSize || total_memory <= 0)
+ total_memory = kMaxBuffersSize;
+
+ done = true;
+ }
+
+ return static_cast<int>(total_memory);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/v3/backend_impl_v3.h b/chromium/net/disk_cache/v3/backend_impl_v3.h
new file mode 100644
index 00000000000..08cc5b1dd52
--- /dev/null
+++ b/chromium/net/disk_cache/v3/backend_impl_v3.h
@@ -0,0 +1,288 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_BACKEND_IMPL_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/timer/timer.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/eviction.h"
+#include "net/disk_cache/in_flight_backend_io.h"
+#include "net/disk_cache/rankings.h"
+#include "net/disk_cache/stats.h"
+#include "net/disk_cache/stress_support.h"
+#include "net/disk_cache/trace.h"
+
+namespace net {
+class NetLog;
+} // namespace net
+
+namespace disk_cache {
+
+enum BackendFlags {
+ kNone = 0,
+ kMask = 1, // A mask (for the index table) was specified.
+ kMaxSize = 1 << 1, // A maximum size was provided.
+ kUnitTestMode = 1 << 2, // We are modifying the behavior for testing.
+ kUpgradeMode = 1 << 3, // This is the upgrade tool (dump).
+ kNewEviction = 1 << 4, // Use of new eviction was specified.
+ kNoRandom = 1 << 5, // Don't add randomness to the behavior.
+ kNoLoadProtection = 1 << 6, // Don't act conservatively under load.
+ kNoBuffering = 1 << 7 // Disable extended IO buffering.
+};
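+// Flags are bitwise OR'ed together and handed to BackendImpl::SetFlags(); for
+// example, a test might pass kNoRandom | kNoBuffering to get deterministic
+// behavior with IO buffering disabled (an illustrative combination only).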
+
+// This class implements the Backend interface. An object of this
+// class handles the operations of the cache for a particular profile.
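+//
+// A minimal creation sketch (illustrative only; the path, cache thread proxy
+// and callback are assumptions, not part of this header):
+//
+//   BackendImpl* cache = new BackendImpl(path, cache_thread.get(), NULL);
+//   cache->SetMaxSize(80 * 1024 * 1024);
+//   int rv = cache->Init(callback);  // returns net::ERR_IO_PENDING; the
+//                                    // callback runs once Init completes.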
+class NET_EXPORT_PRIVATE BackendImpl : public Backend {
+ friend class Eviction;
+ public:
+ BackendImpl(const base::FilePath& path, base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log);
+ // mask can be used to limit the usable size of the hash table, for testing.
+ BackendImpl(const base::FilePath& path, uint32 mask,
+ base::MessageLoopProxy* cache_thread, net::NetLog* net_log);
+ virtual ~BackendImpl();
+
+ // Performs general initialization for this current instance of the cache.
+ int Init(const CompletionCallback& callback);
+
+ // Same behavior as OpenNextEntry but walks the list from back to front.
+ int OpenPrevEntry(void** iter, Entry** prev_entry,
+ const CompletionCallback& callback);
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Sets the cache type for this backend.
+ void SetType(net::CacheType type);
+
+ // Creates a new storage block of size block_count.
+ bool CreateBlock(FileType block_type, int block_count,
+ Addr* block_address);
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(EntryImpl* entry, bool modified);
+
+ // Permanently deletes an entry, but still keeps track of it.
+ void InternalDoomEntry(EntryImpl* entry);
+
+ // This method must be called when an entry is released for the last time, so
+ // the entry should not be used anymore. |address| is the cache address of the
+ // entry.
+ void OnEntryDestroyBegin(Addr address);
+
+ // This method must be called after all resources for an entry have been
+ // released.
+ void OnEntryDestroyEnd();
+
+ // If the data stored by the provided |rankings| points to an open entry,
+ // returns a pointer to that entry, otherwise returns NULL. Note that this
+ // method does NOT increase the ref counter for the entry.
+ EntryImpl* GetOpenEntry(CacheRankingsBlock* rankings) const;
+
+ // Returns the id being used on this run of the cache.
+ int32 GetCurrentEntryId() const;
+
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
+
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32 old_size, int32 new_size);
+
+ // Logs requests that are denied due to being too big.
+ void TooMuchStorageRequested(int32 size);
+
+ // Returns true if a temporary buffer is allowed to be extended.
+ bool IsAllocAllowed(int current_size, int new_size);
+
+ // Tracks the release of |size| bytes by an entry buffer.
+ void BufferDeleted(int size);
+
+ // Only intended for testing the two previous methods.
+ int GetTotalBuffersSize() const {
+ return buffer_bytes_;
+ }
+
+ // Returns true if this instance seems to be under heavy load.
+ bool IsLoaded() const;
+
+ // Returns the full histogram name, for the given base |name| and experiment,
+ // and the current cache type. The name will be "DiskCache.t.name_e" where t
+ // is the cache type and e is the provided |experiment|.
+ std::string HistogramName(const char* name, int experiment) const;
+
+ net::CacheType cache_type() const {
+ return cache_type_;
+ }
+
+ bool read_only() const {
+ return read_only_;
+ }
+
+ // Returns a weak pointer to this object.
+ base::WeakPtr<BackendImpl> GetWeakPtr();
+
+ // Returns true if we should send histograms for this user again. The caller
+ // must call this function only once per run (because it always returns the
+ // same thing on a given run).
+ bool ShouldReportAgain();
+
+ // Reports some data when we filled up the cache.
+ void FirstEviction();
+
+ // Called when an interesting event should be logged (counted).
+ void OnEvent(Stats::Counters an_event);
+
+ // Keeps track of payload access (doesn't include metadata).
+ void OnRead(int bytes);
+ void OnWrite(int bytes);
+
+ // Timer callback to calculate usage statistics.
+ void OnStatsTimer();
+
+ // Sets internal parameters to enable unit testing mode.
+ void SetUnitTestMode();
+
+ // Sets internal parameters to enable upgrade mode (for internal tools).
+ void SetUpgradeMode();
+
+ // Sets the eviction algorithm to version 2.
+ void SetNewEviction();
+
+ // Sets an explicit set of BackendFlags.
+ void SetFlags(uint32 flags);
+
+ // Sends a dummy operation through the operation queue, for unit tests.
+ int FlushQueueForTest(const CompletionCallback& callback);
+
+ // Trims an entry (all if |empty| is true) from the list of deleted
+ // entries. This method should be called directly on the cache thread.
+ void TrimForTest(bool empty);
+
+ // Trims an entry (all if |empty| is true) from the list of deleted
+ // entries. This method should be called directly on the cache thread.
+ void TrimDeletedListForTest(bool empty);
+
+ // Performs a simple self-check, and returns the number of dirty items
+ // or an error code (negative value).
+ int SelfCheck();
+
+ // Backend implementation.
+ virtual net::CacheType GetCacheType() const OVERRIDE;
+ virtual int32 GetEntryCount() const OVERRIDE;
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual void EndEnumeration(void** iter) OVERRIDE;
+ virtual void GetStats(StatsItems* stats) OVERRIDE;
+ virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
+
+ private:
+ typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap;
+
+ void AdjustMaxCacheSize(int table_len);
+
+ bool InitStats();
+ void StoreStats();
+
+ // Deletes the cache and starts again.
+ void RestartCache(bool failure);
+ void PrepareForRestart();
+
+ void CleanupCache();
+
+ // Creates a new entry object. Returns zero on success, or a disk_cache error
+ // on failure.
+ int NewEntry(Addr address, EntryImpl** entry);
+
+ // Opens the next or previous entry on a cache iteration.
+ EntryImpl* OpenFollowingEntry(bool forward, void** iter);
+
+ // Handles the used storage count.
+ void AddStorageSize(int32 bytes);
+ void SubstractStorageSize(int32 bytes);
+
+ // Update the number of referenced cache entries.
+ void IncreaseNumRefs();
+ void DecreaseNumRefs();
+ void IncreaseNumEntries();
+ void DecreaseNumEntries();
+
+ // Dumps current cache statistics to the log.
+ void LogStats();
+
+ // Send UMA stats.
+ void ReportStats();
+
+ // Reports an uncommon, recoverable error.
+ void ReportError(int error);
+
+ // Performs basic checks on the index file. Returns false on failure.
+ bool CheckIndex();
+
+  // Part of the self test. Returns the number of dirty entries, or an error.
+ int CheckAllEntries();
+
+ // Part of the self test. Returns false if the entry is corrupt.
+ bool CheckEntry(EntryImpl* cache_entry);
+
+ // Returns the maximum total memory for the memory buffers.
+ int MaxBuffersSize();
+
+ scoped_refptr<MappedFile> index_; // The main cache index.
+ base::FilePath path_; // Path to the folder used as backing storage.
+ BlockFiles block_files_; // Set of files used to store all data.
+ int32 max_size_; // Maximum data size for this instance.
+ Eviction eviction_; // Handler of the eviction algorithm.
+ EntriesMap open_entries_; // Map of open entries.
+ int num_refs_; // Number of referenced cache entries.
+ int max_refs_; // Max number of referenced cache entries.
+ int entry_count_; // Number of entries accessed lately.
+ int byte_count_; // Number of bytes read/written lately.
+ int buffer_bytes_; // Total size of the temporary entries' buffers.
+ int up_ticks_; // The number of timer ticks received (OnStatsTimer).
+ net::CacheType cache_type_;
+ int uma_report_; // Controls transmission of UMA data.
+ uint32 user_flags_; // Flags set by the user.
+ bool init_; // controls the initialization of the system.
+ bool restarted_;
+ bool unit_test_;
+ bool read_only_; // Prevents updates of the rankings data (used by tools).
+ bool disabled_;
+  bool new_eviction_;  // True if the new eviction algorithm should be used.
+ bool first_timer_; // True if the timer has not been called.
+ bool user_load_; // True if we see a high load coming from the caller.
+
+ net::NetLog* net_log_;
+
+ Stats stats_; // Usage statistics.
+ scoped_ptr<base::RepeatingTimer<BackendImpl> > timer_; // Usage timer.
+ scoped_refptr<TraceObject> trace_object_; // Initializes internal tracing.
+ base::WeakPtrFactory<BackendImpl> ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackendImpl);
+};
+
+// Returns the preferred max cache size given the available disk space.
+NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BACKEND_IMPL_H_
diff --git a/chromium/net/disk_cache/v3/backend_worker.cc b/chromium/net/disk_cache/v3/backend_worker.cc
new file mode 100644
index 00000000000..cbccfddb5c6
--- /dev/null
+++ b/chromium/net/disk_cache/v3/backend_worker.cc
@@ -0,0 +1,485 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/backend_impl.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/hash.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/stats_counters.h"
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/file.h"
+
+// This has to be defined before including histogram_macros.h from this file.
+#define NET_DISK_CACHE_BACKEND_IMPL_CC_
+#include "net/disk_cache/histogram_macros.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+const char* kIndexName = "index";
+
+// Seems like ~240 MB corresponds to less than 50k entries for 99% of the people.
+// Note that the actual target is to keep the index table load factor under 55%
+// for most users.
+const int k64kEntriesStore = 240 * 1000 * 1000;
+const int kBaseTableLen = 64 * 1024;
+const int kDefaultCacheSize = 80 * 1024 * 1024;
+
+// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
+const int kTrimDelay = 10;
+
+int DesiredIndexTableLen(int32 storage_size) {
+ if (storage_size <= k64kEntriesStore)
+ return kBaseTableLen;
+ if (storage_size <= k64kEntriesStore * 2)
+ return kBaseTableLen * 2;
+ if (storage_size <= k64kEntriesStore * 4)
+ return kBaseTableLen * 4;
+ if (storage_size <= k64kEntriesStore * 8)
+ return kBaseTableLen * 8;
+
+ // The biggest storage_size for int32 requires a 4 MB table.
+ return kBaseTableLen * 16;
+}
+
+int MaxStorageSizeForTable(int table_len) {
+ return table_len * (k64kEntriesStore / kBaseTableLen);
+}
+
+size_t GetIndexSize(int table_len) {
+ size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
+ return sizeof(disk_cache::IndexHeader) + table_size;
+}
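+
+// Illustrative sizing for the helpers above, assuming a 4-byte CacheAddr: the
+// base table of kBaseTableLen (64k) buckets needs roughly 256 KB of index on
+// top of the header, and DesiredIndexTableLen() doubles the table each time
+// the storage size crosses k64kEntriesStore, 2x, 4x and 8x that value, up to
+// the 4 MB (16x) table mentioned above.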
+
+// ------------------------------------------------------------------------
+
+// Sets group for the current experiment. Returns false if the files should be
+// discarded.
+bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
+ if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
+ header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
+ // Discard current cache.
+ return false;
+ }
+
+ if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
+ "ExperimentControl") {
+ if (cache_created) {
+ header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
+ return true;
+ } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
+ return false;
+ }
+ }
+
+ header->experiment = disk_cache::NO_EXPERIMENT;
+ return true;
+}
+
+} // namespace
+
+// ------------------------------------------------------------------------
+
+namespace disk_cache {
+
+BackendImpl::BackendImpl(const base::FilePath& path,
+ base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log)
+ : background_queue_(this, cache_thread),
+ path_(path),
+ block_files_(path),
+ mask_(0),
+ max_size_(0),
+ up_ticks_(0),
+ cache_type_(net::DISK_CACHE),
+ uma_report_(0),
+ user_flags_(0),
+ init_(false),
+ restarted_(false),
+ unit_test_(false),
+ read_only_(false),
+ disabled_(false),
+ new_eviction_(false),
+ first_timer_(true),
+ user_load_(false),
+ net_log_(net_log),
+ done_(true, false),
+ ptr_factory_(this) {
+}
+
+int BackendImpl::SyncInit() {
+#if defined(NET_BUILD_STRESS_CACHE)
+ // Start evictions right away.
+ up_ticks_ = kTrimDelay * 2;
+#endif
+ DCHECK(!init_);
+ if (init_)
+ return net::ERR_FAILED;
+
+ bool create_files = false;
+ if (!InitBackingStore(&create_files)) {
+ ReportError(ERR_STORAGE_ERROR);
+ return net::ERR_FAILED;
+ }
+
+ num_refs_ = num_pending_io_ = max_refs_ = 0;
+ entry_count_ = byte_count_ = 0;
+
+ if (!restarted_) {
+ buffer_bytes_ = 0;
+ trace_object_ = TraceObject::GetTraceObject();
+    // Create a recurring timer of 30 secs.
+ int timer_delay = unit_test_ ? 1000 : 30000;
+ timer_.reset(new base::RepeatingTimer<BackendImpl>());
+ timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
+ &BackendImpl::OnStatsTimer);
+ }
+
+ init_ = true;
+ Trace("Init");
+
+ if (data_->header.experiment != NO_EXPERIMENT &&
+ cache_type_ != net::DISK_CACHE) {
+ // No experiment for other caches.
+ return net::ERR_FAILED;
+ }
+
+ if (!(user_flags_ & kNoRandom)) {
+ // The unit test controls directly what to test.
+ new_eviction_ = (cache_type_ == net::DISK_CACHE);
+ }
+
+ if (!CheckIndex()) {
+ ReportError(ERR_INIT_FAILED);
+ return net::ERR_FAILED;
+ }
+
+ if (!restarted_ && (create_files || !data_->header.num_entries))
+ ReportError(ERR_CACHE_CREATED);
+
+ if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
+ !InitExperiment(&data_->header, create_files)) {
+ return net::ERR_FAILED;
+ }
+
+ // We don't care if the value overflows. The only thing we care about is that
+ // the id cannot be zero, because that value is used as "not dirty".
+ // Increasing the value once per second gives us many years before we start
+ // having collisions.
+ data_->header.this_id++;
+ if (!data_->header.this_id)
+ data_->header.this_id++;
+
+ bool previous_crash = (data_->header.crash != 0);
+ data_->header.crash = 1;
+
+ if (!block_files_.Init(create_files))
+ return net::ERR_FAILED;
+
+  // We want to minimize the changes to the cache for an AppCache.
+ if (cache_type() == net::APP_CACHE) {
+ DCHECK(!new_eviction_);
+ read_only_ = true;
+ } else if (cache_type() == net::SHADER_CACHE) {
+ DCHECK(!new_eviction_);
+ }
+
+ eviction_.Init(this);
+
+  // stats_ and rankings_ may call back into us, so we'd better be enabled.
+ disabled_ = false;
+ if (!InitStats())
+ return net::ERR_FAILED;
+
+ disabled_ = !rankings_.Init(this, new_eviction_);
+
+#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
+ trace_object_->EnableTracing(false);
+ int sc = SelfCheck();
+ if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
+ NOTREACHED();
+ trace_object_->EnableTracing(true);
+#endif
+
+ if (previous_crash) {
+ ReportError(ERR_PREVIOUS_CRASH);
+ } else if (!restarted_) {
+ ReportError(ERR_NO_ERROR);
+ }
+
+ FlushIndex();
+
+ return disabled_ ? net::ERR_FAILED : net::OK;
+}
+
+void BackendImpl::PrepareForRestart() {
+ // Reset the mask_ if it was not given by the user.
+ if (!(user_flags_ & kMask))
+ mask_ = 0;
+
+ if (!(user_flags_ & kNewEviction))
+ new_eviction_ = false;
+
+ disabled_ = true;
+ data_->header.crash = 0;
+ index_->Flush();
+ index_ = NULL;
+ data_ = NULL;
+ block_files_.CloseFiles();
+ rankings_.Reset();
+ init_ = false;
+ restarted_ = true;
+}
+
+BackendImpl::~BackendImpl() {
+ if (user_flags_ & kNoRandom) {
+ // This is a unit test, so we want to be strict about not leaking entries
+ // and completing all the work.
+ background_queue_.WaitForPendingIO();
+ } else {
+ // This is most likely not a test, so we want to do as little work as
+ // possible at this time, at the price of leaving dirty entries behind.
+ background_queue_.DropPendingIO();
+ }
+
+ if (background_queue_.BackgroundIsCurrentThread()) {
+ // Unit tests may use the same thread for everything.
+ CleanupCache();
+ } else {
+ background_queue_.background_thread()->PostTask(
+ FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
+ // http://crbug.com/74623
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ done_.Wait();
+ }
+}
+
+void BackendImpl::CleanupCache() {
+ Trace("Backend Cleanup");
+ eviction_.Stop();
+ timer_.reset();
+
+ if (init_) {
+ StoreStats();
+ if (data_)
+ data_->header.crash = 0;
+
+ if (user_flags_ & kNoRandom) {
+ // This is a net_unittest, verify that we are not 'leaking' entries.
+ File::WaitForPendingIO(&num_pending_io_);
+ DCHECK(!num_refs_);
+ } else {
+ File::DropPendingIO();
+ }
+ }
+ block_files_.CloseFiles();
+ FlushIndex();
+ index_ = NULL;
+ ptr_factory_.InvalidateWeakPtrs();
+ done_.Signal();
+}
+
+base::FilePath BackendImpl::GetFileName(Addr address) const {
+ if (!address.is_separate_file() || !address.is_initialized()) {
+ NOTREACHED();
+ return base::FilePath();
+ }
+
+ std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
+ return path_.AppendASCII(tmp);
+}
+
+// We just created a new file so we're going to write the header and set the
+// file length to include the hash table (zero filled).
+bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
+ AdjustMaxCacheSize(0);
+
+ IndexHeader header;
+ header.table_len = DesiredIndexTableLen(max_size_);
+
+ // We need file version 2.1 for the new eviction algorithm.
+ if (new_eviction_)
+ header.version = 0x20001;
+
+ header.create_time = Time::Now().ToInternalValue();
+
+ if (!file->Write(&header, sizeof(header), 0))
+ return false;
+
+ return file->SetLength(GetIndexSize(header.table_len));
+}
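+
+// So, per the comment above CreateBackingStore(), a freshly created index is
+// GetIndexSize(header.table_len) bytes long: the header written here followed
+// by a hash table region that starts out as all zeros.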
+
+bool BackendImpl::InitBackingStore(bool* file_created) {
+ if (!file_util::CreateDirectory(path_))
+ return false;
+
+ base::FilePath index_name = path_.AppendASCII(kIndexName);
+
+ int flags = base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_OPEN_ALWAYS |
+ base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(index_name, flags, file_created, NULL)));
+
+ if (!file->IsValid())
+ return false;
+
+ bool ret = true;
+ if (*file_created)
+ ret = CreateBackingStore(file.get());
+
+ file = NULL;
+ if (!ret)
+ return false;
+
+ index_ = new MappedFile();
+ data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
+ if (!data_) {
+ LOG(ERROR) << "Unable to map Index file";
+ return false;
+ }
+
+ if (index_->GetLength() < sizeof(Index)) {
+ // We verify this again on CheckIndex() but it's easier to make sure now
+ // that the header is there.
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ return true;
+}
+
+void BackendImpl::ReportError(int error) {
+ STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
+ error == ERR_CACHE_CREATED);
+
+ // We transmit positive numbers, instead of direct error codes.
+ DCHECK_LE(error, 0);
+ CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
+}
+
+
+bool BackendImpl::CheckIndex() {
+ DCHECK(data_);
+
+ size_t current_size = index_->GetLength();
+ if (current_size < sizeof(Index)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ if (new_eviction_) {
+ // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion >> 16 != data_->header.version >> 16) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ if (kCurrentVersion == data_->header.version) {
+ // We need file version 2.1 for the new eviction algorithm.
+ UpgradeTo2_1();
+ }
+ } else {
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion != data_->header.version) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ }
+
+ if (!data_->header.table_len) {
+ LOG(ERROR) << "Invalid table size";
+ return false;
+ }
+
+ if (current_size < GetIndexSize(data_->header.table_len) ||
+ data_->header.table_len & (kBaseTableLen - 1)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ AdjustMaxCacheSize(data_->header.table_len);
+
+#if !defined(NET_BUILD_STRESS_CACHE)
+ if (data_->header.num_bytes < 0 ||
+ (max_size_ < kint32max - kDefaultCacheSize &&
+ data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
+ LOG(ERROR) << "Invalid cache (current) size";
+ return false;
+ }
+#endif
+
+ if (data_->header.num_entries < 0) {
+ LOG(ERROR) << "Invalid number of entries";
+ return false;
+ }
+
+ if (!mask_)
+ mask_ = data_->header.table_len - 1;
+
+ // Load the table into memory with a single read.
+ scoped_ptr<char[]> buf(new char[current_size]);
+ return index_->Read(buf.get(), current_size, 0);
+}
+
+bool BackendImpl::InitStats() {
+ Addr address(data_->header.stats);
+ int size = stats_.StorageSize();
+
+ if (!address.is_initialized()) {
+ FileType file_type = Addr::RequiredFileType(size);
+ DCHECK_NE(file_type, EXTERNAL);
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!CreateBlock(file_type, num_blocks, &address))
+ return false;
+ return stats_.Init(NULL, 0, address);
+ }
+
+ if (!address.is_block_file()) {
+ NOTREACHED();
+ return false;
+ }
+
+ // Load the required data.
+ size = address.num_blocks() * address.BlockSize();
+ MappedFile* file = File(address);
+ if (!file)
+ return false;
+
+ scoped_ptr<char[]> data(new char[size]);
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (!file->Read(data.get(), size, offset))
+ return false;
+
+ if (!stats_.Init(data.get(), size, address))
+ return false;
+ if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
+ stats_.InitSizeHistogram();
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/v3/backend_worker.h b/chromium/net/disk_cache/v3/backend_worker.h
new file mode 100644
index 00000000000..42fd4b232f8
--- /dev/null
+++ b/chromium/net/disk_cache/v3/backend_worker.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_BACKEND_IMPL_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/timer/timer.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/eviction.h"
+#include "net/disk_cache/in_flight_backend_io.h"
+#include "net/disk_cache/rankings.h"
+#include "net/disk_cache/stats.h"
+#include "net/disk_cache/stress_support.h"
+#include "net/disk_cache/trace.h"
+
+namespace disk_cache {
+
+// This class implements the Backend interface. An object of this
+// class handles the operations of the cache for a particular profile.
+class NET_EXPORT_PRIVATE BackendImpl : public Backend {
+ friend class Eviction;
+ public:
+ BackendImpl(const base::FilePath& path, base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log);
+
+ // Performs general initialization for this current instance of the cache.
+ int Init(const CompletionCallback& callback);
+
+ private:
+ void CleanupCache();
+
+ // Returns the full name for an external storage file.
+ base::FilePath GetFileName(Addr address) const;
+
+ // Creates a new backing file for the cache index.
+ bool CreateBackingStore(disk_cache::File* file);
+ bool InitBackingStore(bool* file_created);
+
+ // Reports an uncommon, recoverable error.
+ void ReportError(int error);
+
+ // Performs basic checks on the index file. Returns false on failure.
+ bool CheckIndex();
+
+ base::FilePath path_; // Path to the folder used as backing storage.
+ BlockFiles block_files_; // Set of files used to store all data.
+ bool init_; // controls the initialization of the system.
+
+ DISALLOW_COPY_AND_ASSIGN(BackendImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BACKEND_IMPL_H_
diff --git a/chromium/net/disk_cache/v3/block_bitmaps.cc b/chromium/net/disk_cache/v3/block_bitmaps.cc
new file mode 100644
index 00000000000..0d0317b39dc
--- /dev/null
+++ b/chromium/net/disk_cache/v3/block_bitmaps.cc
@@ -0,0 +1,332 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/block_files.h"
+
+#include "base/atomicops.h"
+#include "base/file_util.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/file_lock.h"
+#include "net/disk_cache/trace.h"
+
+using base::TimeTicks;
+
+namespace disk_cache {
+
+BlockFiles::BlockFiles(const base::FilePath& path)
+ : init_(false), zero_buffer_(NULL), path_(path) {
+}
+
+BlockFiles::~BlockFiles() {
+ if (zero_buffer_)
+ delete[] zero_buffer_;
+ CloseFiles();
+}
+
+bool BlockFiles::Init(bool create_files) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ thread_checker_.reset(new base::ThreadChecker);
+
+ block_files_.resize(kFirstAdditionalBlockFile);
+ for (int i = 0; i < kFirstAdditionalBlockFile; i++) {
+ if (create_files)
+ if (!CreateBlockFile(i, static_cast<FileType>(i + 1), true))
+ return false;
+
+ if (!OpenBlockFile(i))
+ return false;
+
+ // Walk this chain of files removing empty ones.
+ if (!RemoveEmptyFile(static_cast<FileType>(i + 1)))
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
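+
+// A minimal usage sketch for this class (hypothetical caller, using only the
+// calls exercised by the unit tests): initialize the block files, allocate a
+// block and release it again.
+//
+//   disk_cache::BlockFiles files(cache_dir);  // |cache_dir| is assumed.
+//   if (files.Init(true)) {                   // true: create the data_* files.
+//     disk_cache::Addr addr;
+//     if (files.CreateBlock(disk_cache::BLOCK_1K, 2, &addr))
+//       files.DeleteBlock(addr, false);       // false: no deep (zeroed) delete.
+//     files.CloseFiles();
+//   }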
+
+bool BlockFiles::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ if (block_type < RANKINGS || block_type > BLOCK_4K ||
+ block_count < 1 || block_count > 4)
+ return false;
+ if (!init_)
+ return false;
+
+ MappedFile* file = FileForNewBlock(block_type, block_count);
+ if (!file)
+ return false;
+
+ ScopedFlush flush(file);
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ int target_size = 0;
+ for (int i = block_count; i <= 4; i++) {
+ if (header->empty[i - 1]) {
+ target_size = i;
+ break;
+ }
+ }
+
+ DCHECK(target_size);
+ int index;
+ if (!CreateMapBlock(target_size, block_count, header, &index))
+ return false;
+
+ Addr address(block_type, block_count, header->this_file, index);
+ block_address->set_value(address.value());
+ Trace("CreateBlock 0x%x", address.value());
+ return true;
+}
+
+void BlockFiles::DeleteBlock(Addr address, bool deep) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ if (!address.is_initialized() || address.is_separate_file())
+ return;
+
+ if (!zero_buffer_) {
+ zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
+ memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
+ }
+ MappedFile* file = GetFile(address);
+ if (!file)
+ return;
+
+ Trace("DeleteBlock 0x%x", address.value());
+
+ size_t size = address.BlockSize() * address.num_blocks();
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (deep)
+ file->Write(zero_buffer_, size, offset);
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ DeleteMapBlock(address.start_block(), address.num_blocks(), header);
+ file->Flush();
+
+ if (!header->num_entries) {
+ // This file is now empty. Let's try to delete it.
+ FileType type = Addr::RequiredFileType(header->entry_size);
+ if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
+ type = RANKINGS;
+ RemoveEmptyFile(type); // Ignore failures.
+ }
+}
+
+void BlockFiles::CloseFiles() {
+ if (init_) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ }
+ init_ = false;
+ for (unsigned int i = 0; i < block_files_.size(); i++) {
+ if (block_files_[i]) {
+ block_files_[i]->Release();
+ block_files_[i] = NULL;
+ }
+ }
+ block_files_.clear();
+}
+
+void BlockFiles::ReportStats() {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ int used_blocks[kFirstAdditionalBlockFile];
+ int load[kFirstAdditionalBlockFile];
+ for (int i = 0; i < kFirstAdditionalBlockFile; i++) {
+ GetFileStats(i, &used_blocks[i], &load[i]);
+ }
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_0", used_blocks[0]);
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_1", used_blocks[1]);
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_2", used_blocks[2]);
+ UMA_HISTOGRAM_COUNTS("DiskCache.Blocks_3", used_blocks[3]);
+
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_0", load[0], 101);
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_1", load[1], 101);
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_2", load[2], 101);
+ UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_3", load[3], 101);
+}
+
+bool BlockFiles::IsValid(Addr address) {
+#ifdef NDEBUG
+ return true;
+#else
+ if (!address.is_initialized() || address.is_separate_file())
+ return false;
+
+ MappedFile* file = GetFile(address);
+ if (!file)
+ return false;
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ bool rv = UsedMapBlock(address.start_block(), address.num_blocks(), header);
+ DCHECK(rv);
+
+ static bool read_contents = false;
+ if (read_contents) {
+ scoped_ptr<char[]> buffer;
+ buffer.reset(new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4]);
+ size_t size = address.BlockSize() * address.num_blocks();
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ bool ok = file->Read(buffer.get(), size, offset);
+ DCHECK(ok);
+ }
+
+ return rv;
+#endif
+}
+
+MappedFile* BlockFiles::GetFile(Addr address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ DCHECK(block_files_.size() >= 4);
+ DCHECK(address.is_block_file() || !address.is_initialized());
+ if (!address.is_initialized())
+ return NULL;
+
+ int file_index = address.FileNumber();
+ if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
+ !block_files_[file_index]) {
+    // We need to open the file.
+ if (!OpenBlockFile(file_index))
+ return NULL;
+ }
+ DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ return block_files_[file_index];
+}
+
+bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
+ if (kMaxBlocks == header->max_entries)
+ return false;
+
+ ScopedFlush flush(file);
+ DCHECK(!header->empty[3]);
+ int new_size = header->max_entries + 1024;
+ if (new_size > kMaxBlocks)
+ new_size = kMaxBlocks;
+
+ int new_size_bytes = new_size * header->entry_size + sizeof(*header);
+
+ if (!file->SetLength(new_size_bytes)) {
+ // Most likely we are trying to truncate the file, so the header is wrong.
+ if (header->updating < 10 && !FixBlockFileHeader(file)) {
+      // If we can't fix the file, increase the lock guard so we'll pick it up
+      // on the next start and replace it.
+ header->updating = 100;
+ return false;
+ }
+ return (header->max_entries >= new_size);
+ }
+
+ FileLock lock(header);
+  header->empty[3] = (new_size - header->max_entries) / 4;  // 4-block entries.
+ header->max_entries = new_size;
+
+ return true;
+}
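+
+// Growth example for the code above (hypothetical numbers): starting from
+// max_entries == 5120, the file grows to 6144 entries and the 1024 new
+// entries are published as (6144 - 5120) / 4 == 256 free 4-block chunks in
+// empty[3], which the DCHECK above guarantees was zero before growing.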
+
+MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
+ COMPILE_ASSERT(RANKINGS == 1, invalid_file_type);
+ MappedFile* file = block_files_[block_type - 1];
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ TimeTicks start = TimeTicks::Now();
+ while (NeedToGrowBlockFile(header, block_count)) {
+ if (kMaxBlocks == header->max_entries) {
+ file = NextFile(file);
+ if (!file)
+ return NULL;
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ continue;
+ }
+
+ if (!GrowBlockFile(file, header))
+ return NULL;
+ break;
+ }
+ HISTOGRAM_TIMES("DiskCache.GetFileForNewBlock", TimeTicks::Now() - start);
+ return file;
+}
+
+// Note that we expect to be called outside of a FileLock... however, we cannot
+// DCHECK on header->updating because we may be fixing a crash.
+bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
+ ScopedFlush flush(file);
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ int file_size = static_cast<int>(file->GetLength());
+ if (file_size < static_cast<int>(sizeof(*header)))
+ return false; // file_size > 2GB is also an error.
+
+ const int kMinBlockSize = 36;
+ const int kMaxBlockSize = 4096;
+ if (header->entry_size < kMinBlockSize ||
+ header->entry_size > kMaxBlockSize || header->num_entries < 0)
+ return false;
+
+ // Make sure that we survive crashes.
+ header->updating = 1;
+ int expected = header->entry_size * header->max_entries + sizeof(*header);
+ if (file_size != expected) {
+ int max_expected = header->entry_size * kMaxBlocks + sizeof(*header);
+ if (file_size < expected || header->empty[3] || file_size > max_expected) {
+ NOTREACHED();
+ LOG(ERROR) << "Unexpected file size";
+ return false;
+ }
+ // We were in the middle of growing the file.
+ int num_entries = (file_size - sizeof(*header)) / header->entry_size;
+ header->max_entries = num_entries;
+ }
+
+ FixAllocationCounters(header);
+ int empty_blocks = EmptyBlocks(header);
+ if (empty_blocks + header->num_entries > header->max_entries)
+ header->num_entries = header->max_entries - empty_blocks;
+
+ if (!ValidateCounters(header))
+ return false;
+
+ header->updating = 0;
+ return true;
+}
+
+// We are interested in the total number of blocks used by this file type, and
+// the max number of blocks that we can store (reported as the percentage of
+// used blocks). In order to find out the number of used blocks, we have to
+// subtract the empty blocks from the total blocks for each file in the chain.
+void BlockFiles::GetFileStats(int index, int* used_count, int* load) {
+ int max_blocks = 0;
+ *used_count = 0;
+ *load = 0;
+ for (;;) {
+ if (!block_files_[index] && !OpenBlockFile(index))
+ return;
+
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(block_files_[index]->buffer());
+
+ max_blocks += header->max_entries;
+ int used = header->max_entries;
+ for (int i = 0; i < 4; i++) {
+ used -= header->empty[i] * (i + 1);
+ DCHECK_GE(used, 0);
+ }
+ *used_count += used;
+
+ if (!header->next_file)
+ break;
+ index = header->next_file;
+ }
+ if (max_blocks)
+ *load = *used_count * 100 / max_blocks;
+}
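+
+// Worked example for the math above (hypothetical numbers): a single file in
+// the chain with max_entries == 1024 and empty[] == {10, 5, 0, 2} has
+// 10*1 + 5*2 + 0*3 + 2*4 == 28 free blocks, so *used_count becomes 996 and
+// *load becomes 996 * 100 / 1024 == 97.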
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/v3/block_bitmaps.h b/chromium/net/disk_cache/v3/block_bitmaps.h
new file mode 100644
index 00000000000..eaf87609912
--- /dev/null
+++ b/chromium/net/disk_cache/v3/block_bitmaps.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_BLOCK_FILES_H_
+#define NET_DISK_CACHE_BLOCK_FILES_H_
+
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_export.h"
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+
+namespace disk_cache {
+
+// This class handles the set of block-files open by the disk cache.
+class NET_EXPORT_PRIVATE BlockFiles {
+ public:
+ explicit BlockFiles(const base::FilePath& path);
+ ~BlockFiles();
+
+ // Performs the object initialization. create_files indicates if the backing
+  // files should be created or just opened.
+ bool Init(bool create_files);
+
+  // Creates a new entry on a block file. block_type indicates the size of the
+  // block to be used (as defined in addr.h), block_count is the number of
+ // blocks to allocate, and block_address is the address of the new entry.
+ bool CreateBlock(FileType block_type, int block_count, Addr* block_address);
+
+ // Removes an entry from the block files. If deep is true, the storage is zero
+ // filled; otherwise the entry is removed but the data is not altered (must be
+ // already zeroed).
+ void DeleteBlock(Addr address, bool deep);
+
+  // Closes all the files and resets the internal state so it can be
+  // initialized again. The cache is being purged.
+ void CloseFiles();
+
+ // Sends UMA stats.
+ void ReportStats();
+
+  // Returns true if the blocks pointed to by a given address are currently in
+  // use. This method is only intended for debugging.
+ bool IsValid(Addr address);
+
+ private:
+ // Returns the file that stores a given address.
+ MappedFile* GetFile(Addr address);
+
+  // Attempts to grow this file. Fails if the file cannot be extended anymore.
+ bool GrowBlockFile(MappedFile* file, BlockFileHeader* header);
+
+ // Returns the appropriate file to use for a new block.
+ MappedFile* FileForNewBlock(FileType block_type, int block_count);
+
+ // Restores the header of a potentially inconsistent file.
+ bool FixBlockFileHeader(MappedFile* file);
+
+ // Retrieves stats for the given file index.
+ void GetFileStats(int index, int* used_count, int* load);
+
+ bool init_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockFiles);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BLOCK_FILES_H_
diff --git a/chromium/net/disk_cache/v3/block_bitmaps_unittest.cc b/chromium/net/disk_cache/v3/block_bitmaps_unittest.cc
new file mode 100644
index 00000000000..fa7c5dbb742
--- /dev/null
+++ b/chromium/net/disk_cache/v3/block_bitmaps_unittest.cc
@@ -0,0 +1,350 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "base/files/file_enumerator.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+namespace {
+
+// Returns the number of files in this folder.
+int NumberOfFiles(const base::FilePath& path) {
+ base::FileEnumerator iter(path, false, base::FileEnumerator::FILES);
+ int count = 0;
+ for (base::FilePath file = iter.Next(); !file.value().empty();
+ file = iter.Next()) {
+ count++;
+ }
+ return count;
+}
+
+}  // namespace
+
+namespace disk_cache {
+
+TEST_F(DiskCacheTest, BlockFiles_Grow) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kMaxSize = 35000;
+ Addr address[kMaxSize];
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kMaxSize; i++) {
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[i]));
+ }
+ EXPECT_EQ(6, NumberOfFiles(cache_path_));
+
+ // Make sure we don't keep adding files.
+ for (int i = 0; i < kMaxSize * 4; i += 2) {
+ int target = i % kMaxSize;
+ files.DeleteBlock(address[target], false);
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[target]));
+ }
+ EXPECT_EQ(6, NumberOfFiles(cache_path_));
+}
+
+// We should be able to delete empty block files.
+TEST_F(DiskCacheTest, BlockFiles_Shrink) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kMaxSize = 35000;
+ Addr address[kMaxSize];
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kMaxSize; i++) {
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[i]));
+ }
+
+ // Now delete all the blocks, so that we can delete the two extra files.
+ for (int i = 0; i < kMaxSize; i++) {
+ files.DeleteBlock(address[i], false);
+ }
+ EXPECT_EQ(4, NumberOfFiles(cache_path_));
+}
+
+// Handling of block files not properly closed.
+TEST_F(DiskCacheTest, BlockFiles_Recover) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kNumEntries = 2000;
+ CacheAddr entries[kNumEntries];
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ for (int i = 0; i < kNumEntries; i++) {
+ Addr address(0);
+ int size = (rand() % 4) + 1;
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, size, &address));
+ entries[i] = address.value();
+ }
+
+ for (int i = 0; i < kNumEntries; i++) {
+ int source1 = rand() % kNumEntries;
+ int source2 = rand() % kNumEntries;
+ CacheAddr temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ Addr address(entries[i]);
+ files.DeleteBlock(address, false);
+ }
+
+ // At this point, there are kNumEntries / 2 entries on the file, randomly
+ // distributed both on location and size.
+
+ Addr address(entries[kNumEntries / 2]);
+ MappedFile* file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ int max_entries = header->max_entries;
+ int empty_1 = header->empty[0];
+ int empty_2 = header->empty[1];
+ int empty_3 = header->empty[2];
+ int empty_4 = header->empty[3];
+
+ // Corrupt the file.
+ header->max_entries = header->empty[0] = 0;
+ header->empty[1] = header->empty[2] = header->empty[3] = 0;
+ header->updating = -1;
+
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+
+ // The file must have been fixed.
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ EXPECT_EQ(max_entries, header->max_entries);
+ EXPECT_EQ(empty_1, header->empty[0]);
+ EXPECT_EQ(empty_2, header->empty[1]);
+ EXPECT_EQ(empty_3, header->empty[2]);
+ EXPECT_EQ(empty_4, header->empty[3]);
+}
+
+// Handling of truncated files.
+TEST_F(DiskCacheTest, BlockFiles_ZeroSizeFile) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ base::FilePath filename = files.Name(0);
+ files.CloseFiles();
+ // Truncate one of the files.
+ {
+ scoped_refptr<File> file(new File);
+ ASSERT_TRUE(file->Init(filename));
+ EXPECT_TRUE(file->SetLength(0));
+ }
+
+ // Initializing should fail, not crash.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// Handling of truncated files (non empty).
+TEST_F(DiskCacheTest, BlockFiles_TruncatedFile) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+ Addr address;
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 2, &address));
+
+ base::FilePath filename = files.Name(0);
+ files.CloseFiles();
+ // Truncate one of the files.
+ {
+ scoped_refptr<File> file(new File);
+ ASSERT_TRUE(file->Init(filename));
+ EXPECT_TRUE(file->SetLength(15000));
+ }
+
+ // Initializing should fail, not crash.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// Tests detection of out of sync counters.
+TEST_F(DiskCacheTest, BlockFiles_Counters) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ // Create a block of size 2.
+ Addr address(0);
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 2, &address));
+
+ MappedFile* file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+ ASSERT_EQ(0, header->updating);
+
+ // Alter the counters so that the free space doesn't add up.
+ header->empty[2] = 50; // 50 free blocks of size 3.
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ // The file must have been fixed.
+ ASSERT_EQ(0, header->empty[2]);
+
+ // Change the number of entries.
+ header->num_entries = 3;
+ header->updating = 1;
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ // The file must have been "fixed".
+ ASSERT_EQ(2, header->num_entries);
+
+ // Change the number of entries.
+ header->num_entries = -1;
+ header->updating = 1;
+ files.CloseFiles();
+
+ // Detect the error.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// An invalid file can be detected after init.
+TEST_F(DiskCacheTest, BlockFiles_InvalidFile) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ // Let's access block 10 of file 5. (There is no file).
+ Addr addr(BLOCK_256, 1, 5, 10);
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+
+ // Let's create an invalid file.
+ base::FilePath filename(files.Name(5));
+ char header[kBlockHeaderSize];
+ memset(header, 'a', kBlockHeaderSize);
+ EXPECT_EQ(kBlockHeaderSize,
+ file_util::WriteFile(filename, header, kBlockHeaderSize));
+
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+
+ // The file should not have been changed (it is still invalid).
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+}
+
+// Tests that we generate the correct file stats.
+TEST_F(DiskCacheTest, BlockFiles_Stats) {
+ ASSERT_TRUE(CopyTestCache("remove_load1"));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(false));
+ int used, load;
+
+ files.GetFileStats(0, &used, &load);
+ EXPECT_EQ(101, used);
+ EXPECT_EQ(9, load);
+
+ files.GetFileStats(1, &used, &load);
+ EXPECT_EQ(203, used);
+ EXPECT_EQ(19, load);
+
+ files.GetFileStats(2, &used, &load);
+ EXPECT_EQ(0, used);
+ EXPECT_EQ(0, load);
+}
+
+// Tests that we add and remove blocks correctly.
+TEST_F(DiskCacheTest, AllocationMap) {
+ ASSERT_TRUE(CleanupCacheDir());
+ ASSERT_TRUE(file_util::CreateDirectory(cache_path_));
+
+ BlockFiles files(cache_path_);
+ ASSERT_TRUE(files.Init(true));
+
+ // Create a bunch of entries.
+ const int kSize = 100;
+ Addr address[kSize];
+ for (int i = 0; i < kSize; i++) {
+ SCOPED_TRACE(i);
+ int block_size = i % 4 + 1;
+ EXPECT_TRUE(files.CreateBlock(BLOCK_1K, block_size, &address[i]));
+ EXPECT_EQ(BLOCK_1K, address[i].file_type());
+ EXPECT_EQ(block_size, address[i].num_blocks());
+ int start = address[i].start_block();
+ EXPECT_EQ(start / 4, (start + block_size - 1) / 4);
+ }
+
+ for (int i = 0; i < kSize; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_TRUE(files.IsValid(address[i]));
+ }
+
+ // The first part of the allocation map should be completely filled. We used
+  // 10 bits for each group of four entries, so 250 bits in total.
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(files.GetFile(address[0])->buffer());
+ uint8* buffer = reinterpret_cast<uint8*>(&header->allocation_map);
+  for (int i = 0; i < 29; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(0xff, buffer[i]);
+ }
+
+ for (int i = 0; i < kSize; i++) {
+ SCOPED_TRACE(i);
+ files.DeleteBlock(address[i], false);
+ }
+
+ // The allocation map should be empty.
+  for (int i = 0; i < 50; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(0, buffer[i]);
+ }
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/v3/disk_format_v3.h b/chromium/net/disk_cache/v3/disk_format_v3.h
new file mode 100644
index 00000000000..56163770cfa
--- /dev/null
+++ b/chromium/net/disk_cache/v3/disk_format_v3.h
@@ -0,0 +1,190 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The cache is stored on disk as a collection of block-files, plus an index
+// plus a collection of external files.
+//
+// Any data blob bigger than kMaxBlockSize (disk_cache/addr.h) will be stored in
+// a separate file named f_xxx where x is a hexadecimal number. Shorter data
+// will be stored as a series of blocks on a block-file. In any case, CacheAddr
+// represents the address of the data inside the cache.
+//
+// The index is actually a collection of four files that store a hash table with
+// allocation bitmaps and backup data. Hash collisions are handled directly by
+// the table, which from some point of view behaves like a 4-way associative
+// cache with overflow buckets (so not really open addressing).
+//
+// Basically the hash table is a collection of buckets. The first part of the
+// table has a fixed number of buckets and it is directly addressed by the hash,
+// while the second part of the table (stored on a second file) has a variable
+// number of buckets. Each bucket stores up to four cells (each cell represents
+// a possible entry). The index bitmap tracks the state of individual cells.
+//
+// The last element of the cache is the block-file. A block file is a file
+// designed to store blocks of data of a given size. For more details see
+// disk_cache/disk_format_base.h
+//
+// A new cache is initialized with a set of block files (named data_0 through
+// data_6), each one dedicated to store blocks of a given size or function. The
+// number at the end of the file name is the block file number (in decimal).
+//
+// There are three "special" types of blocks: normal entries, evicted entries
+// and control data for external files.
+//
+// The files that store internal information for the cache (blocks and index)
+// are memory mapped. They have a location that is signaled every time the
+// internal structures are modified, so it is possible to detect (most of the
+// time) when the process dies in the middle of an update. There are dedicated
+// backup files for cache bitmaps, used to detect entries out of date.
+
+#ifndef NET_DISK_CACHE_V3_DISK_FORMAT_V3_H_
+#define NET_DISK_CACHE_V3_DISK_FORMAT_V3_H_
+
+#include "base/basictypes.h"
+#include "net/disk_cache/disk_format_base.h"
+
+namespace disk_cache {
+
+const int kBaseTableLen = 0x10000;
+const uint32 kIndexMagicV3 = 0xC103CAC3;
+const uint32 kVersion3 = 0x30000; // Version 3.0.
+
+// Flags for a given cache.
+enum CacheFlags {
+ CACHE_EVICTION_2 = 1, // Keep multiple lists for eviction.
+ CACHE_EVICTED = 1 << 1 // Already evicted at least one entry.
+};
+
+// Header for the master index file.
+struct IndexHeaderV3 {
+ uint32 magic;
+ uint32 version;
+ int32 num_entries; // Number of entries currently stored.
+ int32 num_bytes; // Total size of the stored data.
+ int32 last_file; // Last external file created.
+ int32 reserved1;
+ CacheAddr stats; // Storage for usage data.
+ int32 table_len; // Actual size of the table.
+ int32 crash; // Signals a previous crash.
+ int32 experiment; // Id of an ongoing test.
+ int32 max_bytes; // Total maximum size of the stored data.
+ uint32 flags;
+ int32 used_cells;
+ int32 max_bucket;
+ uint64 create_time; // Creation time for this set of files.
+ uint64 base_time; // Current base for timestamps.
+ uint64 old_time; // Previous time used for timestamps.
+ int32 max_block_file;
+ int32 num_no_use_entries;
+ int32 num_low_use_entries;
+ int32 num_high_use_entries;
+ int32 reserved;
+ int32 num_evicted_entries;
+ int32 pad[6];
+};
+
+const int kBaseBitmapBytes = 3968;
+// The IndexBitmap is directly saved to a file named index. The file grows in
+// page increments (4096 bytes), but not all bits have to be in use at any
+// given time. The required file size can be computed from header.table_len.
+struct IndexBitmap {
+ IndexHeaderV3 header;
+ uint32 bitmap[kBaseBitmapBytes / 4]; // First page of the bitmap.
+};
+COMPILE_ASSERT(sizeof(IndexBitmap) == 4096, bad_IndexHeader);
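+// Given kBaseBitmapBytes (3968) bytes of bitmap, the assert above implies that
+// IndexHeaderV3 itself takes the remaining 128 bytes of the first 4096-byte
+// page.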
+
+// Possible states for a given entry.
+enum EntryState {
+ ENTRY_FREE = 0, // Available slot.
+ ENTRY_NEW, // The entry is being created.
+ ENTRY_OPEN, // The entry is being accessed.
+ ENTRY_MODIFIED, // The entry is being modified.
+ ENTRY_DELETED, // The entry is being deleted.
+ ENTRY_FIXING, // Inconsistent state. The entry is being verified.
+ ENTRY_USED // The slot is in use (entry is present).
+};
+COMPILE_ASSERT(ENTRY_USED <= 7, state_uses_3_bits);
+
+enum EntryGroup {
+ ENTRY_NO_USE = 0, // The entry has not been reused.
+ ENTRY_LOW_USE, // The entry has low reuse.
+ ENTRY_HIGH_USE, // The entry has high reuse.
+ ENTRY_RESERVED, // Reserved for future use.
+ ENTRY_EVICTED // The entry was deleted.
+};
+COMPILE_ASSERT(ENTRY_EVICTED <= 7, group_uses_3_bits);
+
+#pragma pack(push, 1)
+struct IndexCell {
+ void Clear() { memset(this, 0, sizeof(*this)); }
+
+ uint64 address : 22;
+ uint64 hash : 18;
+ uint64 timestamp : 20;
+ uint64 reuse : 4;
+ uint8 state : 3;
+ uint8 group : 3;
+ uint8 sum : 2;
+};
+COMPILE_ASSERT(sizeof(IndexCell) == 9, bad_IndexCell);
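+// Bit budget for the packed cell above: 22 + 18 + 20 + 4 == 64 bits of uint64
+// fields plus 3 + 3 + 2 == 8 bits of uint8 fields, i.e. 72 bits, matching the
+// 9 bytes asserted here.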
+
+struct IndexBucket {
+ IndexCell cells[4];
+ int32 next;
+ uint32 hash : 24; // The last byte is only defined for buckets of
+ uint32 reserved : 8; // the extra table.
+};
+COMPILE_ASSERT(sizeof(IndexBucket) == 44, bad_IndexBucket);
+const int kBytesPerCell = 44 / 4;
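+// The 44 bytes break down as 4 cells * 9 bytes + 4 bytes for |next| + 4 bytes
+// of hash/reserved bits; amortized over the four cells that is the 11 bytes
+// per cell captured by kBytesPerCell.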
+
+// The main cache index. Backed by a file named index_tb1.
+// The extra table (index_tb2) has a similar format, but different size.
+struct Index {
+ // Default size. Actual size controlled by header.table_len.
+ IndexBucket table[kBaseTableLen / 4];
+};
+#pragma pack(pop)
+
+// Flags that can be applied to an entry.
+enum EntryFlags {
+ PARENT_ENTRY = 1, // This entry has children (sparse) entries.
+ CHILD_ENTRY = 1 << 1 // Child entry that stores sparse data.
+};
+
+struct EntryRecord {
+ uint32 hash;
+ uint32 pad1;
+ uint8 reuse_count;
+ uint8 refetch_count;
+ int8 state; // Current EntryState.
+ uint8 flags; // Any combination of EntryFlags.
+ int32 key_len;
+ int32 data_size[4]; // We can store up to 4 data streams for each
+ CacheAddr data_addr[4]; // entry.
+ uint32 data_hash[4];
+ uint64 creation_time;
+ uint64 last_modified_time;
+ uint64 last_access_time;
+ int32 pad[3];
+ uint32 self_hash;
+};
+COMPILE_ASSERT(sizeof(EntryRecord) == 104, bad_EntryRecord);
+
+struct ShortEntryRecord {
+ uint32 hash;
+ uint32 pad1;
+ uint8 reuse_count;
+ uint8 refetch_count;
+ int8 state; // Current EntryState.
+ uint8 flags;
+ int32 key_len;
+ uint64 last_access_time;
+ uint32 long_hash[5];
+ uint32 self_hash;
+};
+COMPILE_ASSERT(sizeof(ShortEntryRecord) == 48, bad_ShortEntryRecord);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_V3_DISK_FORMAT_V3_H_
diff --git a/chromium/net/disk_cache/v3/entry_impl_v3.cc b/chromium/net/disk_cache/v3/entry_impl_v3.cc
new file mode 100644
index 00000000000..35b2e5644a7
--- /dev/null
+++ b/chromium/net/disk_cache/v3/entry_impl_v3.cc
@@ -0,0 +1,1395 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/entry_impl.h"
+
+#include "base/hash.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/net_log_parameters.h"
+#include "net/disk_cache/sparse_control.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+const int kMaxBufferSize = 1024 * 1024; // 1 MB.
+
+} // namespace
+
+namespace disk_cache {
+
+// This class handles individual memory buffers that store data before it is
+// sent to disk. The buffer can start at any offset, but if we try to write to
+// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
+// zero. The buffer grows up to a size determined by the backend, to keep the
+// total memory used under control.
+class EntryImpl::UserBuffer {
+ public:
+ explicit UserBuffer(BackendImpl* backend)
+ : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
+ buffer_.reserve(kMaxBlockSize);
+ }
+ ~UserBuffer() {
+ if (backend_)
+ backend_->BufferDeleted(capacity() - kMaxBlockSize);
+ }
+
+ // Returns true if we can handle writing |len| bytes to |offset|.
+ bool PreWrite(int offset, int len);
+
+ // Truncates the buffer to |offset| bytes.
+ void Truncate(int offset);
+
+ // Writes |len| bytes from |buf| at the given |offset|.
+ void Write(int offset, IOBuffer* buf, int len);
+
+ // Returns true if we can read |len| bytes from |offset|, given that the
+ // actual file has |eof| bytes stored. Note that the number of bytes to read
+ // may be modified by this method even though it returns false: that means we
+ // should do a smaller read from disk.
+ bool PreRead(int eof, int offset, int* len);
+
+  // Reads |len| bytes into |buf| starting at the given |offset|.
+ int Read(int offset, IOBuffer* buf, int len);
+
+ // Prepare this buffer for reuse.
+ void Reset();
+
+ char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
+ int Size() { return static_cast<int>(buffer_.size()); }
+ int Start() { return offset_; }
+ int End() { return offset_ + Size(); }
+
+ private:
+ int capacity() { return static_cast<int>(buffer_.capacity()); }
+ bool GrowBuffer(int required, int limit);
+
+ base::WeakPtr<BackendImpl> backend_;
+ int offset_;
+ std::vector<char> buffer_;
+ bool grow_allowed_;
+ DISALLOW_COPY_AND_ASSIGN(UserBuffer);
+};
+
+bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(len, 0);
+ DCHECK_GE(offset + len, 0);
+
+ // We don't want to write before our current start.
+ if (offset < offset_)
+ return false;
+
+  // Let's get the common case out of the way.
+ if (offset + len <= capacity())
+ return true;
+
+ // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
+ // buffer offset_ at 0.
+ if (!Size() && offset > kMaxBlockSize)
+ return GrowBuffer(len, kMaxBufferSize);
+
+ int required = offset - offset_ + len;
+ return GrowBuffer(required, kMaxBufferSize * 6 / 5);
+}
+
+void EntryImpl::UserBuffer::Truncate(int offset) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(offset, offset_);
+ DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;
+
+ offset -= offset_;
+ if (Size() >= offset)
+ buffer_.resize(offset);
+}
+
+void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(len, 0);
+ DCHECK_GE(offset + len, 0);
+ DCHECK_GE(offset, offset_);
+ DVLOG(3) << "Buffer write at " << offset << " current " << offset_;
+
+ if (!Size() && offset > kMaxBlockSize)
+ offset_ = offset;
+
+ offset -= offset_;
+
+ if (offset > Size())
+ buffer_.resize(offset);
+
+ if (!len)
+ return;
+
+ char* buffer = buf->data();
+ int valid_len = Size() - offset;
+ int copy_len = std::min(valid_len, len);
+ if (copy_len) {
+ memcpy(&buffer_[offset], buffer, copy_len);
+ len -= copy_len;
+ buffer += copy_len;
+ }
+ if (!len)
+ return;
+
+ buffer_.insert(buffer_.end(), buffer, buffer + len);
+}
+
+bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GT(*len, 0);
+
+ if (offset < offset_) {
+ // We are reading before this buffer.
+ if (offset >= eof)
+ return true;
+
+ // If the read overlaps with the buffer, change its length so that there is
+ // no overlap.
+ *len = std::min(*len, offset_ - offset);
+ *len = std::min(*len, eof - offset);
+
+ // We should read from disk.
+ return false;
+ }
+
+ if (!Size())
+ return false;
+
+ // See if we can fulfill the first part of the operation.
+ return (offset - offset_ < Size());
+}
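+
+// Worked example for PreRead (hypothetical values): with offset_ == 20000,
+// Size() == 10000 and eof == 30000, a request for 8000 bytes at offset 15000
+// is clipped to *len == 5000 and the method returns false, meaning the caller
+// should fetch those 5000 bytes from disk.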
+
+int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GT(len, 0);
+ DCHECK(Size() || offset < offset_);
+
+ int clean_bytes = 0;
+ if (offset < offset_) {
+    // We don't have a file, so let's fill the first part with 0.
+ clean_bytes = std::min(offset_ - offset, len);
+ memset(buf->data(), 0, clean_bytes);
+ if (len == clean_bytes)
+ return len;
+ offset = offset_;
+ len -= clean_bytes;
+ }
+
+ int start = offset - offset_;
+ int available = Size() - start;
+ DCHECK_GE(start, 0);
+ DCHECK_GE(available, 0);
+ len = std::min(len, available);
+ memcpy(buf->data() + clean_bytes, &buffer_[start], len);
+ return len + clean_bytes;
+}
+
+void EntryImpl::UserBuffer::Reset() {
+ if (!grow_allowed_) {
+ if (backend_)
+ backend_->BufferDeleted(capacity() - kMaxBlockSize);
+ grow_allowed_ = true;
+ std::vector<char> tmp;
+ buffer_.swap(tmp);
+ buffer_.reserve(kMaxBlockSize);
+ }
+ offset_ = 0;
+ buffer_.clear();
+}
+
+bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
+ DCHECK_GE(required, 0);
+ int current_size = capacity();
+ if (required <= current_size)
+ return true;
+
+ if (required > limit)
+ return false;
+
+ if (!backend_)
+ return false;
+
+ int to_add = std::max(required - current_size, kMaxBlockSize * 4);
+ to_add = std::max(current_size, to_add);
+ required = std::min(current_size + to_add, limit);
+
+ grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
+ if (!grow_allowed_)
+ return false;
+
+ DVLOG(3) << "Buffer grow to " << required;
+
+ buffer_.reserve(required);
+ return true;
+}
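+
+// Growth example for GrowBuffer (hypothetical numbers, using the 16 KB
+// kMaxBlockSize mentioned in the class comment): with a 16 KB capacity and
+// 20 KB required, to_add becomes max(4 KB, 64 KB) == 64 KB, so the buffer asks
+// IsAllocAllowed() for an 80 KB reservation (capped at |limit|) and only grows
+// if the backend agrees.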
+
+// ------------------------------------------------------------------------
+
+EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
+ : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
+ backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
+ dirty_(false) {
+ entry_.LazyInit(backend->File(address), address);
+ for (int i = 0; i < kNumStreams; i++) {
+ unreported_size_[i] = 0;
+ }
+}
+
+bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
+ uint32 hash) {
+ Trace("Create entry In");
+ EntryStore* entry_store = entry_.Data();
+ RankingsNode* node = node_.Data();
+ memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
+ memset(node, 0, sizeof(RankingsNode));
+ if (!node_.LazyInit(backend_->File(node_address), node_address))
+ return false;
+
+ entry_store->rankings_node = node_address.value();
+ node->contents = entry_.address().value();
+
+ entry_store->hash = hash;
+ entry_store->creation_time = Time::Now().ToInternalValue();
+ entry_store->key_len = static_cast<int32>(key.size());
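+  // Keys that do not fit inside the entry block are stored in a separate
+  // block or an external file, referenced by |long_key|.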
+ if (entry_store->key_len > kMaxInternalKeyLength) {
+ Addr address(0);
+ if (!CreateBlock(entry_store->key_len + 1, &address))
+ return false;
+
+ entry_store->long_key = address.value();
+ File* key_file = GetBackingFile(address, kKeyFileIndex);
+ key_ = key;
+
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
+ DeleteData(address, kKeyFileIndex);
+ return false;
+ }
+
+ if (address.is_separate_file())
+ key_file->SetLength(key.size() + 1);
+ } else {
+ memcpy(entry_store->key, key.data(), key.size());
+ entry_store->key[key.size()] = '\0';
+ }
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
+ node->dirty = backend_->GetCurrentEntryId();
+ Log("Create Entry ");
+ return true;
+}
+
+uint32 EntryImpl::GetHash() {
+ return entry_.Data()->hash;
+}
+
+bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
+ if (entry_.Data()->hash != hash ||
+ static_cast<size_t>(entry_.Data()->key_len) != key.size())
+ return false;
+
+ return (key.compare(GetKey()) == 0);
+}
+
+void EntryImpl::InternalDoom() {
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ DCHECK(node_.HasData());
+ if (!node_.Data()->dirty) {
+ node_.Data()->dirty = backend_->GetCurrentEntryId();
+ node_.Store();
+ }
+ doomed_ = true;
+}
+
+// This only includes checks that relate to the first block of the entry (the
+// first 256 bytes), and values that should have been set at entry creation.
+// Basically, even if there is something wrong with this entry, we want to see
+// if it is possible to load the rankings node and delete them together.
+bool EntryImpl::SanityCheck() {
+ if (!entry_.VerifyHash())
+ return false;
+
+ EntryStore* stored = entry_.Data();
+ if (!stored->rankings_node || stored->key_len <= 0)
+ return false;
+
+ if (stored->reuse_count < 0 || stored->refetch_count < 0)
+ return false;
+
+ Addr rankings_addr(stored->rankings_node);
+ if (!rankings_addr.SanityCheckForRankings())
+ return false;
+
+ Addr next_addr(stored->next);
+ if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
+ STRESS_NOTREACHED();
+ return false;
+ }
+ STRESS_DCHECK(next_addr.value() != entry_.address().value());
+
+ if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
+ return false;
+
+ Addr key_addr(stored->long_key);
+ if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
+ (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
+ return false;
+
+ if (!key_addr.SanityCheck())
+ return false;
+
+ if (key_addr.is_initialized() &&
+ ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
+ (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
+ return false;
+
+ int num_blocks = NumBlocksForEntry(stored->key_len);
+ if (entry_.address().num_blocks() != num_blocks)
+ return false;
+
+ return true;
+}
+
+bool EntryImpl::DataSanityCheck() {
+ EntryStore* stored = entry_.Data();
+ Addr key_addr(stored->long_key);
+
+ // The key must be NULL terminated.
+ if (!key_addr.is_initialized() && stored->key[stored->key_len])
+ return false;
+
+ if (stored->hash != base::Hash(GetKey()))
+ return false;
+
+ for (int i = 0; i < kNumStreams; i++) {
+ Addr data_addr(stored->data_addr[i]);
+ int data_size = stored->data_size[i];
+ if (data_size < 0)
+ return false;
+ if (!data_size && data_addr.is_initialized())
+ return false;
+ if (!data_addr.SanityCheck())
+ return false;
+ if (!data_size)
+ continue;
+ if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
+ return false;
+ if (data_size > kMaxBlockSize && data_addr.is_block_file())
+ return false;
+ }
+ return true;
+}
+
+void EntryImpl::FixForDelete() {
+ EntryStore* stored = entry_.Data();
+ Addr key_addr(stored->long_key);
+
+ if (!key_addr.is_initialized())
+ stored->key[stored->key_len] = '\0';
+
+ for (int i = 0; i < kNumStreams; i++) {
+ Addr data_addr(stored->data_addr[i]);
+ int data_size = stored->data_size[i];
+ if (data_addr.is_initialized()) {
+ if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
+ (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
+ !data_addr.SanityCheck()) {
+ STRESS_NOTREACHED();
+ // The address is weird so don't attempt to delete it.
+ stored->data_addr[i] = 0;
+ // In general, trust the stored size as it should be in sync with the
+ // total size tracked by the backend.
+ }
+ }
+ if (data_size < 0)
+ stored->data_size[i] = 0;
+ }
+ entry_.Store();
+}
+
+void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
+ node_.Data()->last_used = last_used.ToInternalValue();
+ node_.Data()->last_modified = last_modified.ToInternalValue();
+ node_.set_modified();
+}
+
+void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
+ DCHECK(!net_log_.net_log());
+ net_log_ = net::BoundNetLog::Make(
+ net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
+ CreateNetLogEntryCreationCallback(this, created));
+}
+
+const net::BoundNetLog& EntryImpl::net_log() const {
+ return net_log_;
+}
+
+// ------------------------------------------------------------------------
+
+void EntryImpl::Doom() {
+ if (background_queue_)
+ background_queue_->DoomEntryImpl(this);
+}
+
+void EntryImpl::DoomImpl() {
+ if (doomed_ || !backend_)
+ return;
+
+ SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+ backend_->InternalDoomEntry(this);
+}
+
+void EntryImpl::Close() {
+ if (background_queue_)
+ background_queue_->CloseEntryImpl(this);
+}
+
+std::string EntryImpl::GetKey() const {
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ int key_len = entry->Data()->key_len;
+ if (key_len <= kMaxInternalKeyLength)
+ return std::string(entry->Data()->key);
+
+ // We keep a copy of the key so that we can always return it, even if the
+ // backend is disabled.
+ if (!key_.empty())
+ return key_;
+
+ Addr address(entry->Data()->long_key);
+ DCHECK(address.is_initialized());
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
+ File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+ kKeyFileIndex);
+ if (!key_file)
+ return std::string();
+
+ ++key_len; // We store a trailing \0 on disk that we read back below.
+ if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
+ return std::string();
+
+ if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
+ key_.clear();
+ return key_;
+}
+
+Time EntryImpl::GetLastUsed() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_used);
+}
+
+Time EntryImpl::GetLastModified() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_modified);
+}
+
+int32 EntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index >= kNumStreams)
+ return 0;
+
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ return entry->Data()->data_size[index];
+}
+
+int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (callback.is_null())
+ return ReadDataImpl(index, offset, buf, buf_len, callback);
+
+ DCHECK(node_.Data()->dirty || read_only_);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!background_queue_)
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+ }
+
+ int result = InternalReadData(index, offset, buf, buf_len, callback);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
+ if (callback.is_null())
+ return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
+
+ DCHECK(node_.Data()->dirty || read_only_);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!background_queue_)
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
+ callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+ }
+
+ int result = InternalWriteData(index, offset, buf, buf_len, callback,
+ truncate);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (callback.is_null())
+ return ReadSparseDataImpl(offset, buf, buf_len, callback);
+
+ if (!background_queue_)
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ TimeTicks start = TimeTicks::Now();
+ result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
+ callback);
+ ReportIOTime(kSparseRead, start);
+ return result;
+}
+
+int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (callback.is_null())
+ return WriteSparseDataImpl(offset, buf, buf_len, callback);
+
+ if (!background_queue_)
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ TimeTicks start = TimeTicks::Now();
+ result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
+ buf_len, callback);
+ ReportIOTime(kSparseWrite, start);
+ return result;
+}
+
+int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ if (!background_queue_)
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->GetAvailableRange(this, offset, len, start, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ return sparse_->GetAvailableRange(offset, len, start);
+}
+
+bool EntryImpl::CouldBeSparse() const {
+ if (sparse_.get())
+ return true;
+
+ scoped_ptr<SparseControl> sparse;
+ sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
+ return sparse->CouldBeSparse();
+}
+
+void EntryImpl::CancelSparseIO() {
+ if (background_queue_)
+ background_queue_->CancelSparseIO(this);
+}
+
+void EntryImpl::CancelSparseIOImpl() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
+int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ if (!background_queue_)
+ return net::ERR_UNEXPECTED;
+
+ background_queue_->ReadyForSparseIO(this, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
+ DCHECK(sparse_.get());
+ return sparse_->ReadyToUse(callback);
+}
+
+// ------------------------------------------------------------------------
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that any
+// unused block is filled with zeros), and to simplify handling partial reads
+// and writes of an entry (we don't have to worry about returning data that
+// belongs to a previous cache entry because the range was not fully written
+// before).
+EntryImpl::~EntryImpl() {
+ if (!backend_) {
+ entry_.clear_modified();
+ node_.clear_modified();
+ return;
+ }
+ Log("~EntryImpl in");
+
+ // Save the sparse info to disk. This will generate IO for this entry and
+ // maybe for a child entry, so it is important to do it before deleting this
+ // entry.
+ sparse_.reset();
+
+ // Remove this entry from the list of open entries.
+ backend_->OnEntryDestroyBegin(entry_.address());
+
+ if (doomed_) {
+ DeleteEntryData(true);
+ } else {
+#if defined(NET_BUILD_STRESS_CACHE)
+ SanityCheck();
+#endif
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
+ bool ret = true;
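+    // Flush any buffered user data and report pending size changes to the
+    // backend.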
+ for (int index = 0; index < kNumStreams; index++) {
+ if (user_buffers_[index].get()) {
+ if (!(ret = Flush(index, 0)))
+ LOG(ERROR) << "Failed to save user data";
+ }
+ if (unreported_size_[index]) {
+ backend_->ModifyStorageSize(
+ entry_.Data()->data_size[index] - unreported_size_[index],
+ entry_.Data()->data_size[index]);
+ }
+ }
+
+ if (!ret) {
+ // There was a failure writing the actual data. Mark the entry as dirty.
+ int current_id = backend_->GetCurrentEntryId();
+ node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
+ node_.Store();
+ } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
+ node_.Data()->dirty = 0;
+ node_.Store();
+ }
+ }
+
+ Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
+ net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
+ backend_->OnEntryDestroyEnd();
+}
+
+int EntryImpl::InternalReadData(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!backend_)
+ return net::ERR_UNEXPECTED;
+
+ TimeTicks start = TimeTicks::Now();
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+ backend_->OnEvent(Stats::READ_DATA);
+ backend_->OnRead(buf_len);
+
+ Addr address(entry_.Data()->data_addr[index]);
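+  // If there is no backing storage yet, the whole stream lives in the user
+  // buffer, so the on-disk portion is treated as empty.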
+ int eof = address.is_initialized() ? entry_size : 0;
+ if (user_buffers_[index].get() &&
+ user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
+ // Complete the operation locally.
+ buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
+ ReportIOTime(kRead, start);
+ return buf_len;
+ }
+
+ address.set_value(entry_.Data()->data_addr[index]);
+ DCHECK(address.is_initialized());
+ if (!address.is_initialized()) {
+ DoomImpl();
+ return net::ERR_FAILED;
+ }
+
+ File* file = GetBackingFile(address, index);
+ if (!file) {
+ DoomImpl();
+ LOG(ERROR) << "No file for " << std::hex << address.value();
+ return net::ERR_FILE_NOT_FOUND;
+ }
+
+ size_t file_offset = offset;
+ if (address.is_block_file()) {
+ DCHECK_LE(offset + buf_len, kMaxBlockSize);
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ }
+
+ SyncCallback* io_callback = NULL;
+ if (!callback.is_null()) {
+ io_callback = new SyncCallback(this, buf, callback,
+ net::NetLog::TYPE_ENTRY_READ_DATA);
+ }
+
+ TimeTicks start_async = TimeTicks::Now();
+
+ bool completed;
+ if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ DoomImpl();
+ return net::ERR_CACHE_READ_FAILURE;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ if (io_callback)
+ ReportIOTime(kReadAsync1, start_async);
+
+ ReportIOTime(kRead, start);
+ return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
+}
+
+int EntryImpl::InternalWriteData(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!backend_)
+ return net::ERR_UNEXPECTED;
+
+ int max_file_size = backend_->MaxFileSize();
+
+  // offset and buf_len are known to be non-negative here, but their sum could
+  // overflow and wrap to a negative number.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ int size = offset + buf_len;
+ if (size <= max_file_size)
+ size = kint32max;
+ backend_->TooMuchStorageRequested(size);
+ return net::ERR_FAILED;
+ }
+
+ TimeTicks start = TimeTicks::Now();
+
+ // Read the size at this point (it may change inside prepare).
+ int entry_size = entry_.Data()->data_size[index];
+ bool extending = entry_size < offset + buf_len;
+ truncate = truncate && entry_size > offset + buf_len;
+ Trace("To PrepareTarget 0x%x", entry_.address().value());
+ if (!PrepareTarget(index, offset, buf_len, truncate))
+ return net::ERR_FAILED;
+
+ Trace("From PrepareTarget 0x%x", entry_.address().value());
+ if (extending || truncate)
+ UpdateSize(index, entry_size, offset + buf_len);
+
+ UpdateRank(true);
+
+ backend_->OnEvent(Stats::WRITE_DATA);
+ backend_->OnWrite(buf_len);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ user_buffers_[index]->Write(offset, buf, buf_len);
+ ReportIOTime(kWrite, start);
+ return buf_len;
+ }
+
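+  // We could not keep this write in memory, so it goes straight to the
+  // backing file (a block file or an external file).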
+ Addr address(entry_.Data()->data_addr[index]);
+ if (offset + buf_len == 0) {
+ if (truncate) {
+ DCHECK(!address.is_initialized());
+ }
+ return 0;
+ }
+
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return net::ERR_FILE_NOT_FOUND;
+
+ size_t file_offset = offset;
+ if (address.is_block_file()) {
+ DCHECK_LE(offset + buf_len, kMaxBlockSize);
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ } else if (truncate || (extending && !buf_len)) {
+ if (!file->SetLength(offset + buf_len))
+ return net::ERR_FAILED;
+ }
+
+ if (!buf_len)
+ return 0;
+
+ SyncCallback* io_callback = NULL;
+ if (!callback.is_null()) {
+ io_callback = new SyncCallback(this, buf, callback,
+ net::NetLog::TYPE_ENTRY_WRITE_DATA);
+ }
+
+ TimeTicks start_async = TimeTicks::Now();
+
+ bool completed;
+ if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
+ &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ return net::ERR_CACHE_WRITE_FAILURE;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ if (io_callback)
+ ReportIOTime(kWriteAsync1, start_async);
+
+ ReportIOTime(kWrite, start);
+ return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
+}
+
+// ------------------------------------------------------------------------
+
+bool EntryImpl::CreateDataBlock(int index, int size) {
+ DCHECK(index >= 0 && index < kNumStreams);
+
+ Addr address(entry_.Data()->data_addr[index]);
+ if (!CreateBlock(size, &address))
+ return false;
+
+ entry_.Data()->data_addr[index] = address.value();
+ entry_.Store();
+ return true;
+}
+
+bool EntryImpl::CreateBlock(int size, Addr* address) {
+ DCHECK(!address->is_initialized());
+ if (!backend_)
+ return false;
+
+ FileType file_type = Addr::RequiredFileType(size);
+ if (EXTERNAL == file_type) {
+ if (size > backend_->MaxFileSize())
+ return false;
+ if (!backend_->CreateExternalFile(address))
+ return false;
+ } else {
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!backend_->CreateBlock(file_type, num_blocks, address))
+ return false;
+ }
+ return true;
+}
+
+// Note that this method may end up modifying a block file so upon return the
+// involved block will be free, and could be reused for something else. If there
+// is a crash after that point (and maybe before returning to the caller), the
+// entry will be left dirty... and at some point it will be discarded; it is
+// important that the entry doesn't keep a reference to this address, or we'll
+// end up deleting the contents of |address| once again.
+void EntryImpl::DeleteData(Addr address, int index) {
+ DCHECK(backend_);
+ if (!address.is_initialized())
+ return;
+ if (address.is_separate_file()) {
+ int failure = !DeleteCacheFile(backend_->GetFileName(address));
+ CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
+ if (failure) {
+ LOG(ERROR) << "Failed to delete " <<
+ backend_->GetFileName(address).value() << " from the cache.";
+ }
+ if (files_[index])
+ files_[index] = NULL; // Releases the object.
+ } else {
+ backend_->DeleteBlock(address, true);
+ }
+}
+
+void EntryImpl::UpdateRank(bool modified) {
+ if (!backend_)
+ return;
+
+ if (!doomed_) {
+ // Everything is handled by the backend.
+ backend_->UpdateRank(this, modified);
+ return;
+ }
+
+ Time current = Time::Now();
+ node_.Data()->last_used = current.ToInternalValue();
+
+ if (modified)
+ node_.Data()->last_modified = current.ToInternalValue();
+}
+
+void EntryImpl::DeleteEntryData(bool everything) {
+ DCHECK(doomed_ || !everything);
+
+ if (GetEntryFlags() & PARENT_ENTRY) {
+ // We have some child entries that must go away.
+ SparseControl::DeleteChildren(this);
+ }
+
+ if (GetDataSize(0))
+ CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
+ if (GetDataSize(1))
+ CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
+ for (int index = 0; index < kNumStreams; index++) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+ }
+ }
+
+ if (!everything)
+ return;
+
+ // Remove all traces of this entry.
+ backend_->RemoveEntry(this);
+
+ // Note that at this point node_ and entry_ are just two blocks of data, and
+ // even if they reference each other, nobody should be referencing them.
+
+ Addr address(entry_.Data()->long_key);
+ DeleteData(address, kKeyFileIndex);
+ backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+ backend_->DeleteBlock(entry_.address(), true);
+ entry_.Discard();
+
+ if (!LeaveRankingsBehind()) {
+ backend_->DeleteBlock(node_.address(), true);
+ node_.Discard();
+ }
+}
+
+// We keep a memory buffer for everything that ends up stored on a block file
+// (because we don't know yet the final data size), and for some of the data
+// that ends up on external files. This function will initialize that memory
+// buffer and / or the files needed to store the data.
+//
+// In general, a buffer may overlap data already stored on disk, and in that
+// case, the contents of the buffer are the most accurate. It may also extend
+// the file, but we don't want to read from disk just to keep the buffer up to
+// date. This means that as soon as there is a chance to get confused about what
+// is the most recent version of some part of a file, we'll flush the buffer and
+// reuse it for the new data. Keep in mind that the normal use pattern is quite
+// simple (write sequentially from the beginning), so we optimize for handling
+// that case.
+bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
+ bool truncate) {
+ if (truncate)
+ return HandleTruncation(index, offset, buf_len);
+
+ if (!offset && !buf_len)
+ return true;
+
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ if (address.is_block_file() && !MoveToLocalBuffer(index))
+ return false;
+
+ if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
+ // We are about to create a buffer for the first 16KB, make sure that we
+ // preserve existing data.
+ if (!CopyToLocalBuffer(index))
+ return false;
+ }
+ }
+
+ if (!user_buffers_[index].get())
+ user_buffers_[index].reset(new UserBuffer(backend_.get()));
+
+ return PrepareBuffer(index, offset, buf_len);
+}
+
+// We get to this function with some data already stored. If there is a
+// truncation that results in data stored internally, we'll explicitly
+// handle the case here.
+bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
+ Addr address(entry_.Data()->data_addr[index]);
+
+ int current_size = entry_.Data()->data_size[index];
+ int new_size = offset + buf_len;
+
+ if (!new_size) {
+ // This is by far the most common scenario.
+ backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ unreported_size_[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+
+ user_buffers_[index].reset();
+ return true;
+ }
+
+ // We never postpone truncating a file, if there is one, but we may postpone
+ // telling the backend about the size reduction.
+ if (user_buffers_[index].get()) {
+ DCHECK_GE(current_size, user_buffers_[index]->Start());
+ if (!address.is_initialized()) {
+ // There is no overlap between the buffer and disk.
+ if (new_size > user_buffers_[index]->Start()) {
+ // Just truncate our buffer.
+ DCHECK_LT(new_size, user_buffers_[index]->End());
+ user_buffers_[index]->Truncate(new_size);
+ return true;
+ }
+
+ // Just discard our buffer.
+ user_buffers_[index]->Reset();
+ return PrepareBuffer(index, offset, buf_len);
+ }
+
+ // There is some overlap or we need to extend the file before the
+ // truncation.
+ if (offset > user_buffers_[index]->Start())
+ user_buffers_[index]->Truncate(new_size);
+ UpdateSize(index, current_size, new_size);
+ if (!Flush(index, 0))
+ return false;
+ user_buffers_[index].reset();
+ }
+
+ // We have data somewhere, and it is not in a buffer.
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+
+ if (new_size > kMaxBlockSize)
+ return true; // Let the operation go directly to disk.
+
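+  // The remaining data fits in a buffer, so bring it back to memory before
+  // truncating.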
+ return ImportSeparateFile(index, offset + buf_len);
+}
+
+bool EntryImpl::CopyToLocalBuffer(int index) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+
+ int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
+ user_buffers_[index].reset(new UserBuffer(backend_.get()));
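+  // A zero-length write at offset |len| just grows the buffer so that the
+  // file read below has somewhere to place the data.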
+ user_buffers_[index]->Write(len, NULL, 0);
+
+ File* file = GetBackingFile(address, index);
+ int offset = 0;
+
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!file ||
+ !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
+ user_buffers_[index].reset();
+ return false;
+ }
+ return true;
+}
+
+bool EntryImpl::MoveToLocalBuffer(int index) {
+ if (!CopyToLocalBuffer(index))
+ return false;
+
+ Addr address(entry_.Data()->data_addr[index]);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+
+ // If we lose this entry we'll see it as zero sized.
+ int len = entry_.Data()->data_size[index];
+ backend_->ModifyStorageSize(len - unreported_size_[index], 0);
+ unreported_size_[index] = len;
+ return true;
+}
+
+bool EntryImpl::ImportSeparateFile(int index, int new_size) {
+ if (entry_.Data()->data_size[index] > new_size)
+ UpdateSize(index, entry_.Data()->data_size[index], new_size);
+
+ return MoveToLocalBuffer(index);
+}
+
+bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
+ DCHECK(user_buffers_[index].get());
+ if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
+ offset > entry_.Data()->data_size[index]) {
+ // We are about to extend the buffer or the file (with zeros), so make sure
+ // that we are not overwriting anything.
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized() && address.is_separate_file()) {
+ if (!Flush(index, 0))
+ return false;
+ // There is an actual file already, and we don't want to keep track of
+ // its length so we let this operation go straight to disk.
+ // The only case when a buffer is allowed to extend the file (as in fill
+ // with zeros before the start) is when there is no file yet to extend.
+ user_buffers_[index].reset();
+ return true;
+ }
+ }
+
+ if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
+ if (!Flush(index, offset + buf_len))
+ return false;
+
+    // Let's try again.
+ if (offset > user_buffers_[index]->End() ||
+ !user_buffers_[index]->PreWrite(offset, buf_len)) {
+ // We cannot complete the operation with a buffer.
+ DCHECK(!user_buffers_[index]->Size());
+ DCHECK(!user_buffers_[index]->Start());
+ user_buffers_[index].reset();
+ }
+ }
+ return true;
+}
+
+bool EntryImpl::Flush(int index, int min_len) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(user_buffers_[index].get());
+ DCHECK(!address.is_initialized() || address.is_separate_file());
+ DVLOG(3) << "Flush";
+
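+  // Make sure there is a destination block (or external file) large enough
+  // for the buffered data before writing it out.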
+ int size = std::max(entry_.Data()->data_size[index], min_len);
+ if (size && !address.is_initialized() && !CreateDataBlock(index, size))
+ return false;
+
+ if (!entry_.Data()->data_size[index]) {
+ DCHECK(!user_buffers_[index]->Size());
+ return true;
+ }
+
+ address.set_value(entry_.Data()->data_addr[index]);
+
+ int len = user_buffers_[index]->Size();
+ int offset = user_buffers_[index]->Start();
+ if (!len && !offset)
+ return true;
+
+ if (address.is_block_file()) {
+ DCHECK_EQ(len, entry_.Data()->data_size[index]);
+ DCHECK(!offset);
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+ }
+
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return false;
+
+ if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
+ return false;
+ user_buffers_[index]->Reset();
+
+ return true;
+}
+
+void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
+ if (entry_.Data()->data_size[index] == new_size)
+ return;
+
+ unreported_size_[index] += new_size - old_size;
+ entry_.Data()->data_size[index] = new_size;
+ entry_.set_modified();
+}
+
+int EntryImpl::InitSparseData() {
+ if (sparse_.get())
+ return net::OK;
+
+ // Use a local variable so that sparse_ never goes from 'valid' to NULL.
+ scoped_ptr<SparseControl> sparse(new SparseControl(this));
+ int result = sparse->Init();
+ if (net::OK == result)
+ sparse_.swap(sparse);
+
+ return result;
+}
+
+void EntryImpl::SetEntryFlags(uint32 flags) {
+ entry_.Data()->flags |= flags;
+ entry_.set_modified();
+}
+
+uint32 EntryImpl::GetEntryFlags() {
+ return entry_.Data()->flags;
+}
+
+void EntryImpl::GetData(int index, char** buffer, Addr* address) {
+ DCHECK(backend_);
+ if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
+ !user_buffers_[index]->Start()) {
+ // The data is already in memory, just copy it and we're done.
+ int data_len = entry_.Data()->data_size[index];
+ if (data_len <= user_buffers_[index]->Size()) {
+ DCHECK(!user_buffers_[index]->Start());
+ *buffer = new char[data_len];
+ memcpy(*buffer, user_buffers_[index]->Data(), data_len);
+ return;
+ }
+ }
+
+ // Bad news: we'd have to read the info from disk so instead we'll just tell
+ // the caller where to read from.
+ *buffer = NULL;
+ address->set_value(entry_.Data()->data_addr[index]);
+ if (address->is_initialized()) {
+ // Prevent us from deleting the block from the backing store.
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ }
+}
+
+void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
+ if (!backend_)
+ return;
+
+ switch (op) {
+ case kRead:
+ CACHE_UMA(AGE_MS, "ReadTime", 0, start);
+ break;
+ case kWrite:
+ CACHE_UMA(AGE_MS, "WriteTime", 0, start);
+ break;
+ case kSparseRead:
+ CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
+ break;
+ case kSparseWrite:
+ CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
+ break;
+ case kAsyncIO:
+ CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
+ break;
+ case kReadAsync1:
+ CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
+ break;
+ case kWriteAsync1:
+ CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+void EntryImpl::Log(const char* msg) {
+ int dirty = 0;
+ if (node_.HasData()) {
+ dirty = node_.Data()->dirty;
+ }
+
+ Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
+ entry_.address().value(), node_.address().value());
+
+ Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
+ entry_.Data()->data_addr[1], entry_.Data()->long_key);
+
+ Trace(" doomed: %d 0x%x", doomed_, dirty);
+}
+
+} // namespace disk_cache
diff --git a/chromium/net/disk_cache/v3/entry_impl_v3.h b/chromium/net/disk_cache/v3/entry_impl_v3.h
new file mode 100644
index 00000000000..af5bb4f5561
--- /dev/null
+++ b/chromium/net/disk_cache/v3/entry_impl_v3.h
@@ -0,0 +1,223 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_ENTRY_IMPL_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_log.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class InFlightBackendIO;
+class SparseControl;
+
+// This class implements the Entry interface. An object of this
+// class represents a single entry on the cache.
+class NET_EXPORT_PRIVATE EntryImpl
+ : public Entry,
+ public base::RefCounted<EntryImpl> {
+ friend class base::RefCounted<EntryImpl>;
+ friend class SparseControl;
+ public:
+ enum Operation {
+ kRead,
+ kWrite,
+ kSparseRead,
+ kSparseWrite,
+ kAsyncIO,
+ kReadAsync1,
+ kWriteAsync1
+ };
+
+ EntryImpl(BackendImpl* backend, Addr address, bool read_only);
+
+ // Background implementation of the Entry interface.
+ void DoomImpl();
+ int ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate);
+ int ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int GetAvailableRangeImpl(int64 offset, int len, int64* start);
+ void CancelSparseIOImpl();
+ int ReadyForSparseIOImpl(const CompletionCallback& callback);
+
+  // Performs the initialization of an EntryImpl that will be added to the
+ // cache.
+ bool CreateEntry(Addr node_address, const std::string& key, uint32 hash);
+
+ uint32 GetHash();
+
+ // Returns true if this entry matches the lookup arguments.
+ bool IsSameEntry(const std::string& key, uint32 hash);
+
+  // Permanently destroys this entry.
+ void InternalDoom();
+
+ bool dirty() {
+ return dirty_;
+ }
+
+ bool doomed() {
+ return doomed_;
+ }
+
+ // Returns false if the entry is clearly invalid.
+ bool SanityCheck();
+ bool DataSanityCheck();
+
+  // Attempts to make this entry reachable through the key.
+ void FixForDelete();
+
+  // Sets the access times for this entry. This method provides support for
+ // the upgrade tool.
+ void SetTimes(base::Time last_used, base::Time last_modified);
+
+ // Logs a begin event and enables logging for the EntryImpl. Will also cause
+ // an end event to be logged on destruction. The EntryImpl must have its key
+ // initialized before this is called. |created| is true if the Entry was
+ // created rather than opened.
+ void BeginLogging(net::NetLog* net_log, bool created);
+
+ const net::BoundNetLog& net_log() const;
+
+ // Entry interface.
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ enum {
+ kNumStreams = 3
+ };
+ class UserBuffer;
+
+ virtual ~EntryImpl();
+
+ // Do all the work for ReadDataImpl and WriteDataImpl. Implemented as
+ // separate functions to make logging of results simpler.
+ int InternalReadData(int index, int offset, IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback);
+ int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate);
+
+ // Initializes the storage for an internal or external data block.
+ bool CreateDataBlock(int index, int size);
+
+ // Initializes the storage for an internal or external generic block.
+ bool CreateBlock(int size, Addr* address);
+
+  // Deletes the data pointed to by |address|, which may be backed by
+  // files_[index]. Note that the caller should most likely clear the stored
+  // reference to |address| (and Store() the entry) *before* calling this
+  // method, because we don't want an entry using an address that is already
+  // free.
+ void DeleteData(Addr address, int index);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
+
+ // Deletes this entry from disk. If |everything| is false, only the user data
+ // will be removed, leaving the key and control data intact.
+ void DeleteEntryData(bool everything);
+
+ // Prepares the target file or buffer for a write of buf_len bytes at the
+ // given offset.
+ bool PrepareTarget(int index, int offset, int buf_len, bool truncate);
+
+ // Adjusts the internal buffer and file handle for a write that truncates this
+ // stream.
+ bool HandleTruncation(int index, int offset, int buf_len);
+
+ // Copies data from disk to the internal buffer.
+ bool CopyToLocalBuffer(int index);
+
+ // Reads from a block data file to this object's memory buffer.
+ bool MoveToLocalBuffer(int index);
+
+ // Loads the external file to this object's memory buffer.
+ bool ImportSeparateFile(int index, int new_size);
+
+  // Makes sure that the internal buffer can handle a write of |buf_len|
+ // bytes to |offset|.
+ bool PrepareBuffer(int index, int offset, int buf_len);
+
+ // Flushes the in-memory data to the backing storage. The data destination
+ // is determined based on the current data length and |min_len|.
+ bool Flush(int index, int min_len);
+
+ // Updates the size of a given data stream.
+ void UpdateSize(int index, int old_size, int new_size);
+
+ // Initializes the sparse control object. Returns a net error code.
+ int InitSparseData();
+
+ // Adds the provided |flags| to the current EntryFlags for this entry.
+ void SetEntryFlags(uint32 flags);
+
+ // Returns the current EntryFlags for this entry.
+ uint32 GetEntryFlags();
+
+ // Gets the data stored at the given index. If the information is in memory,
+ // a buffer will be allocated and the data will be copied to it (the caller
+ // can find out the size of the buffer before making this call). Otherwise,
+ // the cache address of the data will be returned, and that address will be
+  // removed from the regular bookkeeping of this entry so the caller is
+ // responsible for deleting the block (or file) from the backing store at some
+ // point; there is no need to report any storage-size change, only to do the
+ // actual cleanup.
+ void GetData(int index, char** buffer, Addr* address);
+
+ // Generates a histogram for the time spent working on this operation.
+ void ReportIOTime(Operation op, const base::TimeTicks& start);
+
+ // Logs this entry to the internal trace buffer.
+ void Log(const char* msg);
+
+ CacheEntryBlock entry_; // Key related information for this entry.
+ CacheRankingsBlock node_; // Rankings related information for this entry.
+ base::WeakPtr<BackendImpl> backend_; // Back pointer to the cache.
+ base::WeakPtr<InFlightBackendIO> background_queue_; // In-progress queue.
+ scoped_ptr<UserBuffer> user_buffers_[kNumStreams]; // Stores user data.
+ // Files to store external user data and key.
+ scoped_refptr<File> files_[kNumStreams + 1];
+ mutable std::string key_; // Copy of the key.
+ int unreported_size_[kNumStreams]; // Bytes not reported yet to the backend.
+ bool doomed_; // True if this entry was removed from the cache.
+ bool read_only_; // True if not yet writing.
+ bool dirty_; // True if we detected that this is a dirty entry.
+ scoped_ptr<SparseControl> sparse_; // Support for sparse entries.
+
+ net::BoundNetLog net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(EntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ENTRY_IMPL_H_
diff --git a/chromium/net/disk_cache/v3/eviction_v3.cc b/chromium/net/disk_cache/v3/eviction_v3.cc
new file mode 100644
index 00000000000..91275fc7bc2
--- /dev/null
+++ b/chromium/net/disk_cache/v3/eviction_v3.cc
@@ -0,0 +1,502 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The eviction policy is a very simple pure LRU, so the elements at the end of
+// the list are evicted until kCleanUpMargin free space is available. There is
+// only one list in use (Rankings::NO_USE), and elements are sent to the front
+// of the list whenever they are accessed.
+
+// The new (in-development) eviction policy adds re-use as a factor to evict
+// an entry. The story so far:
+
+// Entries are linked on separate lists depending on how often they are used.
+// When we see an element for the first time, it goes to the NO_USE list; if
+// the object is reused later on, we move it to the LOW_USE list, until it is
+// used kHighUse times, at which point it is moved to the HIGH_USE list.
+// Whenever an element is evicted, we move it to the DELETED list so that if the
+// element is accessed again, we remember the fact that it was already stored
+// and maybe in the future we don't evict that element.
+
+// When we have to evict an element, first we try to use the last element from
+// the NO_USE list, then we move to the LOW_USE and only then we evict an entry
+// from the HIGH_USE list. We attempt to keep entries in the cache for at least
+// kTargetTime hours (with frequently accessed items stored for longer periods),
+// but if we cannot do that, we fall back to keeping each list roughly the same
+// size so that we have a chance to see an element again and move it to another
+// list.
+
+#include "net/disk_cache/eviction.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/trace.h"
+
+using base::Time;
+using base::TimeTicks;
+
+namespace {
+
+const int kCleanUpMargin = 1024 * 1024;
+const int kHighUse = 10; // Reuse count to be on the HIGH_USE list.
+const int kTargetTime = 24 * 7; // Time to be evicted (hours since last use).
+const int kMaxDelayedTrims = 60;
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
+
+bool FallingBehind(int current_size, int max_size) {
+ return current_size > max_size - kCleanUpMargin * 20;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+// The real initialization happens during Init(), init_ is the only member that
+// has to be initialized here.
+Eviction::Eviction()
+ : backend_(NULL),
+ init_(false),
+ ptr_factory_(this) {
+}
+
+Eviction::~Eviction() {
+}
+
+void Eviction::Init(BackendImpl* backend) {
+ // We grab a bunch of info from the backend to make the code a little cleaner
+ // when we're actually doing work.
+ backend_ = backend;
+ rankings_ = &backend->rankings_;
+ header_ = &backend_->data_->header;
+ max_size_ = LowWaterAdjust(backend_->max_size_);
+ index_size_ = backend->mask_ + 1;
+ new_eviction_ = backend->new_eviction_;
+ first_trim_ = true;
+ trimming_ = false;
+ delay_trim_ = false;
+ trim_delays_ = 0;
+ init_ = true;
+ test_mode_ = false;
+}
+
+void Eviction::Stop() {
+ // It is possible for the backend initialization to fail, in which case this
+ // object was never initialized... and there is nothing to do.
+ if (!init_)
+ return;
+
+ // We want to stop further evictions, so let's pretend that we are busy from
+ // this point on.
+ DCHECK(!trimming_);
+ trimming_ = true;
+ ptr_factory_.InvalidateWeakPtrs();
+}
+
+void Eviction::TrimCache(bool empty) {
+ if (backend_->disabled_ || trimming_)
+ return;
+
+ if (!empty && !ShouldTrim())
+ return PostDelayedTrim();
+
+ if (new_eviction_)
+ return TrimCacheV2(empty);
+
+ Trace("*** Trim Cache ***");
+ trimming_ = true;
+ TimeTicks start = TimeTicks::Now();
+ Rankings::ScopedRankingsBlock node(rankings_);
+ Rankings::ScopedRankingsBlock next(
+ rankings_, rankings_->GetPrev(node.get(), Rankings::NO_USE));
+ int deleted_entries = 0;
+ int target_size = empty ? 0 : max_size_;
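+  // Walk the LRU list from the oldest entry, evicting until the cache is
+  // below the target size (the work is rescheduled if it takes too long).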
+ while ((header_->num_bytes > target_size || test_mode_) && next.get()) {
+ // The iterator could be invalidated within EvictEntry().
+ if (!next->HasData())
+ break;
+ node.reset(next.release());
+ next.reset(rankings_->GetPrev(node.get(), Rankings::NO_USE));
+ if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
+ // This entry is not being used by anybody.
+ // Do NOT use node as an iterator after this point.
+ rankings_->TrackRankingsBlock(node.get(), false);
+ if (EvictEntry(node.get(), empty, Rankings::NO_USE) && !test_mode_)
+ deleted_entries++;
+
+ if (!empty && test_mode_)
+ break;
+ }
+ if (!empty && (deleted_entries > 20 ||
+ (TimeTicks::Now() - start).InMilliseconds() > 20)) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimCache, ptr_factory_.GetWeakPtr(), false));
+ break;
+ }
+ }
+
+ if (empty) {
+ CACHE_UMA(AGE_MS, "TotalClearTimeV1", 0, start);
+ } else {
+ CACHE_UMA(AGE_MS, "TotalTrimTimeV1", 0, start);
+ }
+ CACHE_UMA(COUNTS, "TrimItemsV1", 0, deleted_entries);
+
+ trimming_ = false;
+ Trace("*** Trim Cache end ***");
+ return;
+}
+
+void Eviction::OnOpenEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK_EQ(ENTRY_NORMAL, info->state);
+
+ if (info->reuse_count < kint32max) {
+ info->reuse_count++;
+ entry->entry()->set_modified();
+
+ // We may need to move this to a new list.
+ if (1 == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::NO_USE, true);
+ rankings_->Insert(entry->rankings(), false, Rankings::LOW_USE);
+ entry->entry()->Store();
+ } else if (kHighUse == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::LOW_USE, true);
+ rankings_->Insert(entry->rankings(), false, Rankings::HIGH_USE);
+ entry->entry()->Store();
+ }
+ }
+}
+
+void Eviction::OnCreateEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ switch (info->state) {
+ case ENTRY_NORMAL: {
+ DCHECK(!info->reuse_count);
+ DCHECK(!info->refetch_count);
+ break;
+ };
+ case ENTRY_EVICTED: {
+ if (info->refetch_count < kint32max)
+ info->refetch_count++;
+
+ if (info->refetch_count > kHighUse && info->reuse_count < kHighUse) {
+ info->reuse_count = kHighUse;
+ } else {
+ info->reuse_count++;
+ }
+ info->state = ENTRY_NORMAL;
+ entry->entry()->Store();
+ rankings_->Remove(entry->rankings(), Rankings::DELETED, true);
+ break;
+ };
+ default:
+ NOTREACHED();
+ }
+
+ rankings_->Insert(entry->rankings(), true, GetListForEntryV2(entry));
+}
+
+void Eviction::SetTestMode() {
+ test_mode_ = true;
+}
+
+void Eviction::TrimDeletedList(bool empty) {
+ DCHECK(test_mode_ && new_eviction_);
+ TrimDeleted(empty);
+}
+
+// -----------------------------------------------------------------------
+
+void Eviction::PostDelayedTrim() {
+ // Prevent posting multiple tasks.
+ if (delay_trim_)
+ return;
+ delay_trim_ = true;
+ trim_delays_++;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&Eviction::DelayedTrim, ptr_factory_.GetWeakPtr()),
+ base::TimeDelta::FromMilliseconds(1000));
+}
+
+void Eviction::DelayedTrim() {
+ delay_trim_ = false;
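+  // Keep postponing the trim while the backend is under load, up to
+  // kMaxDelayedTrims times.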
+ if (trim_delays_ < kMaxDelayedTrims && backend_->IsLoaded())
+ return PostDelayedTrim();
+
+ TrimCache(false);
+}
+
+bool Eviction::ShouldTrim() {
+ if (!FallingBehind(header_->num_bytes, max_size_) &&
+ trim_delays_ < kMaxDelayedTrims && backend_->IsLoaded()) {
+ return false;
+ }
+
+ UMA_HISTOGRAM_COUNTS("DiskCache.TrimDelays", trim_delays_);
+ trim_delays_ = 0;
+ return true;
+}
+
+bool Eviction::ShouldTrimDeleted() {
+ int index_load = header_->num_entries * 100 / index_size_;
+
+ // If the index is not loaded, the deleted list will tend to double the size
+// of the other 3 lists (40% of the total). Otherwise, all lists will be
+ // about the same size.
+ int max_length = (index_load < 25) ? header_->num_entries * 2 / 5 :
+ header_->num_entries / 4;
+ return (!test_mode_ && header_->lru.sizes[Rankings::DELETED] > max_length);
+}
+
+bool Eviction::EvictEntry(CacheRankingsBlock* node, bool empty,
+ Rankings::List list) {
+ EntryImpl* entry = backend_->GetEnumeratedEntry(node, list);
+ if (!entry) {
+ Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ return false;
+ }
+
+ ReportTrimTimes(entry);
+ if (empty || !new_eviction_) {
+ entry->DoomImpl();
+ } else {
+ entry->DeleteEntryData(false);
+ EntryStore* info = entry->entry()->Data();
+ DCHECK_EQ(ENTRY_NORMAL, info->state);
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry), true);
+ info->state = ENTRY_EVICTED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+ }
+ if (!empty)
+ backend_->OnEvent(Stats::TRIM_ENTRY);
+
+ entry->Release();
+
+ return true;
+}
+
+void Eviction::TrimCacheV2(bool empty) {
+ Trace("*** Trim Cache ***");
+ trimming_ = true;
+ TimeTicks start = TimeTicks::Now();
+
+ const int kListsToSearch = 3;
+ Rankings::ScopedRankingsBlock next[kListsToSearch];
+ int list = Rankings::LAST_ELEMENT;
+
+ // Get a node from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ bool done = false;
+ next[i].set_rankings(rankings_);
+ if (done)
+ continue;
+ next[i].reset(rankings_->GetPrev(NULL, static_cast<Rankings::List>(i)));
+ if (!empty && NodeIsOldEnough(next[i].get(), i)) {
+ list = static_cast<Rankings::List>(i);
+ done = true;
+ }
+ }
+
+  // If we are not meeting the time targets, let's move on to list length.
+ if (!empty && Rankings::LAST_ELEMENT == list)
+ list = SelectListByLength(next);
+
+ if (empty)
+ list = 0;
+
+ Rankings::ScopedRankingsBlock node(rankings_);
+ int deleted_entries = 0;
+ int target_size = empty ? 0 : max_size_;
+
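+  // Evict from the selected list; when emptying the whole cache we walk all
+  // the lists, starting with NO_USE.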
+ for (; list < kListsToSearch; list++) {
+ while ((header_->num_bytes > target_size || test_mode_) &&
+ next[list].get()) {
+ // The iterator could be invalidated within EvictEntry().
+ if (!next[list]->HasData())
+ break;
+ node.reset(next[list].release());
+ next[list].reset(rankings_->GetPrev(node.get(),
+ static_cast<Rankings::List>(list)));
+ if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
+ // This entry is not being used by anybody.
+ // Do NOT use node as an iterator after this point.
+ rankings_->TrackRankingsBlock(node.get(), false);
+ if (EvictEntry(node.get(), empty, static_cast<Rankings::List>(list)))
+ deleted_entries++;
+
+ if (!empty && test_mode_)
+ break;
+ }
+ if (!empty && (deleted_entries > 20 ||
+ (TimeTicks::Now() - start).InMilliseconds() > 20)) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimCache, ptr_factory_.GetWeakPtr(), false));
+ break;
+ }
+ }
+ if (!empty)
+ list = kListsToSearch;
+ }
+
+ if (empty) {
+ TrimDeleted(true);
+ } else if (ShouldTrimDeleted()) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimDeleted, ptr_factory_.GetWeakPtr(), empty));
+ }
+
+ if (empty) {
+ CACHE_UMA(AGE_MS, "TotalClearTimeV2", 0, start);
+ } else {
+ CACHE_UMA(AGE_MS, "TotalTrimTimeV2", 0, start);
+ }
+ CACHE_UMA(COUNTS, "TrimItemsV2", 0, deleted_entries);
+
+ Trace("*** Trim Cache end ***");
+ trimming_ = false;
+ return;
+}
+
+// This is a minimal implementation that just discards the oldest nodes.
+// TODO(rvargas): Do something better here.
+void Eviction::TrimDeleted(bool empty) {
+ Trace("*** Trim Deleted ***");
+ if (backend_->disabled_)
+ return;
+
+ TimeTicks start = TimeTicks::Now();
+ Rankings::ScopedRankingsBlock node(rankings_);
+ Rankings::ScopedRankingsBlock next(
+ rankings_, rankings_->GetPrev(node.get(), Rankings::DELETED));
+ int deleted_entries = 0;
+ while (next.get() &&
+ (empty || (deleted_entries < 20 &&
+ (TimeTicks::Now() - start).InMilliseconds() < 20))) {
+ node.reset(next.release());
+ next.reset(rankings_->GetPrev(node.get(), Rankings::DELETED));
+ if (RemoveDeletedNode(node.get()))
+ deleted_entries++;
+ if (test_mode_)
+ break;
+ }
+
+ if (deleted_entries && !empty && ShouldTrimDeleted()) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&Eviction::TrimDeleted, ptr_factory_.GetWeakPtr(), false));
+ }
+
+ CACHE_UMA(AGE_MS, "TotalTrimDeletedTime", 0, start);
+ CACHE_UMA(COUNTS, "TrimDeletedItems", 0, deleted_entries);
+ Trace("*** Trim Deleted end ***");
+ return;
+}
+
+void Eviction::ReportTrimTimes(EntryImpl* entry) {
+ if (first_trim_) {
+ first_trim_ = false;
+ if (backend_->ShouldReportAgain()) {
+ CACHE_UMA(AGE, "TrimAge", 0, entry->GetLastUsed());
+ ReportListStats();
+ }
+
+ if (header_->lru.filled)
+ return;
+
+ header_->lru.filled = 1;
+
+ if (header_->create_time) {
+ // This is the first entry that we have to evict, generate some noise.
+ backend_->FirstEviction();
+ } else {
+ // This is an old file, but we may want more reports from this user so
+      // let's save some create_time.
+ Time::Exploded old = {0};
+ old.year = 2009;
+ old.month = 3;
+ old.day_of_month = 1;
+ header_->create_time = Time::FromLocalExploded(old).ToInternalValue();
+ }
+ }
+}
+
+bool Eviction::NodeIsOldEnough(CacheRankingsBlock* node, int list) {
+ if (!node)
+ return false;
+
+ // If possible, we want to keep entries on each list at least kTargetTime
+ // hours. Each successive list on the enumeration has 2x the target time of
+ // the previous list.
+ Time used = Time::FromInternalValue(node->Data()->last_used);
+ int multiplier = 1 << list;
+ return (Time::Now() - used).InHours() > kTargetTime * multiplier;
+}
+
+int Eviction::SelectListByLength(Rankings::ScopedRankingsBlock* next) {
+ int data_entries = header_->num_entries -
+ header_->lru.sizes[Rankings::DELETED];
+ // Start by having each list be roughly the same size.
+ if (header_->lru.sizes[0] > data_entries / 3)
+ return 0;
+
+ int list = (header_->lru.sizes[1] > data_entries / 3) ? 1 : 2;
+
+ // Make sure that frequently used items are kept for a minimum time; we know
+ // that this entry is not older than its current target, but it must be at
+ // least older than the target for list 0 (kTargetTime), as long as we don't
+ // exhaust list 0.
+ if (!NodeIsOldEnough(next[list].get(), 0) &&
+ header_->lru.sizes[0] > data_entries / 10)
+ list = 0;
+
+ return list;
+}
+
+void Eviction::ReportListStats() {
+ if (!new_eviction_)
+ return;
+
+ Rankings::ScopedRankingsBlock last1(rankings_,
+ rankings_->GetPrev(NULL, Rankings::NO_USE));
+ Rankings::ScopedRankingsBlock last2(rankings_,
+ rankings_->GetPrev(NULL, Rankings::LOW_USE));
+ Rankings::ScopedRankingsBlock last3(rankings_,
+ rankings_->GetPrev(NULL, Rankings::HIGH_USE));
+ Rankings::ScopedRankingsBlock last4(rankings_,
+ rankings_->GetPrev(NULL, Rankings::DELETED));
+
+ if (last1.get())
+ CACHE_UMA(AGE, "NoUseAge", 0,
+ Time::FromInternalValue(last1.get()->Data()->last_used));
+ if (last2.get())
+ CACHE_UMA(AGE, "LowUseAge", 0,
+ Time::FromInternalValue(last2.get()->Data()->last_used));
+ if (last3.get())
+ CACHE_UMA(AGE, "HighUseAge", 0,
+ Time::FromInternalValue(last3.get()->Data()->last_used));
+ if (last4.get())
+ CACHE_UMA(AGE, "DeletedAge", 0,
+ Time::FromInternalValue(last4.get()->Data()->last_used));
+}
+
+} // namespace disk_cache
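
The three in-use lists are trimmed from their tails, and NodeIsOldEnough() above keeps each successive list roughly twice as long as the previous one. A minimal standalone sketch of that check follows; kTargetTimeHours is a placeholder for the kTargetTime constant defined near the top of eviction_v3.cc (not visible in this hunk), so the value below is only illustrative.

    #include "base/time/time.h"

    // Placeholder; the real constant is kTargetTime in eviction_v3.cc.
    const int kTargetTimeHours = 24;

    // Mirrors Eviction::NodeIsOldEnough(): list |list| only gives up entries
    // that have been idle for more than kTargetTime * 2^list hours, where the
    // lists 0..2 are NO_USE, LOW_USE and HIGH_USE.
    bool OldEnoughForList(base::Time last_used, int list) {
      int multiplier = 1 << list;  // 1, 2, 4 for successive lists.
      return (base::Time::Now() - last_used).InHours() >
             kTargetTimeHours * multiplier;
    }
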
diff --git a/chromium/net/disk_cache/v3/eviction_v3.h b/chromium/net/disk_cache/v3/eviction_v3.h
new file mode 100644
index 00000000000..1f05b0e0881
--- /dev/null
+++ b/chromium/net/disk_cache/v3/eviction_v3.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_EVICTION_H_
+#define NET_DISK_CACHE_EVICTION_H_
+
+#include "base/basictypes.h"
+#include "base/memory/weak_ptr.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/rankings.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class EntryImpl;
+
+// This class implements the eviction algorithm for the cache and it is tightly
+// integrated with BackendImpl.
+class Eviction {
+ public:
+ Eviction();
+ ~Eviction();
+
+ void Init(BackendImpl* backend);
+ void Stop();
+
+ // Deletes entries from the cache until the current size is below the limit.
+ // If empty is true, the whole cache will be trimmed regardless of whether
+ // its entries are in use.
+ void TrimCache(bool empty);
+
+ // Notifications of interesting events for a given entry.
+ void OnOpenEntry(EntryImpl* entry);
+ void OnCreateEntry(EntryImpl* entry);
+
+ // Testing interface.
+ void SetTestMode();
+ void TrimDeletedList(bool empty);
+
+ private:
+ void PostDelayedTrim();
+ void DelayedTrim();
+ bool ShouldTrim();
+ bool ShouldTrimDeleted();
+ bool EvictEntry(CacheRankingsBlock* node, bool empty, Rankings::List list);
+
+ void TrimCacheV2(bool empty);
+ void TrimDeleted(bool empty);
+
+ bool NodeIsOldEnough(CacheRankingsBlock* node, int list);
+ int SelectListByLength(Rankings::ScopedRankingsBlock* next);
+ void ReportListStats();
+
+ BackendImpl* backend_;
+ Rankings* rankings_;
+ IndexHeader* header_;
+ int max_size_;
+ int trim_delays_;
+ int index_size_;
+ bool new_eviction_;
+ bool first_trim_;
+ bool trimming_;
+ bool delay_trim_;
+ bool init_;
+ bool test_mode_;
+ base::WeakPtrFactory<Eviction> ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(Eviction);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_EVICTION_H_
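
Both TrimCache() and TrimDeleted() bound the work done per call (roughly 20 entries or 20 ms) and re-post themselves through ptr_factory_.GetWeakPtr(), so a pending trim task silently does nothing if the Eviction object has already gone away. A minimal sketch of that pattern, using only the base APIs already seen above; the Trimmer class and its work_left_ counter are illustrative, not part of the cache code.

    #include "base/basictypes.h"
    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/message_loop/message_loop.h"

    class Trimmer {
     public:
      Trimmer() : work_left_(100), ptr_factory_(this) {}

      void Trim(bool empty) {
        // Do a bounded slice of work, then reschedule the rest. If |this| is
        // destroyed before the task runs, the weak pointer cancels the call.
        if (--work_left_ <= 0)
          return;
        base::MessageLoop::current()->PostTask(
            FROM_HERE,
            base::Bind(&Trimmer::Trim, ptr_factory_.GetWeakPtr(), empty));
      }

     private:
      int work_left_;
      base::WeakPtrFactory<Trimmer> ptr_factory_;

      DISALLOW_COPY_AND_ASSIGN(Trimmer);
    };
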
diff --git a/chromium/net/disk_cache/v3/sparse_control_v3.cc b/chromium/net/disk_cache/v3/sparse_control_v3.cc
new file mode 100644
index 00000000000..d9700adc89c
--- /dev/null
+++ b/chromium/net/disk_cache/v3/sparse_control_v3.cc
@@ -0,0 +1,868 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/sparse_control.h"
+
+#include "base/bind.h"
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/file.h"
+#include "net/disk_cache/net_log_parameters.h"
+
+using base::Time;
+
+namespace {
+
+// Stream of the sparse data index.
+const int kSparseIndex = 2;
+
+// Stream of the sparse data.
+const int kSparseData = 1;
+
+// We can have up to 64k children.
+const int kMaxMapSize = 8 * 1024;
+
+// The maximum number of bytes that a child can store.
+const int kMaxEntrySize = 0x100000;
+
+// The size of each data block (tracked by the child allocation bitmap).
+const int kBlockSize = 1024;
+
+// Returns the name of a child entry given the base_name and signature of the
+// parent and the child_id.
+// If the entry is called entry_name, child entries will be named something
+// like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the
+// number of the particular child.
+std::string GenerateChildName(const std::string& base_name, int64 signature,
+ int64 child_id) {
+ return base::StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(),
+ signature, child_id);
+}
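+// For instance (illustrative values only): a parent keyed "http://foo/bar"
+// whose header signature is 0x1234 names its third child
+// "Range_http://foo/bar:1234:2"; child ids are simply the sparse offset
+// divided by 1 MB (see SparseControl::GenerateChildKey() below).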
+
+// This class deletes the children of a sparse entry.
+class ChildrenDeleter
+ : public base::RefCounted<ChildrenDeleter>,
+ public disk_cache::FileIOCallback {
+ public:
+ ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name)
+ : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {}
+
+ virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
+
+ // Two ways of deleting the children: if we have the children map, use Start()
+ // directly, otherwise pass the data address to ReadData().
+ void Start(char* buffer, int len);
+ void ReadData(disk_cache::Addr address, int len);
+
+ private:
+ friend class base::RefCounted<ChildrenDeleter>;
+ virtual ~ChildrenDeleter() {}
+
+ void DeleteChildren();
+
+ base::WeakPtr<disk_cache::BackendImpl> backend_;
+ std::string name_;
+ disk_cache::Bitmap children_map_;
+ int64 signature_;
+ scoped_ptr<char[]> buffer_;
+ DISALLOW_COPY_AND_ASSIGN(ChildrenDeleter);
+};
+
+// This is the callback of the file operation.
+void ChildrenDeleter::OnFileIOComplete(int bytes_copied) {
+ char* buffer = buffer_.release();
+ Start(buffer, bytes_copied);
+}
+
+void ChildrenDeleter::Start(char* buffer, int len) {
+ buffer_.reset(buffer);
+ if (len < static_cast<int>(sizeof(disk_cache::SparseData)))
+ return Release();
+
+ // Just copy the information from |buffer|, delete |buffer| and start deleting
+ // the child entries.
+ disk_cache::SparseData* data =
+ reinterpret_cast<disk_cache::SparseData*>(buffer);
+ signature_ = data->header.signature;
+
+ int num_bits = (len - sizeof(disk_cache::SparseHeader)) * 8;
+ children_map_.Resize(num_bits, false);
+ children_map_.SetMap(data->bitmap, num_bits / 32);
+ buffer_.reset();
+
+ DeleteChildren();
+}
+
+void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
+ DCHECK(address.is_block_file());
+ if (!backend_)
+ return Release();
+
+ disk_cache::File* file(backend_->File(address));
+ if (!file)
+ return Release();
+
+ size_t file_offset = address.start_block() * address.BlockSize() +
+ disk_cache::kBlockHeaderSize;
+
+ buffer_.reset(new char[len]);
+ bool completed;
+ if (!file->Read(buffer_.get(), len, file_offset, this, &completed))
+ return Release();
+
+ if (completed)
+ OnFileIOComplete(len);
+
+ // And wait until OnFileIOComplete gets called.
+}
+
+void ChildrenDeleter::DeleteChildren() {
+ int child_id = 0;
+ if (!children_map_.FindNextSetBit(&child_id) || !backend_) {
+ // We are done. Just delete this object.
+ return Release();
+ }
+ std::string child_name = GenerateChildName(name_, signature_, child_id);
+ backend_->SyncDoomEntry(child_name);
+ children_map_.Set(child_id, false);
+
+ // Post a task to delete the next child.
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&ChildrenDeleter::DeleteChildren, this));
+}
+
+// -----------------------------------------------------------------------
+
+// Returns the NetLog event type corresponding to a SparseOperation.
+net::NetLog::EventType GetSparseEventType(
+ disk_cache::SparseControl::SparseOperation operation) {
+ switch (operation) {
+ case disk_cache::SparseControl::kReadOperation:
+ return net::NetLog::TYPE_SPARSE_READ;
+ case disk_cache::SparseControl::kWriteOperation:
+ return net::NetLog::TYPE_SPARSE_WRITE;
+ case disk_cache::SparseControl::kGetRangeOperation:
+ return net::NetLog::TYPE_SPARSE_GET_RANGE;
+ default:
+ NOTREACHED();
+ return net::NetLog::TYPE_CANCELLED;
+ }
+}
+
+// Logs the end event for |operation| on a child entry. Range operations do
+// not log events for the individual children they search through.
+void LogChildOperationEnd(const net::BoundNetLog& net_log,
+ disk_cache::SparseControl::SparseOperation operation,
+ int result) {
+ if (net_log.IsLoggingAllEvents()) {
+ net::NetLog::EventType event_type;
+ switch (operation) {
+ case disk_cache::SparseControl::kReadOperation:
+ event_type = net::NetLog::TYPE_SPARSE_READ_CHILD_DATA;
+ break;
+ case disk_cache::SparseControl::kWriteOperation:
+ event_type = net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA;
+ break;
+ case disk_cache::SparseControl::kGetRangeOperation:
+ return;
+ default:
+ NOTREACHED();
+ return;
+ }
+ net_log.EndEventWithNetErrorCode(event_type, result);
+ }
+}
+
+} // namespace
+
+namespace disk_cache {
+
+SparseControl::SparseControl(EntryImpl* entry)
+ : entry_(entry),
+ child_(NULL),
+ operation_(kNoOperation),
+ pending_(false),
+ finished_(false),
+ init_(false),
+ range_found_(false),
+ abort_(false),
+ child_map_(child_data_.bitmap, kNumSparseBits, kNumSparseBits / 32),
+ offset_(0),
+ buf_len_(0),
+ child_offset_(0),
+ child_len_(0),
+ result_(0) {
+ memset(&sparse_header_, 0, sizeof(sparse_header_));
+ memset(&child_data_, 0, sizeof(child_data_));
+}
+
+SparseControl::~SparseControl() {
+ if (child_)
+ CloseChild();
+ if (init_)
+ WriteSparseData();
+}
+
+bool SparseControl::CouldBeSparse() const {
+ DCHECK(!init_);
+
+ if (entry_->GetDataSize(kSparseData))
+ return false;
+
+ // We don't verify the data, just see if it could be there.
+ return (entry_->GetDataSize(kSparseIndex) != 0);
+}
+
+int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback) {
+ DCHECK(init_);
+ // We don't support simultaneous IO for sparse data.
+ if (operation_ != kNoOperation)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ // We only support up to 64 GB.
+ if (offset + buf_len >= 0x1000000000LL || offset + buf_len < 0)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ DCHECK(!user_buf_);
+ DCHECK(user_callback_.is_null());
+
+ if (!buf && (op == kReadOperation || op == kWriteOperation))
+ return 0;
+
+ // Copy the operation parameters.
+ operation_ = op;
+ offset_ = offset;
+ user_buf_ = buf ? new net::DrainableIOBuffer(buf, buf_len) : NULL;
+ buf_len_ = buf_len;
+ user_callback_ = callback;
+
+ result_ = 0;
+ pending_ = false;
+ finished_ = false;
+ abort_ = false;
+
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().BeginEvent(
+ GetSparseEventType(operation_),
+ CreateNetLogSparseOperationCallback(offset_, buf_len_));
+ }
+ DoChildrenIO();
+
+ if (!pending_) {
+ // Everything was done synchronously.
+ operation_ = kNoOperation;
+ user_buf_ = NULL;
+ user_callback_.Reset();
+ return result_;
+ }
+
+ return net::ERR_IO_PENDING;
+}
+
+int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
+ DCHECK(init_);
+ // We don't support simultaneous IO for sparse data.
+ if (operation_ != kNoOperation)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ DCHECK(start);
+
+ range_found_ = false;
+ int result = StartIO(
+ kGetRangeOperation, offset, NULL, len, CompletionCallback());
+ if (range_found_) {
+ *start = offset_;
+ return result;
+ }
+
+ // This is a failure. We want to return a valid start value in any case.
+ *start = offset;
+ return result < 0 ? result : 0; // Don't mask error codes to the caller.
+}
+
+void SparseControl::CancelIO() {
+ if (operation_ == kNoOperation)
+ return;
+ abort_ = true;
+}
+
+int SparseControl::ReadyToUse(const CompletionCallback& callback) {
+ if (!abort_)
+ return net::OK;
+
+ // We'll grab another reference to keep this object alive because we just have
+ // one extra reference due to the pending IO operation itself, but we'll
+ // release that one before invoking user_callback_.
+ entry_->AddRef(); // Balanced in DoAbortCallbacks.
+ abort_callbacks_.push_back(callback);
+ return net::ERR_IO_PENDING;
+}
+
+// Static
+void SparseControl::DeleteChildren(EntryImpl* entry) {
+ DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
+ int data_len = entry->GetDataSize(kSparseIndex);
+ if (data_len < static_cast<int>(sizeof(SparseData)) ||
+ entry->GetDataSize(kSparseData))
+ return;
+
+ int map_len = data_len - sizeof(SparseHeader);
+ if (map_len > kMaxMapSize || map_len % 4)
+ return;
+
+ char* buffer;
+ Addr address;
+ entry->GetData(kSparseIndex, &buffer, &address);
+ if (!buffer && !address.is_initialized())
+ return;
+
+ entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN);
+
+ DCHECK(entry->backend_);
+ ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_.get(),
+ entry->GetKey());
+ // The object will self destruct when finished.
+ deleter->AddRef();
+
+ if (buffer) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&ChildrenDeleter::Start, deleter, buffer, data_len));
+ } else {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&ChildrenDeleter::ReadData, deleter, address, data_len));
+ }
+}
+
+// -----------------------------------------------------------------------
+
+int SparseControl::Init() {
+ DCHECK(!init_);
+
+ // We should not have sparse data for the exposed entry.
+ if (entry_->GetDataSize(kSparseData))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Now see if there is something where we store our data.
+ int rv = net::OK;
+ int data_len = entry_->GetDataSize(kSparseIndex);
+ if (!data_len) {
+ rv = CreateSparseEntry();
+ } else {
+ rv = OpenSparseEntry(data_len);
+ }
+
+ if (rv == net::OK)
+ init_ = true;
+ return rv;
+}
+
+// We are going to start using this entry to store sparse data, so we have to
+// initialize our control info.
+int SparseControl::CreateSparseEntry() {
+ if (CHILD_ENTRY & entry_->GetEntryFlags())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ memset(&sparse_header_, 0, sizeof(sparse_header_));
+ sparse_header_.signature = Time::Now().ToInternalValue();
+ sparse_header_.magic = kIndexMagic;
+ sparse_header_.parent_key_len = entry_->GetKey().size();
+ children_map_.Resize(kNumSparseBits, true);
+
+ // Save the header. The bitmap is saved in the destructor.
+ scoped_refptr<net::IOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
+
+ int rv = entry_->WriteData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
+ CompletionCallback(), false);
+ if (rv != sizeof(sparse_header_)) {
+ DLOG(ERROR) << "Unable to save sparse_header_";
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+ }
+
+ entry_->SetEntryFlags(PARENT_ENTRY);
+ return net::OK;
+}
+
+// We are opening an entry from disk. Make sure that our control data is there.
+int SparseControl::OpenSparseEntry(int data_len) {
+ if (data_len < static_cast<int>(sizeof(SparseData)))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (entry_->GetDataSize(kSparseData))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (!(PARENT_ENTRY & entry_->GetEntryFlags()))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB.
+ int map_len = data_len - sizeof(sparse_header_);
+ if (map_len > kMaxMapSize || map_len % 4)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ scoped_refptr<net::IOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
+
+ // Read header.
+ int rv = entry_->ReadData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
+ CompletionCallback());
+ if (rv != static_cast<int>(sizeof(sparse_header_)))
+ return net::ERR_CACHE_READ_FAILURE;
+
+ // The real validation should be performed by the caller. This is just to
+ // double check.
+ if (sparse_header_.magic != kIndexMagic ||
+ sparse_header_.parent_key_len !=
+ static_cast<int>(entry_->GetKey().size()))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Read the actual bitmap.
+ buf = new net::IOBuffer(map_len);
+ rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf.get(),
+ map_len, CompletionCallback());
+ if (rv != map_len)
+ return net::ERR_CACHE_READ_FAILURE;
+
+ // Grow the bitmap to the current size and copy the bits.
+ children_map_.Resize(map_len * 8, false);
+ children_map_.SetMap(reinterpret_cast<uint32*>(buf->data()), map_len);
+ return net::OK;
+}
+
+bool SparseControl::OpenChild() {
+ DCHECK_GE(result_, 0);
+
+ std::string key = GenerateChildKey();
+ if (child_) {
+ // Keep using the same child or open another one?
+ if (key == child_->GetKey())
+ return true;
+ CloseChild();
+ }
+
+ // See if we are tracking this child.
+ if (!ChildPresent())
+ return ContinueWithoutChild(key);
+
+ if (!entry_->backend_)
+ return false;
+
+ child_ = entry_->backend_->OpenEntryImpl(key);
+ if (!child_)
+ return ContinueWithoutChild(key);
+
+ EntryImpl* child = static_cast<EntryImpl*>(child_);
+ if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
+ child->GetDataSize(kSparseIndex) <
+ static_cast<int>(sizeof(child_data_)))
+ return KillChildAndContinue(key, false);
+
+ scoped_refptr<net::WrappedIOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
+
+ // Read signature.
+ int rv = child_->ReadData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
+ CompletionCallback());
+ if (rv != sizeof(child_data_))
+ return KillChildAndContinue(key, true); // This is a fatal failure.
+
+ if (child_data_.header.signature != sparse_header_.signature ||
+ child_data_.header.magic != kIndexMagic)
+ return KillChildAndContinue(key, false);
+
+ if (child_data_.header.last_block_len < 0 ||
+ child_data_.header.last_block_len > kBlockSize) {
+ // Make sure these values are always within range.
+ child_data_.header.last_block_len = 0;
+ child_data_.header.last_block = -1;
+ }
+
+ return true;
+}
+
+void SparseControl::CloseChild() {
+ scoped_refptr<net::WrappedIOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
+
+ // Save the allocation bitmap before closing the child entry.
+ int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
+ CompletionCallback(),
+ false);
+ if (rv != sizeof(child_data_))
+ DLOG(ERROR) << "Failed to save child data";
+ child_->Release();
+ child_ = NULL;
+}
+
+// We were not able to open this child; see what we can do.
+bool SparseControl::ContinueWithoutChild(const std::string& key) {
+ if (kReadOperation == operation_)
+ return false;
+ if (kGetRangeOperation == operation_)
+ return true;
+
+ if (!entry_->backend_)
+ return false;
+
+ child_ = entry_->backend_->CreateEntryImpl(key);
+ if (!child_) {
+ child_ = NULL;
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ return false;
+ }
+ // Write signature.
+ InitChildData();
+ return true;
+}
+
+void SparseControl::WriteSparseData() {
+ scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer(
+ reinterpret_cast<const char*>(children_map_.GetMap())));
+
+ int len = children_map_.ArraySize() * 4;
+ int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf.get(),
+ len, CompletionCallback(), false);
+ if (rv != len) {
+ DLOG(ERROR) << "Unable to save sparse map";
+ }
+}
+
+bool SparseControl::DoChildIO() {
+ finished_ = true;
+ if (!buf_len_ || result_ < 0)
+ return false;
+
+ if (!OpenChild())
+ return false;
+
+ if (!VerifyRange())
+ return false;
+
+ // We have more work to do. Let's not trigger a callback to the caller.
+ finished_ = false;
+ CompletionCallback callback;
+ if (!user_callback_.is_null()) {
+ callback =
+ base::Bind(&SparseControl::OnChildIOCompleted, base::Unretained(this));
+ }
+
+ int rv = 0;
+ switch (operation_) {
+ case kReadOperation:
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().BeginEvent(
+ net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
+ child_len_));
+ }
+ rv = child_->ReadDataImpl(kSparseData, child_offset_, user_buf_.get(),
+ child_len_, callback);
+ break;
+ case kWriteOperation:
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
+ child_len_));
+ }
+ rv = child_->WriteDataImpl(kSparseData, child_offset_, user_buf_.get(),
+ child_len_, callback, false);
+ break;
+ case kGetRangeOperation:
+ rv = DoGetAvailableRange();
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ if (rv == net::ERR_IO_PENDING) {
+ if (!pending_) {
+ pending_ = true;
+ // The child will protect itself against closing the entry while IO is in
+ // progress. However, this entry can still be closed, and that would not
+ // be a good thing for us, so we increase the refcount until we're
+ // finished doing sparse stuff.
+ entry_->AddRef(); // Balanced in DoUserCallback.
+ }
+ return false;
+ }
+ if (!rv)
+ return false;
+
+ DoChildIOCompleted(rv);
+ return true;
+}
+
+void SparseControl::DoChildIOCompleted(int result) {
+ LogChildOperationEnd(entry_->net_log(), operation_, result);
+ if (result < 0) {
+ // We fail the whole operation if we encounter an error.
+ result_ = result;
+ return;
+ }
+
+ UpdateRange(result);
+
+ result_ += result;
+ offset_ += result;
+ buf_len_ -= result;
+
+ // We'll be reusing the user provided buffer for the next chunk.
+ if (buf_len_ && user_buf_)
+ user_buf_->DidConsume(result);
+}
+
+std::string SparseControl::GenerateChildKey() {
+ return GenerateChildName(entry_->GetKey(), sparse_header_.signature,
+ offset_ >> 20);
+}
+
+// We are deleting the child because something went wrong.
+bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
+ SetChildBit(false);
+ child_->DoomImpl();
+ child_->Release();
+ child_ = NULL;
+ if (fatal) {
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ return false;
+ }
+ return ContinueWithoutChild(key);
+}
+
+bool SparseControl::ChildPresent() {
+ int child_bit = static_cast<int>(offset_ >> 20);
+ if (children_map_.Size() <= child_bit)
+ return false;
+
+ return children_map_.Get(child_bit);
+}
+
+void SparseControl::SetChildBit(bool value) {
+ int child_bit = static_cast<int>(offset_ >> 20);
+
+ // We may have to increase the bitmap of child entries.
+ if (children_map_.Size() <= child_bit)
+ children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);
+
+ children_map_.Set(child_bit, value);
+}
+
+bool SparseControl::VerifyRange() {
+ DCHECK_GE(result_, 0);
+
+ child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
+ child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);
+
+ // We can write to (or get info from) anywhere in this child.
+ if (operation_ != kReadOperation)
+ return true;
+
+ // Check that there are no holes in this range.
+ int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ int start = child_offset_ >> 10;
+ if (child_map_.FindNextBit(&start, last_bit, false)) {
+ // Something is not here.
+ DCHECK_GE(child_data_.header.last_block_len, 0);
+ DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+ int partial_block_len = PartialBlockLength(start);
+ if (start == child_offset_ >> 10) {
+ // It looks like we don't have anything.
+ if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
+ return false;
+ }
+
+ // We have the first part.
+ child_len_ = (start << 10) - child_offset_;
+ if (partial_block_len) {
+ // We may have a few extra bytes.
+ child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
+ }
+ // There is no need to read more after this one.
+ buf_len_ = child_len_;
+ }
+ return true;
+}
+
+void SparseControl::UpdateRange(int result) {
+ if (result <= 0 || operation_ != kWriteOperation)
+ return;
+
+ DCHECK_GE(child_data_.header.last_block_len, 0);
+ DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+
+ // Write the bitmap.
+ int first_bit = child_offset_ >> 10;
+ int block_offset = child_offset_ & (kBlockSize - 1);
+ if (block_offset && (child_data_.header.last_block != first_bit ||
+ child_data_.header.last_block_len < block_offset)) {
+ // The first block is not completely filled; ignore it.
+ first_bit++;
+ }
+
+ int last_bit = (child_offset_ + result) >> 10;
+ block_offset = (child_offset_ + result) & (kBlockSize - 1);
+
+ // This condition will hit with the following criteria:
+ // 1. The first byte doesn't follow the last write.
+ // 2. The first byte is in the middle of a block.
+ // 3. The first byte and the last byte are in the same block.
+ if (first_bit > last_bit)
+ return;
+
+ if (block_offset && !child_map_.Get(last_bit)) {
+ // The last block is not completely filled; save it for later.
+ child_data_.header.last_block = last_bit;
+ child_data_.header.last_block_len = block_offset;
+ } else {
+ child_data_.header.last_block = -1;
+ }
+
+ child_map_.SetRange(first_bit, last_bit, true);
+}
+
+int SparseControl::PartialBlockLength(int block_index) const {
+ if (block_index == child_data_.header.last_block)
+ return child_data_.header.last_block_len;
+
+ // This may be the last stored index.
+ int entry_len = child_->GetDataSize(kSparseData);
+ if (block_index == entry_len >> 10)
+ return entry_len & (kBlockSize - 1);
+
+ // This is really empty.
+ return 0;
+}
+
+void SparseControl::InitChildData() {
+ // We know the real type of child_.
+ EntryImpl* child = static_cast<EntryImpl*>(child_);
+ child->SetEntryFlags(CHILD_ENTRY);
+
+ memset(&child_data_, 0, sizeof(child_data_));
+ child_data_.header = sparse_header_;
+
+ scoped_refptr<net::WrappedIOBuffer> buf(
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
+
+ int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
+ CompletionCallback(), false);
+ if (rv != sizeof(child_data_))
+ DLOG(ERROR) << "Failed to save child data";
+ SetChildBit(true);
+}
+
+int SparseControl::DoGetAvailableRange() {
+ if (!child_)
+ return child_len_; // Move on to the next child.
+
+ // Check that there are no holes in this range.
+ int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ int start = child_offset_ >> 10;
+ int partial_start_bytes = PartialBlockLength(start);
+ int found = start;
+ int bits_found = child_map_.FindBits(&found, last_bit, true);
+
+ // We don't care if there is a partial block in the middle of the range.
+ int block_offset = child_offset_ & (kBlockSize - 1);
+ if (!bits_found && partial_start_bytes <= block_offset)
+ return child_len_;
+
+ // We are done. Just break the loop and reset result_ to our real result.
+ range_found_ = true;
+
+ // |found| now points to the first 1. Let's see if we have zeros before it.
+ int empty_start = std::max((found << 10) - child_offset_, 0);
+
+ int bytes_found = bits_found << 10;
+ bytes_found += PartialBlockLength(found + bits_found);
+
+ if (start == found)
+ bytes_found -= block_offset;
+
+ // If the user is searching past the end of this child, bits_found is the
+ // right result; otherwise, we have some empty space at the start of this
+ // query that we have to subtract from the range that we searched.
+ result_ = std::min(bytes_found, child_len_ - empty_start);
+
+ if (!bits_found) {
+ result_ = std::min(partial_start_bytes - block_offset, child_len_);
+ empty_start = 0;
+ }
+
+ // Only update offset_ when this query found zeros at the start.
+ if (empty_start)
+ offset_ += empty_start;
+
+ // This will actually break the loop.
+ buf_len_ = 0;
+ return 0;
+}
+
+void SparseControl::DoUserCallback() {
+ DCHECK(!user_callback_.is_null());
+ CompletionCallback cb = user_callback_;
+ user_callback_.Reset();
+ user_buf_ = NULL;
+ pending_ = false;
+ operation_ = kNoOperation;
+ int rv = result_;
+ entry_->Release(); // Don't touch object after this line.
+ cb.Run(rv);
+}
+
+void SparseControl::DoAbortCallbacks() {
+ for (size_t i = 0; i < abort_callbacks_.size(); i++) {
+ // Releasing all references to entry_ may result in the destruction of this
+ // object so we should not be touching it after the last Release().
+ CompletionCallback cb = abort_callbacks_[i];
+ if (i == abort_callbacks_.size() - 1)
+ abort_callbacks_.clear();
+
+ entry_->Release(); // Don't touch object after this line.
+ cb.Run(net::OK);
+ }
+}
+
+void SparseControl::OnChildIOCompleted(int result) {
+ DCHECK_NE(net::ERR_IO_PENDING, result);
+ DoChildIOCompleted(result);
+
+ if (abort_) {
+ // We'll return the current result of the operation, which may be less than
+ // the bytes to read or write, but the user cancelled the operation.
+ abort_ = false;
+ if (entry_->net_log().IsLoggingAllEvents()) {
+ entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED);
+ entry_->net_log().EndEvent(GetSparseEventType(operation_));
+ }
+ // We have an indirect reference to this object for every callback so if
+ // there is only one callback, we may delete this object before reaching
+ // DoAbortCallbacks.
+ bool has_abort_callbacks = !abort_callbacks_.empty();
+ DoUserCallback();
+ if (has_abort_callbacks)
+ DoAbortCallbacks();
+ return;
+ }
+
+ // We are running a callback from the message loop. It's time to restart what
+ // we were doing before.
+ DoChildrenIO();
+}
+
+} // namespace disk_cache
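
The addressing above hinges on two powers of two: each child entry stores kMaxEntrySize = 1 MB of sparse data (so the child id is offset >> 20, and the 8 KB children bitmap covers 64K children, which is the 64 GB limit checked in StartIO()), and each bit of a child's allocation map covers kBlockSize = 1 KB. A small standalone sketch of that arithmetic; the constants and the sample offset are illustrative, not taken from a real cache.

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const int64_t kChildSize = 0x100000;  // 1 MB of data per child entry.
      const int kBlock = 1024;              // 1 KB per child-bitmap bit.

      int64_t offset = 5 * kChildSize + 3000;  // Sparse offset: 5 MB + 3000.
      int64_t child_id = offset >> 20;                                // -> 5
      int child_offset = static_cast<int>(offset & (kChildSize - 1)); // -> 3000
      int block_index = child_offset / kBlock;                        // -> 2

      printf("child %lld, child_offset %d, block %d\n",
             static_cast<long long>(child_id), child_offset, block_index);
      return 0;
    }
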
diff --git a/chromium/net/disk_cache/v3/sparse_control_v3.h b/chromium/net/disk_cache/v3/sparse_control_v3.h
new file mode 100644
index 00000000000..8455ad724b5
--- /dev/null
+++ b/chromium/net/disk_cache/v3/sparse_control_v3.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SPARSE_CONTROL_H_
+#define NET_DISK_CACHE_SPARSE_CONTROL_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/base/completion_callback.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/disk_format.h"
+
+namespace net {
+class IOBuffer;
+class DrainableIOBuffer;
+}
+
+namespace disk_cache {
+
+class Entry;
+class EntryImpl;
+
+// This class provides support for the sparse capabilities of the disk cache.
+// Basically, sparse IO is directed from EntryImpl to this class, and we split
+// the operation into multiple small pieces, sending each one to the
+// appropriate entry. An instance of this class is associated with each entry
+// used directly for sparse operations (the entry passed to the constructor).
+class SparseControl {
+ public:
+ typedef net::CompletionCallback CompletionCallback;
+
+ // The operation to perform.
+ enum SparseOperation {
+ kNoOperation,
+ kReadOperation,
+ kWriteOperation,
+ kGetRangeOperation
+ };
+
+ explicit SparseControl(EntryImpl* entry);
+ ~SparseControl();
+
+ // Performs a quick test to see if the entry is sparse or not, without
+ // generating disk IO (so the answer provided is only a best effort).
+ bool CouldBeSparse() const;
+
+ // Performs an actual sparse read or write operation for this entry. |op| is
+ // the operation to perform, |offset| is the desired sparse offset, |buf| and
+ // |buf_len| specify the actual data to use and |callback| is the callback
+ // to use for asynchronous operations. See the description of the Read /
+ // WriteSparseData for details about the arguments. The return value is the
+ // number of bytes read or written, or a net error code.
+ int StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback);
+
+ // Implements Entry::GetAvailableRange().
+ int GetAvailableRange(int64 offset, int len, int64* start);
+
+ // Cancels the current sparse operation (if any).
+ void CancelIO();
+
+ // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
+ // busy. If the entry is busy, we'll invoke the callback when we are ready
+ // again. See disk_cache::Entry::ReadyToUse() for more info.
+ int ReadyToUse(const CompletionCallback& completion_callback);
+
+ // Deletes the children entries of |entry|.
+ static void DeleteChildren(EntryImpl* entry);
+
+ private:
+ // Initializes the object for the current entry. If this entry already stores
+ // sparse data, or can be used to do it, it updates the relevant information
+ // on disk and returns net::OK. Otherwise it returns a net error code.
+ int Init();
+
+ // Creates a new sparse entry or opens an already created entry from disk.
+ // These methods just read / write the required info from disk for the current
+ // entry, and verify that everything is correct. The return value is a net
+ // error code.
+ int CreateSparseEntry();
+ int OpenSparseEntry(int data_len);
+
+ // Opens and closes a child entry. A child entry is a regular EntryImpl object
+ // with a key derived from the key of the resource to store and the range
+ // stored by that child.
+ bool OpenChild();
+ void CloseChild();
+
+ // Continues the current operation (open) without a current child.
+ bool ContinueWithoutChild(const std::string& key);
+
+ // Writes to disk the tracking information for this entry.
+ void WriteSparseData();
+
+ // Performs a single operation with the current child. Returns true when we
+ // should move on to the next child and false when we should interrupt our
+ // work.
+ bool DoChildIO();
+
+ // Performs the required work after a single IO operations finishes.
+ void DoChildIOCompleted(int result);
+
+ std::string GenerateChildKey();
+
+ // Deletes the current child and continues the current operation (open).
+ bool KillChildAndContinue(const std::string& key, bool fatal);
+
+ // Returns true if the required child is tracked by the parent entry, i.e. it
+ // was already created.
+ bool ChildPresent();
+
+ // Sets the bit for the current child to the provided |value|. In other words,
+ // starts or stops tracking this child.
+ void SetChildBit(bool value);
+
+ // Verifies that the range to be accessed for the current child is appropriate.
+ // Returns false if an error is detected or there is no need to perform the
+ // current IO operation (for instance if the required range is not stored by
+ // the child).
+ bool VerifyRange();
+
+ // Updates the contents bitmap for the current range, based on the result of
+ // the current operation.
+ void UpdateRange(int result);
+
+ // Returns the number of bytes stored at |block_index|, if its allocation-bit
+ // is off (because it is not completely filled).
+ int PartialBlockLength(int block_index) const;
+
+ // Initializes the sparse info for the current child.
+ void InitChildData();
+
+ // Performs the required work for GetAvailableRange for one child.
+ int DoGetAvailableRange();
+
+ // Reports to the user that we are done.
+ void DoUserCallback();
+ void DoAbortCallbacks();
+
+ // Invoked by the callback of asynchronous operations.
+ void OnChildIOCompleted(int result);
+
+ EntryImpl* entry_; // The sparse entry.
+ EntryImpl* child_; // The current child entry.
+ SparseOperation operation_;
+ bool pending_; // True if any child IO operation returned pending.
+ bool finished_;
+ bool init_;
+ bool range_found_; // True if GetAvailableRange found something.
+ bool abort_; // True if we should abort the current operation ASAP.
+
+ SparseHeader sparse_header_; // Data about the children of entry_.
+ Bitmap children_map_; // The actual bitmap of children.
+ SparseData child_data_; // Parent and allocation map of child_.
+ Bitmap child_map_; // The allocation map as a bitmap.
+
+ CompletionCallback user_callback_;
+ std::vector<CompletionCallback> abort_callbacks_;
+ int64 offset_; // Current sparse offset.
+ scoped_refptr<net::DrainableIOBuffer> user_buf_;
+ int buf_len_; // Bytes to read or write.
+ int child_offset_; // Offset to use for the current child.
+ int child_len_; // Bytes to read or write for this child.
+ int result_;
+
+ DISALLOW_COPY_AND_ASSIGN(SparseControl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SPARSE_CONTROL_H_
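
From the caller's side, all of this is reached through the sparse methods on disk_cache::Entry (declared in net/disk_cache/disk_cache.h), which EntryImpl forwards to its SparseControl. A hedged usage sketch, assuming |entry| was already opened or created from a backend elsewhere; the helper names are illustrative.

    #include <string.h>

    #include "base/basictypes.h"
    #include "net/base/completion_callback.h"
    #include "net/base/io_buffer.h"
    #include "net/disk_cache/disk_cache.h"

    // Writes 4 KB at sparse offset 1 MB; SparseControl routes the IO to the
    // child entry covering [1 MB, 2 MB), creating it if needed.
    int WriteSample(disk_cache::Entry* entry,
                    const net::CompletionCallback& cb) {
      const int kLen = 4096;
      scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kLen));
      memset(buf->data(), 'x', kLen);
      return entry->WriteSparseData(1 << 20, buf.get(), kLen, cb);
    }

    // Asks how much contiguous data exists in [0, 2 MB); on completion |start|
    // holds the beginning of the range and the result is its length in bytes.
    int FindSample(disk_cache::Entry* entry, int64* start,
                   const net::CompletionCallback& cb) {
      return entry->GetAvailableRange(0, 2 << 20, start, cb);
    }
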