author     Andras Becsi <andras.becsi@digia.com>            2014-03-18 13:16:26 +0100
committer  Frederik Gladhorn <frederik.gladhorn@digia.com>  2014-03-20 15:55:39 +0100
commit     3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch)
tree       92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/sync
parent     e90d7c4b152c56919d963987e2503f9909a666d2 (diff)
Update to new stable branch 1750
This also includes an updated ninja and chromium dependencies needed on Windows.

Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42
Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu>
Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/sync')
-rw-r--r-- chromium/sync/engine/all_status.cc | 1
-rw-r--r-- chromium/sync/engine/apply_control_data_updates.cc | 3
-rw-r--r-- chromium/sync/engine/apply_control_data_updates.h | 7
-rw-r--r-- chromium/sync/engine/apply_control_data_updates_unittest.cc | 55
-rw-r--r-- chromium/sync/engine/apply_updates_and_resolve_conflicts_command.cc | 134
-rw-r--r-- chromium/sync/engine/apply_updates_and_resolve_conflicts_command.h | 33
-rw-r--r-- chromium/sync/engine/apply_updates_and_resolve_conflicts_command_unittest.cc | 415
-rw-r--r-- chromium/sync/engine/build_commit_command.cc | 225
-rw-r--r-- chromium/sync/engine/build_commit_command.h | 82
-rw-r--r-- chromium/sync/engine/commit.cc | 284
-rw-r--r-- chromium/sync/engine/commit.h | 73
-rw-r--r-- chromium/sync/engine/commit_util.cc (renamed from chromium/sync/engine/process_commit_response_command.cc) | 437
-rw-r--r-- chromium/sync/engine/commit_util.h | 64
-rw-r--r-- chromium/sync/engine/download.cc | 306
-rw-r--r-- chromium/sync/engine/download.h | 44
-rw-r--r-- chromium/sync/engine/download_unittest.cc | 275
-rw-r--r-- chromium/sync/engine/get_commit_ids.cc | 26
-rw-r--r-- chromium/sync/engine/get_commit_ids.h | 22
-rw-r--r-- chromium/sync/engine/model_changing_syncer_command.cc | 51
-rw-r--r-- chromium/sync/engine/model_changing_syncer_command.h | 76
-rw-r--r-- chromium/sync/engine/model_changing_syncer_command_unittest.cc | 88
-rw-r--r-- chromium/sync/engine/net/server_connection_manager.cc | 18
-rw-r--r-- chromium/sync/engine/net/server_connection_manager.h | 13
-rw-r--r-- chromium/sync/engine/process_commit_response_command.h | 123
-rw-r--r-- chromium/sync/engine/process_commit_response_command_unittest.cc | 365
-rw-r--r-- chromium/sync/engine/process_updates_command.h | 59
-rw-r--r-- chromium/sync/engine/process_updates_command_unittest.cc | 192
-rw-r--r-- chromium/sync/engine/process_updates_util.cc (renamed from chromium/sync/engine/process_updates_command.cc) | 151
-rw-r--r-- chromium/sync/engine/process_updates_util.h | 73
-rw-r--r-- chromium/sync/engine/store_timestamps_command.cc | 63
-rw-r--r-- chromium/sync/engine/store_timestamps_command.h | 56
-rw-r--r-- chromium/sync/engine/store_timestamps_command_unittest.cc | 83
-rw-r--r-- chromium/sync/engine/sync_directory_commit_contribution.cc | 164
-rw-r--r-- chromium/sync/engine/sync_directory_commit_contribution.h | 102
-rw-r--r-- chromium/sync/engine/sync_directory_commit_contribution_unittest.cc | 235
-rw-r--r-- chromium/sync/engine/sync_directory_commit_contributor.cc | 24
-rw-r--r-- chromium/sync/engine/sync_directory_commit_contributor.h | 45
-rw-r--r-- chromium/sync/engine/sync_directory_update_handler.cc | 148
-rw-r--r-- chromium/sync/engine/sync_directory_update_handler.h | 97
-rw-r--r-- chromium/sync/engine/sync_directory_update_handler_unittest.cc | 826
-rw-r--r-- chromium/sync/engine/sync_engine_event.h | 5
-rw-r--r-- chromium/sync/engine/sync_scheduler.h | 13
-rw-r--r-- chromium/sync/engine/sync_scheduler_impl.cc | 212
-rw-r--r-- chromium/sync/engine/sync_scheduler_impl.h | 32
-rw-r--r-- chromium/sync/engine/sync_scheduler_unittest.cc | 173
-rw-r--r-- chromium/sync/engine/syncer.cc | 72
-rw-r--r-- chromium/sync/engine/syncer.h | 26
-rw-r--r-- chromium/sync/engine/syncer_command.cc | 17
-rw-r--r-- chromium/sync/engine/syncer_command.h | 47
-rw-r--r-- chromium/sync/engine/syncer_proto_util.cc | 7
-rw-r--r-- chromium/sync/engine/syncer_proto_util_unittest.cc | 3
-rw-r--r-- chromium/sync/engine/syncer_types.h | 20
-rw-r--r-- chromium/sync/engine/syncer_unittest.cc | 258
-rw-r--r-- chromium/sync/engine/syncer_util.cc | 45
-rw-r--r-- chromium/sync/engine/syncer_util.h | 23
-rw-r--r-- chromium/sync/engine/update_applicator.cc | 30
-rw-r--r-- chromium/sync/engine/update_applicator.h | 8
-rw-r--r-- chromium/sync/internal_api/debug_info_event_listener.cc | 45
-rw-r--r-- chromium/sync/internal_api/debug_info_event_listener.h | 25
-rw-r--r-- chromium/sync/internal_api/debug_info_event_listener_unittest.cc | 18
-rw-r--r-- chromium/sync/internal_api/http_bridge_network_resources.cc | 29
-rw-r--r-- chromium/sync/internal_api/js_mutation_event_observer.h | 3
-rw-r--r-- chromium/sync/internal_api/js_sync_manager_observer.cc | 9
-rw-r--r-- chromium/sync/internal_api/js_sync_manager_observer.h | 1
-rw-r--r-- chromium/sync/internal_api/js_sync_manager_observer_unittest.cc | 14
-rw-r--r-- chromium/sync/internal_api/public/base/DEPS | 7
-rw-r--r-- chromium/sync/internal_api/public/base/ack_handle.cc | 67
-rw-r--r-- chromium/sync/internal_api/public/base/ack_handle.h | 47
-rw-r--r-- chromium/sync/internal_api/public/base/invalidation.cc | 192
-rw-r--r-- chromium/sync/internal_api/public/base/invalidation.h | 130
-rw-r--r-- chromium/sync/internal_api/public/base/invalidation_test_util.cc | 21
-rw-r--r-- chromium/sync/internal_api/public/base/invalidation_test_util.h | 2
-rw-r--r-- chromium/sync/internal_api/public/base/model_type.h | 4
-rw-r--r-- chromium/sync/internal_api/public/base/model_type_test_util.cc | 12
-rw-r--r-- chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc (renamed from chromium/sync/notifier/object_id_invalidation_map_test_util.cc) | 86
-rw-r--r-- chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h (renamed from chromium/sync/notifier/object_id_invalidation_map_test_util.h) | 9
-rw-r--r-- chromium/sync/internal_api/public/base/ordinal.h | 6
-rw-r--r-- chromium/sync/internal_api/public/base/progress_marker_map.cc | 5
-rw-r--r-- chromium/sync/internal_api/public/base/unique_position.cc | 36
-rw-r--r-- chromium/sync/internal_api/public/base/unique_position_unittest.cc | 25
-rw-r--r-- chromium/sync/internal_api/public/http_bridge.h | 11
-rw-r--r-- chromium/sync/internal_api/public/http_bridge_network_resources.h | 35
-rw-r--r-- chromium/sync/internal_api/public/network_resources.h | 33
-rw-r--r-- chromium/sync/internal_api/public/network_time_update_callback.h | 28
-rw-r--r-- chromium/sync/internal_api/public/sessions/model_neutral_state.cc | 1
-rw-r--r-- chromium/sync/internal_api/public/sessions/model_neutral_state.h | 9
-rw-r--r-- chromium/sync/internal_api/public/sync_manager.h | 5
-rw-r--r-- chromium/sync/internal_api/public/util/syncer_error.cc | 5
-rw-r--r-- chromium/sync/internal_api/public/util/syncer_error.h | 12
-rw-r--r-- chromium/sync/internal_api/sync_encryption_handler_impl.cc | 9
-rw-r--r-- chromium/sync/internal_api/sync_encryption_handler_impl.h | 4
-rw-r--r-- chromium/sync/internal_api/sync_manager_impl.cc | 69
-rw-r--r-- chromium/sync/internal_api/sync_manager_impl.h | 10
-rw-r--r-- chromium/sync/internal_api/sync_manager_impl_unittest.cc | 90
-rw-r--r-- chromium/sync/internal_api/syncapi_server_connection_manager.cc | 13
-rw-r--r-- chromium/sync/internal_api/syncapi_server_connection_manager.h | 1
-rw-r--r-- chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc | 6
-rw-r--r-- chromium/sync/internal_api/write_node.cc | 2
-rw-r--r-- chromium/sync/js/sync_js_controller_unittest.cc | 55
-rw-r--r-- chromium/sync/notifier/ack_handler.cc | 15
-rw-r--r-- chromium/sync/notifier/ack_handler.h | 42
-rw-r--r-- chromium/sync/notifier/ack_tracker.cc | 221
-rw-r--r-- chromium/sync/notifier/ack_tracker.h | 111
-rw-r--r-- chromium/sync/notifier/ack_tracker_unittest.cc | 352
-rw-r--r-- chromium/sync/notifier/dropped_invalidation_tracker.cc | 42
-rw-r--r-- chromium/sync/notifier/dropped_invalidation_tracker.h | 67
-rw-r--r-- chromium/sync/notifier/fake_invalidation_handler.h | 1
-rw-r--r-- chromium/sync/notifier/fake_invalidation_state_tracker.cc | 59
-rw-r--r-- chromium/sync/notifier/fake_invalidation_state_tracker.h | 18
-rw-r--r-- chromium/sync/notifier/fake_invalidator.cc | 7
-rw-r--r-- chromium/sync/notifier/fake_invalidator.h | 3
-rw-r--r-- chromium/sync/notifier/invalidation_handler.h | 3
-rw-r--r-- chromium/sync/notifier/invalidation_notifier.cc | 14
-rw-r--r-- chromium/sync/notifier/invalidation_notifier.h | 12
-rw-r--r-- chromium/sync/notifier/invalidation_notifier_unittest.cc | 3
-rw-r--r-- chromium/sync/notifier/invalidation_state_tracker.cc | 24
-rw-r--r-- chromium/sync/notifier/invalidation_state_tracker.h | 51
-rw-r--r-- chromium/sync/notifier/invalidation_util.cc | 20
-rw-r--r-- chromium/sync/notifier/invalidation_util.h | 7
-rw-r--r-- chromium/sync/notifier/invalidator.h | 5
-rw-r--r-- chromium/sync/notifier/invalidator_registrar.cc | 93
-rw-r--r-- chromium/sync/notifier/invalidator_registrar.h | 11
-rw-r--r-- chromium/sync/notifier/invalidator_registrar_unittest.cc | 6
-rw-r--r-- chromium/sync/notifier/invalidator_test_template.h | 71
-rw-r--r-- chromium/sync/notifier/mock_ack_handler.cc | 85
-rw-r--r-- chromium/sync/notifier/mock_ack_handler.h | 64
-rw-r--r-- chromium/sync/notifier/non_blocking_invalidator.cc | 46
-rw-r--r-- chromium/sync/notifier/non_blocking_invalidator.h | 10
-rw-r--r-- chromium/sync/notifier/non_blocking_invalidator_unittest.cc | 3
-rw-r--r-- chromium/sync/notifier/object_id_invalidation_map.cc | 152
-rw-r--r-- chromium/sync/notifier/object_id_invalidation_map.h | 78
-rw-r--r-- chromium/sync/notifier/object_id_invalidation_map_unittest.cc | 104
-rw-r--r-- chromium/sync/notifier/p2p_invalidator.cc | 48
-rw-r--r-- chromium/sync/notifier/p2p_invalidator.h | 6
-rw-r--r-- chromium/sync/notifier/p2p_invalidator_unittest.cc | 70
-rw-r--r-- chromium/sync/notifier/push_client_channel.cc | 118
-rw-r--r-- chromium/sync/notifier/push_client_channel.h | 54
-rw-r--r-- chromium/sync/notifier/push_client_channel_unittest.cc | 191
-rw-r--r-- chromium/sync/notifier/single_object_invalidation_set.cc | 111
-rw-r--r-- chromium/sync/notifier/single_object_invalidation_set.h | 63
-rw-r--r-- chromium/sync/notifier/single_object_invalidation_set_unittest.cc | 110
-rw-r--r-- chromium/sync/notifier/sync_invalidation_listener.cc | 301
-rw-r--r-- chromium/sync/notifier/sync_invalidation_listener.h | 86
-rw-r--r-- chromium/sync/notifier/sync_invalidation_listener_unittest.cc | 807
-rw-r--r-- chromium/sync/notifier/sync_system_resources.cc | 144
-rw-r--r-- chromium/sync/notifier/sync_system_resources.h | 113
-rw-r--r-- chromium/sync/notifier/sync_system_resources_unittest.cc | 231
-rw-r--r-- chromium/sync/notifier/unacked_invalidation_set.cc | 204
-rw-r--r-- chromium/sync/notifier/unacked_invalidation_set.h | 117
-rw-r--r-- chromium/sync/notifier/unacked_invalidation_set_test_util.cc | 181
-rw-r--r-- chromium/sync/notifier/unacked_invalidation_set_test_util.h | 25
-rw-r--r-- chromium/sync/notifier/unacked_invalidation_set_unittest.cc | 219
-rw-r--r-- chromium/sync/protocol/app_list_specifics.proto | 49
-rw-r--r-- chromium/sync/protocol/article_specifics.proto | 29
-rw-r--r-- chromium/sync/protocol/bookmark_specifics.proto | 7
-rw-r--r-- chromium/sync/protocol/nigori_specifics.proto | 6
-rw-r--r-- chromium/sync/protocol/proto_enum_conversions.cc | 14
-rw-r--r-- chromium/sync/protocol/proto_enum_conversions.h | 4
-rw-r--r-- chromium/sync/protocol/proto_enum_conversions_unittest.cc | 7
-rw-r--r-- chromium/sync/protocol/proto_value_conversions.cc | 104
-rw-r--r-- chromium/sync/protocol/proto_value_conversions.h | 45
-rw-r--r-- chromium/sync/protocol/proto_value_conversions_unittest.cc | 35
-rw-r--r-- chromium/sync/protocol/sync.proto | 4
-rw-r--r-- chromium/sync/protocol/unique_position.proto | 30
-rw-r--r-- chromium/sync/sessions/data_type_tracker.cc | 23
-rw-r--r-- chromium/sync/sessions/data_type_tracker.h | 9
-rw-r--r-- chromium/sync/sessions/debug_info_getter.h | 11
-rw-r--r-- chromium/sync/sessions/nudge_tracker.cc | 13
-rw-r--r-- chromium/sync/sessions/nudge_tracker.h | 4
-rw-r--r-- chromium/sync/sessions/nudge_tracker_unittest.cc | 305
-rw-r--r-- chromium/sync/sessions/ordered_commit_set.cc | 131
-rw-r--r-- chromium/sync/sessions/ordered_commit_set.h | 128
-rw-r--r-- chromium/sync/sessions/ordered_commit_set_unittest.cc | 134
-rw-r--r-- chromium/sync/sessions/status_controller.cc | 42
-rw-r--r-- chromium/sync/sessions/status_controller.h | 99
-rw-r--r-- chromium/sync/sessions/status_controller_unittest.cc | 19
-rw-r--r-- chromium/sync/sessions/sync_session.h | 20
-rw-r--r-- chromium/sync/sessions/sync_session_context.cc | 38
-rw-r--r-- chromium/sync/sessions/sync_session_context.h | 56
-rw-r--r-- chromium/sync/sessions/sync_session_unittest.cc | 35
-rw-r--r-- chromium/sync/sync_android.gypi | 13
-rw-r--r-- chromium/sync/sync_core.gypi | 32
-rw-r--r-- chromium/sync/sync_internal_api.gypi | 8
-rw-r--r-- chromium/sync/sync_notifier.gypi | 13
-rw-r--r-- chromium/sync/sync_proto.gypi | 2
-rw-r--r-- chromium/sync/sync_tests.gypi | 44
-rw-r--r-- chromium/sync/syncable/directory.cc | 52
-rw-r--r-- chromium/sync/syncable/directory.h | 19
-rw-r--r-- chromium/sync/syncable/directory_backing_store.cc | 2
-rw-r--r-- chromium/sync/syncable/entry.cc | 6
-rw-r--r-- chromium/sync/syncable/metahandle_set.h | 2
-rw-r--r-- chromium/sync/syncable/model_neutral_mutable_entry.cc | 381
-rw-r--r-- chromium/sync/syncable/model_neutral_mutable_entry.h | 116
-rw-r--r-- chromium/sync/syncable/model_type.cc | 50
-rw-r--r-- chromium/sync/syncable/mutable_entry.cc | 366
-rw-r--r-- chromium/sync/syncable/mutable_entry.h | 69
-rw-r--r-- chromium/sync/syncable/nigori_util.cc | 10
-rw-r--r-- chromium/sync/syncable/syncable_base_write_transaction.cc | 22
-rw-r--r-- chromium/sync/syncable/syncable_base_write_transaction.h | 35
-rw-r--r-- chromium/sync/syncable/syncable_model_neutral_write_transaction.cc | 33
-rw-r--r-- chromium/sync/syncable/syncable_model_neutral_write_transaction.h | 44
-rw-r--r-- chromium/sync/syncable/syncable_util.cc | 8
-rw-r--r-- chromium/sync/syncable/syncable_util.h | 11
-rw-r--r-- chromium/sync/syncable/syncable_write_transaction.cc | 14
-rw-r--r-- chromium/sync/syncable/syncable_write_transaction.h | 6
-rw-r--r-- chromium/sync/tools/null_invalidation_state_tracker.cc | 40
-rw-r--r-- chromium/sync/tools/null_invalidation_state_tracker.h | 17
-rw-r--r-- chromium/sync/tools/sync_client.cc | 6
-rw-r--r-- chromium/sync/tools/sync_listen_notifications.cc | 14
-rw-r--r-- chromium/sync/tools/testserver/chromiumsync.py | 181
-rwxr-xr-x chromium/sync/tools/testserver/sync_testserver.py | 148
-rw-r--r-- chromium/sync/tools/testserver/synced_notifications.html | 51
-rw-r--r-- chromium/sync/util/DEPS | 4
-rw-r--r-- chromium/sync/util/cryptographer.cc | 6
-rw-r--r-- chromium/sync/util/data_type_histogram.h | 6
-rw-r--r-- chromium/sync/util/get_session_name.cc | 27
-rw-r--r-- chromium/sync/util/get_session_name_unittest.cc | 23
-rw-r--r-- chromium/sync/util/nigori.cc | 6
218 files changed, 8870 insertions(+), 7137 deletions(-)
diff --git a/chromium/sync/engine/all_status.cc b/chromium/sync/engine/all_status.cc
index af3d4fc96e7..e5f51248ad2 100644
--- a/chromium/sync/engine/all_status.cc
+++ b/chromium/sync/engine/all_status.cc
@@ -101,7 +101,6 @@ void AllStatus::OnSyncEngineEvent(const SyncEngineEvent& event) {
status_ = CalcSyncing(event);
break;
case SyncEngineEvent::STOP_SYNCING_PERMANENTLY:
- case SyncEngineEvent::UPDATED_TOKEN:
break;
case SyncEngineEvent::ACTIONABLE_ERROR:
status_ = CreateBlankStatus();
diff --git a/chromium/sync/engine/apply_control_data_updates.cc b/chromium/sync/engine/apply_control_data_updates.cc
index 137560d5f89..e97741ed336 100644
--- a/chromium/sync/engine/apply_control_data_updates.cc
+++ b/chromium/sync/engine/apply_control_data_updates.cc
@@ -24,8 +24,7 @@ using syncable::SERVER_SPECIFICS;
using syncable::SPECIFICS;
using syncable::SYNCER;
-void ApplyControlDataUpdates(sessions::SyncSession* session) {
- syncable::Directory* dir = session->context()->directory();
+void ApplyControlDataUpdates(syncable::Directory* dir) {
syncable::WriteTransaction trans(FROM_HERE, SYNCER, dir);
std::vector<int64> handles;
diff --git a/chromium/sync/engine/apply_control_data_updates.h b/chromium/sync/engine/apply_control_data_updates.h
index 4646cb01e15..b825665ed16 100644
--- a/chromium/sync/engine/apply_control_data_updates.h
+++ b/chromium/sync/engine/apply_control_data_updates.h
@@ -11,18 +11,13 @@ namespace syncer {
class Cryptographer;
-namespace sessions {
-class SyncSession;
-}
-
namespace syncable {
class Directory;
class MutableEntry;
class WriteTransaction;
}
-SYNC_EXPORT_PRIVATE void ApplyControlDataUpdates(
- sessions::SyncSession* session);
+SYNC_EXPORT_PRIVATE void ApplyControlDataUpdates(syncable::Directory* dir);
void ApplyNigoriUpdate(syncable::WriteTransaction* trans,
syncable::MutableEntry* const entry,
Cryptographer* cryptographer);
diff --git a/chromium/sync/engine/apply_control_data_updates_unittest.cc b/chromium/sync/engine/apply_control_data_updates_unittest.cc
index f3d173cba91..caacfbc1a11 100644
--- a/chromium/sync/engine/apply_control_data_updates_unittest.cc
+++ b/chromium/sync/engine/apply_control_data_updates_unittest.cc
@@ -5,19 +5,21 @@
#include "base/format_macros.h"
#include "base/location.h"
#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
#include "sync/engine/apply_control_data_updates.h"
#include "sync/engine/syncer.h"
#include "sync/engine/syncer_util.h"
#include "sync/internal_api/public/test/test_entry_factory.h"
#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/syncable/directory.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/nigori_util.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
#include "sync/test/engine/test_id_factory.h"
#include "sync/test/fake_sync_encryption_handler.h"
#include "sync/util/cryptographer.h"
@@ -29,32 +31,31 @@ using syncable::MutableEntry;
using syncable::UNITTEST;
using syncable::Id;
-class ApplyControlDataUpdatesTest : public SyncerCommandTest {
+class ApplyControlDataUpdatesTest : public ::testing::Test {
public:
protected:
ApplyControlDataUpdatesTest() {}
virtual ~ApplyControlDataUpdatesTest() {}
virtual void SetUp() {
- workers()->clear();
- mutable_routing_info()->clear();
- workers()->push_back(make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_PASSWORD)));
- (*mutable_routing_info())[NIGORI] = GROUP_PASSIVE;
- (*mutable_routing_info())[EXPERIMENTS] = GROUP_PASSIVE;
- SyncerCommandTest::SetUp();
+ dir_maker_.SetUp();
entry_factory_.reset(new TestEntryFactory(directory()));
+ }
- session()->mutable_status_controller()->set_updates_request_types(
- ControlTypes());
+ virtual void TearDown() {
+ dir_maker_.TearDown();
+ }
- syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Directory* directory() {
+ return dir_maker_.directory();
}
TestIdFactory id_factory_;
scoped_ptr<TestEntryFactory> entry_factory_;
private:
+ base::MessageLoop loop_; // Needed for directory init.
+ TestDirectorySetterUpper dir_maker_;
+
DISALLOW_COPY_AND_ASSIGN(ApplyControlDataUpdatesTest);
};
@@ -87,7 +88,7 @@ TEST_F(ApplyControlDataUpdatesTest, NigoriUpdate) {
ModelTypeToRootTag(NIGORI), specifics, true);
EXPECT_FALSE(cryptographer->has_pending_keys());
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(cryptographer->is_ready());
EXPECT_TRUE(cryptographer->has_pending_keys());
@@ -166,7 +167,7 @@ TEST_F(ApplyControlDataUpdatesTest, EncryptUnsyncedChanges) {
EXPECT_EQ(2*batch_s+1, handles.size());
}
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(cryptographer->has_pending_keys());
EXPECT_TRUE(cryptographer->is_ready());
@@ -194,7 +195,7 @@ TEST_F(ApplyControlDataUpdatesTest, EncryptUnsyncedChanges) {
entry.PutIsUnappliedUpdate(true);
}
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(cryptographer->has_pending_keys());
EXPECT_TRUE(cryptographer->is_ready());
@@ -281,7 +282,7 @@ TEST_F(ApplyControlDataUpdatesTest, CannotEncryptUnsyncedChanges) {
EXPECT_EQ(2*batch_s+1, handles.size());
}
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(cryptographer->is_ready());
EXPECT_TRUE(cryptographer->has_pending_keys());
@@ -359,7 +360,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -437,7 +438,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -510,7 +511,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -589,7 +590,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -671,7 +672,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -751,7 +752,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -831,7 +832,7 @@ TEST_F(ApplyControlDataUpdatesTest,
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
@@ -865,7 +866,7 @@ TEST_F(ApplyControlDataUpdatesTest, ControlApply) {
set_enabled(true);
int64 experiment_handle = entry_factory_->CreateUnappliedNewItem(
experiment_id, specifics, false);
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
EXPECT_TRUE(
@@ -884,7 +885,7 @@ TEST_F(ApplyControlDataUpdatesTest, ControlApplyParentBeforeChild) {
experiment_id, specifics, parent_id);
int64 parent_handle = entry_factory_->CreateUnappliedNewItem(
parent_id, specifics, true);
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(parent_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
@@ -908,7 +909,7 @@ TEST_F(ApplyControlDataUpdatesTest, ControlConflict) {
server_specifics);
entry_factory_->SetLocalSpecificsForItem(experiment_handle,
local_specifics);
- ApplyControlDataUpdates(session());
+ ApplyControlDataUpdates(directory());
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
EXPECT_TRUE(
diff --git a/chromium/sync/engine/apply_updates_and_resolve_conflicts_command.cc b/chromium/sync/engine/apply_updates_and_resolve_conflicts_command.cc
deleted file mode 100644
index 54b0b38e46e..00000000000
--- a/chromium/sync/engine/apply_updates_and_resolve_conflicts_command.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/apply_updates_and_resolve_conflicts_command.h"
-
-#include "base/location.h"
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/update_applicator.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-namespace syncer {
-
-using sessions::SyncSession;
-
-ApplyUpdatesAndResolveConflictsCommand::
- ApplyUpdatesAndResolveConflictsCommand() {}
-ApplyUpdatesAndResolveConflictsCommand::
- ~ApplyUpdatesAndResolveConflictsCommand() {}
-
-std::set<ModelSafeGroup>
-ApplyUpdatesAndResolveConflictsCommand::GetGroupsToChange(
- const sessions::SyncSession& session) const {
- std::set<ModelSafeGroup> groups_with_unapplied_updates;
-
- FullModelTypeSet server_types_with_unapplied_updates;
- {
- syncable::Directory* dir = session.context()->directory();
- syncable::ReadTransaction trans(FROM_HERE, dir);
- server_types_with_unapplied_updates =
- dir->GetServerTypesWithUnappliedUpdates(&trans);
- }
-
- for (FullModelTypeSet::Iterator it =
- server_types_with_unapplied_updates.First(); it.Good(); it.Inc()) {
- groups_with_unapplied_updates.insert(
- GetGroupForModelType(it.Get(), session.context()->routing_info()));
- }
-
- return groups_with_unapplied_updates;
-}
-
-SyncerError ApplyUpdatesAndResolveConflictsCommand::ModelChangingExecuteImpl(
- SyncSession* session) {
- sessions::StatusController* status = session->mutable_status_controller();
- syncable::Directory* dir = session->context()->directory();
- syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
-
- // Compute server types with unapplied updates that fall under our
- // group restriction.
- const FullModelTypeSet server_types_with_unapplied_updates =
- dir->GetServerTypesWithUnappliedUpdates(&trans);
- FullModelTypeSet server_type_restriction;
- for (FullModelTypeSet::Iterator it =
- server_types_with_unapplied_updates.First(); it.Good(); it.Inc()) {
- if (GetGroupForModelType(it.Get(), session->context()->routing_info()) ==
- status->group_restriction()) {
- server_type_restriction.Put(it.Get());
- }
- }
-
- // Don't process control type updates here. They will be handled elsewhere.
- FullModelTypeSet control_types = ToFullModelTypeSet(ControlTypes());
- server_type_restriction.RemoveAll(control_types);
-
- std::vector<int64> handles;
- dir->GetUnappliedUpdateMetaHandles(
- &trans, server_type_restriction, &handles);
-
- // First set of update application passes.
- UpdateApplicator applicator(
- dir->GetCryptographer(&trans),
- session->context()->routing_info(),
- status->group_restriction());
- applicator.AttemptApplications(&trans, handles);
- status->increment_num_updates_applied_by(applicator.updates_applied());
- status->increment_num_hierarchy_conflicts_by(
- applicator.hierarchy_conflicts());
- status->increment_num_encryption_conflicts_by(
- applicator.encryption_conflicts());
-
- if (applicator.simple_conflict_ids().size() != 0) {
- // Resolve the simple conflicts we just detected.
- ConflictResolver resolver;
- resolver.ResolveConflicts(&trans,
- dir->GetCryptographer(&trans),
- applicator.simple_conflict_ids(),
- status);
-
- // Conflict resolution sometimes results in more updates to apply.
- handles.clear();
- dir->GetUnappliedUpdateMetaHandles(
- &trans, server_type_restriction, &handles);
-
- UpdateApplicator conflict_applicator(
- dir->GetCryptographer(&trans),
- session->context()->routing_info(),
- status->group_restriction());
- conflict_applicator.AttemptApplications(&trans, handles);
-
- // We count the number of updates from both applicator passes.
- status->increment_num_updates_applied_by(
- conflict_applicator.updates_applied());
-
- // Encryption conflicts should remain unchanged by the resolution of simple
- // conflicts. Those can only be solved by updating our nigori key bag.
- DCHECK_EQ(conflict_applicator.encryption_conflicts(),
- applicator.encryption_conflicts());
-
- // Hierarchy conflicts should also remain unchanged, for reasons that are
- // more subtle. Hierarchy conflicts exist when the application of a pending
- // update from the server would make the local folder hierarchy
- // inconsistent. The resolution of simple conflicts could never affect the
- // hierarchy conflicting item directly, because hierarchy conflicts are not
- // processed by the conflict resolver. It could, in theory, modify the
- // local hierarchy on which hierarchy conflict detection depends. However,
- // the conflict resolution algorithm currently in use does not allow this.
- DCHECK_EQ(conflict_applicator.hierarchy_conflicts(),
- applicator.hierarchy_conflicts());
-
- // There should be no simple conflicts remaining. We know this because the
- // resolver should have resolved all the conflicts we detected last time
- // and, by the two previous assertions, that no conflicts have been
- // downgraded from encryption or hierarchy down to simple.
- DCHECK(conflict_applicator.simple_conflict_ids().empty());
- }
-
- return SYNCER_OK;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/apply_updates_and_resolve_conflicts_command.h b/chromium/sync/engine/apply_updates_and_resolve_conflicts_command.h
deleted file mode 100644
index 5072f80411c..00000000000
--- a/chromium/sync/engine/apply_updates_and_resolve_conflicts_command.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_APPLY_UPDATES_AND_RESOLVE_CONFLICTS_COMMAND_H_
-#define SYNC_ENGINE_APPLY_UPDATES_AND_RESOLVE_CONFLICTS_COMMAND_H_
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/model_changing_syncer_command.h"
-
-namespace syncer {
-
-class SYNC_EXPORT_PRIVATE ApplyUpdatesAndResolveConflictsCommand
- : public ModelChangingSyncerCommand {
- public:
- ApplyUpdatesAndResolveConflictsCommand();
- virtual ~ApplyUpdatesAndResolveConflictsCommand();
-
- protected:
- // ModelChangingSyncerCommand implementation.
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const OVERRIDE;
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesAndResolveConflictsCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_APPLY_UPDATES_AND_RESOLVE_CONFLICTS_COMMAND_H_
diff --git a/chromium/sync/engine/apply_updates_and_resolve_conflicts_command_unittest.cc b/chromium/sync/engine/apply_updates_and_resolve_conflicts_command_unittest.cc
deleted file mode 100644
index d021789734b..00000000000
--- a/chromium/sync/engine/apply_updates_and_resolve_conflicts_command_unittest.cc
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/location.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/stringprintf.h"
-#include "sync/engine/apply_updates_and_resolve_conflicts_command.h"
-#include "sync/engine/syncer.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/fake_sync_encryption_handler.h"
-#include "sync/util/cryptographer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using std::string;
-using syncable::Id;
-using syncable::MutableEntry;
-using syncable::UNITTEST;
-using syncable::WriteTransaction;
-
-namespace {
-sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
- sync_pb::EntitySpecifics result;
- AddDefaultFieldValue(BOOKMARKS, &result);
- return result;
-}
-} // namespace
-
-// A test fixture for tests exercising ApplyUpdatesAndResolveConflictsCommand.
-class ApplyUpdatesAndResolveConflictsCommandTest : public SyncerCommandTest {
- public:
- protected:
- ApplyUpdatesAndResolveConflictsCommandTest() {}
- virtual ~ApplyUpdatesAndResolveConflictsCommandTest() {}
-
- virtual void SetUp() {
- workers()->clear();
- mutable_routing_info()->clear();
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_PASSWORD)));
- (*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
- (*mutable_routing_info())[PASSWORDS] = GROUP_PASSWORD;
- (*mutable_routing_info())[NIGORI] = GROUP_PASSIVE;
- SyncerCommandTest::SetUp();
- entry_factory_.reset(new TestEntryFactory(directory()));
- ExpectNoGroupsToChange(apply_updates_command_);
- }
-
- protected:
- DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesAndResolveConflictsCommandTest);
-
- ApplyUpdatesAndResolveConflictsCommand apply_updates_command_;
- scoped_ptr<TestEntryFactory> entry_factory_;
-};
-
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, Simple) {
- string root_server_id = syncable::GetNullId().GetServerId();
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "parent", DefaultBookmarkSpecifics(), root_server_id);
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "child", DefaultBookmarkSpecifics(), "parent");
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(0, status.num_encryption_conflicts())
- << "Simple update shouldn't result in conflicts";
- EXPECT_EQ(0, status.num_hierarchy_conflicts())
- << "Simple update shouldn't result in conflicts";
- EXPECT_EQ(2, status.num_updates_applied())
- << "All items should have been successfully applied";
-}
-
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest,
- UpdateWithChildrenBeforeParents) {
- // Set a bunch of updates which are difficult to apply in the order
- // they're received due to dependencies on other unseen items.
- string root_server_id = syncable::GetNullId().GetServerId();
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "a_child_created_first", DefaultBookmarkSpecifics(), "parent");
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "x_child_created_first", DefaultBookmarkSpecifics(), "parent");
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "parent", DefaultBookmarkSpecifics(), root_server_id);
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "a_child_created_second", DefaultBookmarkSpecifics(), "parent");
- entry_factory_->CreateUnappliedNewBookmarkItemWithParent(
- "x_child_created_second", DefaultBookmarkSpecifics(), "parent");
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(5, status.num_updates_applied())
- << "All updates should have been successfully applied";
-}
-
-// Runs the ApplyUpdatesAndResolveConflictsCommand on an item that has both
-// local and remote modifications (IS_UNSYNCED and IS_UNAPPLIED_UPDATE). We
-// expect the command to detect that this update can't be applied because it is
-// in a CONFLICT state.
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, SimpleConflict) {
- entry_factory_->CreateUnappliedAndUnsyncedBookmarkItem("item");
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(1, status.num_server_overwrites())
- << "Unsynced and unapplied item conflict should be resolved";
- EXPECT_EQ(0, status.num_updates_applied())
- << "Update should not be applied; we should override the server.";
-}
-
-// Runs the ApplyUpdatesAndResolveConflictsCommand on an item that has both
-// local and remote modifications *and* the remote modification cannot be
-// applied without violating the tree constraints. We expect the command to
-// detect that this update can't be applied and that this situation can't be
-// resolved with the simple conflict processing logic; it is in a
-// CONFLICT_HIERARCHY state.
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, HierarchyAndSimpleConflict) {
- // Create a simply-conflicting item. It will start with valid parent ids.
- int64 handle = entry_factory_->CreateUnappliedAndUnsyncedBookmarkItem(
- "orphaned_by_server");
- {
- // Manually set the SERVER_PARENT_ID to bad value.
- // A bad parent indicates a hierarchy conflict.
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(entry.good());
-
- entry.PutServerParentId(TestIdFactory::MakeServer("bogus_parent"));
- }
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-}
-
-
-// Runs the ApplyUpdatesAndResolveConflictsCommand on an item with remote
-// modifications that would create a directory loop if the update were applied.
-// We expect the command to detect that this update can't be applied because it
-// is in a CONFLICT_HIERARCHY state.
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest,
- HierarchyConflictDirectoryLoop) {
- // Item 'X' locally has parent of 'root'. Server is updating it to have
- // parent of 'Y'.
- {
- // Create it as a child of root node.
- int64 handle = entry_factory_->CreateSyncedItem("X", BOOKMARKS, true);
-
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(entry.good());
-
- // Re-parent from root to "Y"
- entry.PutServerVersion(entry_factory_->GetNextRevision());
- entry.PutIsUnappliedUpdate(true);
- entry.PutServerParentId(TestIdFactory::MakeServer("Y"));
- }
-
- // Item 'Y' is child of 'X'.
- entry_factory_->CreateUnsyncedItem(
- TestIdFactory::MakeServer("Y"), TestIdFactory::MakeServer("X"), "Y", true,
- BOOKMARKS, NULL);
-
- // If the server's update were applied, we would have X be a child of Y, and Y
- // as a child of X. That's a directory loop. The UpdateApplicator should
- // prevent the update from being applied and note that this is a hierarchy
- // conflict.
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
-
- // This should count as a hierarchy conflict.
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-}
-
-// Runs the ApplyUpdatesAndResolveConflictsCommand on a directory where the
-// server sent us an update to add a child to a locally deleted (and unsynced)
-// parent. We expect the command to not apply the update and to indicate the
-// update is in a CONFLICT_HIERARCHY state.
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest,
- HierarchyConflictDeletedParent) {
- // Create a locally deleted parent item.
- int64 parent_handle;
- entry_factory_->CreateUnsyncedItem(
- Id::CreateFromServerId("parent"), TestIdFactory::root(),
- "parent", true, BOOKMARKS, &parent_handle);
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_HANDLE, parent_handle);
- entry.PutIsDel(true);
- }
-
- // Create an incoming child from the server.
- entry_factory_->CreateUnappliedNewItemWithParent(
- "child", DefaultBookmarkSpecifics(), "parent");
-
- // The server's update may seem valid to some other client, but on this client
- // that new item's parent no longer exists. The update should not be applied
- // and the update applicator should indicate this is a hierarchy conflict.
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-}
-
-// Runs the ApplyUpdatesAndResolveConflictsCommand on a directory where the
-// server is trying to delete a folder that has a recently added (and unsynced)
-// child. We expect the command to not apply the update because it is in a
-// CONFLICT_HIERARCHY state.
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest,
- HierarchyConflictDeleteNonEmptyDirectory) {
- // Create a server-deleted directory.
- {
- // Create it as a child of root node.
- int64 handle = entry_factory_->CreateSyncedItem("parent", BOOKMARKS, true);
-
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(entry.good());
-
- // Delete it on the server.
- entry.PutServerVersion(entry_factory_->GetNextRevision());
- entry.PutIsUnappliedUpdate(true);
- entry.PutServerParentId(TestIdFactory::root());
- entry.PutServerIsDel(true);
- }
-
- // Create a local child of the server-deleted directory.
- entry_factory_->CreateUnsyncedItem(
- TestIdFactory::MakeServer("child"), TestIdFactory::MakeServer("parent"),
- "child", false, BOOKMARKS, NULL);
-
- // The server's request to delete the directory must be ignored, otherwise our
- // unsynced new child would be orphaned. This is a hierarchy conflict.
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- // This should count as a hierarchy conflict.
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-}
-
-// Runs the ApplyUpdatesAndResolveConflictsCommand on a server-created item that
-// has a locally unknown parent. We expect the command to not apply the update
-// because the item is in a CONFLICT_HIERARCHY state.
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest,
- HierarchyConflictUnknownParent) {
- // We shouldn't be able to do anything with either of these items.
- entry_factory_->CreateUnappliedNewItemWithParent(
- "some_item", DefaultBookmarkSpecifics(), "unknown_parent");
- entry_factory_->CreateUnappliedNewItemWithParent(
- "some_other_item", DefaultBookmarkSpecifics(), "some_item");
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(2, status.num_hierarchy_conflicts())
- << "All updates with an unknown ancestors should be in conflict";
- EXPECT_EQ(0, status.num_updates_applied())
- << "No item with an unknown ancestor should be applied";
-}
-
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, ItemsBothKnownAndUnknown) {
- // See what happens when there's a mixture of good and bad updates.
- string root_server_id = syncable::GetNullId().GetServerId();
- entry_factory_->CreateUnappliedNewItemWithParent(
- "first_unknown_item", DefaultBookmarkSpecifics(), "unknown_parent");
- entry_factory_->CreateUnappliedNewItemWithParent(
- "first_known_item", DefaultBookmarkSpecifics(), root_server_id);
- entry_factory_->CreateUnappliedNewItemWithParent(
- "second_unknown_item", DefaultBookmarkSpecifics(), "unknown_parent");
- entry_factory_->CreateUnappliedNewItemWithParent(
- "second_known_item", DefaultBookmarkSpecifics(), "first_known_item");
- entry_factory_->CreateUnappliedNewItemWithParent(
- "third_known_item", DefaultBookmarkSpecifics(), "fourth_known_item");
- entry_factory_->CreateUnappliedNewItemWithParent(
- "fourth_known_item", DefaultBookmarkSpecifics(), root_server_id);
-
- ExpectGroupToChange(apply_updates_command_, GROUP_UI);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(2, status.num_hierarchy_conflicts())
- << "The updates with unknown ancestors should be in conflict";
- EXPECT_EQ(4, status.num_updates_applied())
- << "The updates with known ancestors should be successfully applied";
-}
-
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, DecryptablePassword) {
- // Decryptable password updates should be applied.
- Cryptographer* cryptographer;
- {
- // Storing the cryptographer separately is bad, but for this test we
- // know it's safe.
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- }
-
- KeyParams params = {"localhost", "dummy", "foobar"};
- cryptographer->AddKey(params);
-
- sync_pb::EntitySpecifics specifics;
- sync_pb::PasswordSpecificsData data;
- data.set_origin("http://example.com");
-
- cryptographer->Encrypt(data,
- specifics.mutable_password()->mutable_encrypted());
- entry_factory_->CreateUnappliedNewItem("item", specifics, false);
-
- ExpectGroupToChange(apply_updates_command_, GROUP_PASSWORD);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(1, status.num_updates_applied())
- << "The updates that can be decrypted should be applied";
-}
-
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, UndecryptableData) {
- // Undecryptable updates should not be applied.
- sync_pb::EntitySpecifics encrypted_bookmark;
- encrypted_bookmark.mutable_encrypted();
- AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
- string root_server_id = syncable::GetNullId().GetServerId();
- entry_factory_->CreateUnappliedNewItemWithParent(
- "folder", encrypted_bookmark, root_server_id);
- entry_factory_->CreateUnappliedNewItem("item2", encrypted_bookmark, false);
- sync_pb::EntitySpecifics encrypted_password;
- encrypted_password.mutable_password();
- entry_factory_->CreateUnappliedNewItem("item3", encrypted_password, false);
-
- ExpectGroupsToChange(apply_updates_command_, GROUP_UI, GROUP_PASSWORD);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(3, status.num_encryption_conflicts())
- << "Updates that can't be decrypted should be in encryption conflict";
- EXPECT_EQ(0, status.num_updates_applied())
- << "No update that can't be decrypted should be applied";
-}
-
-TEST_F(ApplyUpdatesAndResolveConflictsCommandTest, SomeUndecryptablePassword) {
- Cryptographer* cryptographer;
- // Only decryptable password updates should be applied.
- {
- sync_pb::EntitySpecifics specifics;
- sync_pb::PasswordSpecificsData data;
- data.set_origin("http://example.com/1");
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
-
- KeyParams params = {"localhost", "dummy", "foobar"};
- cryptographer->AddKey(params);
-
- cryptographer->Encrypt(data,
- specifics.mutable_password()->mutable_encrypted());
- }
- entry_factory_->CreateUnappliedNewItem("item1", specifics, false);
- }
- {
- // Create a new cryptographer, independent of the one in the session.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- KeyParams params = {"localhost", "dummy", "bazqux"};
- other_cryptographer.AddKey(params);
-
- sync_pb::EntitySpecifics specifics;
- sync_pb::PasswordSpecificsData data;
- data.set_origin("http://example.com/2");
-
- other_cryptographer.Encrypt(data,
- specifics.mutable_password()->mutable_encrypted());
- entry_factory_->CreateUnappliedNewItem("item2", specifics, false);
- }
-
- ExpectGroupToChange(apply_updates_command_, GROUP_PASSWORD);
- apply_updates_command_.ExecuteImpl(session());
-
- const sessions::StatusController& status = session()->status_controller();
- EXPECT_EQ(1, status.num_encryption_conflicts())
- << "The updates that can't be decrypted should be in encryption "
- << "conflict";
- EXPECT_EQ(1, status.num_updates_applied())
- << "The undecryptable password update shouldn't be applied";
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/build_commit_command.cc b/chromium/sync/engine/build_commit_command.cc
deleted file mode 100644
index 65f1cdfb9cf..00000000000
--- a/chromium/sync/engine/build_commit_command.cc
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/build_commit_command.h"
-
-#include <limits>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/strings/string_util.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/ordered_commit_set.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/util/time.h"
-
-using std::set;
-using std::string;
-using std::vector;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using syncable::Entry;
-using syncable::IS_DEL;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::Id;
-using syncable::SPECIFICS;
-using syncable::UNIQUE_POSITION;
-
-BuildCommitCommand::BuildCommitCommand(
- syncable::BaseTransaction* trans,
- const sessions::OrderedCommitSet& batch_commit_set,
- sync_pb::ClientToServerMessage* commit_message,
- ExtensionsActivity::Records* extensions_activity_buffer)
- : trans_(trans),
- batch_commit_set_(batch_commit_set),
- commit_message_(commit_message),
- extensions_activity_buffer_(extensions_activity_buffer) {
-}
-
-BuildCommitCommand::~BuildCommitCommand() {}
-
-void BuildCommitCommand::AddExtensionsActivityToMessage(
- SyncSession* session, sync_pb::CommitMessage* message) {
- // We only send ExtensionsActivity to the server if bookmarks are being
- // committed.
- ExtensionsActivity* activity = session->context()->extensions_activity();
- if (batch_commit_set_.HasBookmarkCommitId()) {
- // This isn't perfect, since the set of extensions activity may not
- // correlate exactly with the items being committed. That's OK as
- // long as we're looking for a rough estimate of extensions activity,
- // not an precise mapping of which commits were triggered by which
- // extension.
- //
- // We will push this list of extensions activity back into the
- // ExtensionsActivityMonitor if this commit fails. That's why we must keep
- // a copy of these records in the session.
- activity->GetAndClearRecords(extensions_activity_buffer_);
-
- const ExtensionsActivity::Records& records =
- *extensions_activity_buffer_;
- for (ExtensionsActivity::Records::const_iterator it =
- records.begin();
- it != records.end(); ++it) {
- sync_pb::ChromiumExtensionsActivity* activity_message =
- message->add_extensions_activity();
- activity_message->set_extension_id(it->second.extension_id);
- activity_message->set_bookmark_writes_since_last_commit(
- it->second.bookmark_write_count);
- }
- }
-}
-
-void BuildCommitCommand::AddClientConfigParamsToMessage(
- SyncSession* session, sync_pb::CommitMessage* message) {
- const ModelSafeRoutingInfo& routing_info = session->context()->routing_info();
- sync_pb::ClientConfigParams* config_params = message->mutable_config_params();
- for (std::map<ModelType, ModelSafeGroup>::const_iterator iter =
- routing_info.begin(); iter != routing_info.end(); ++iter) {
- if (ProxyTypes().Has(iter->first))
- continue;
- int field_number = GetSpecificsFieldNumberFromModelType(iter->first);
- config_params->mutable_enabled_type_ids()->Add(field_number);
- }
- config_params->set_tabs_datatype_enabled(
- routing_info.count(syncer::PROXY_TABS) > 0);
-}
-
-namespace {
-void SetEntrySpecifics(const Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry) {
- // Add the new style extension and the folder bit.
- sync_entry->mutable_specifics()->CopyFrom(meta_entry.GetSpecifics());
- sync_entry->set_folder(meta_entry.GetIsDir());
-
- CHECK(!sync_entry->specifics().password().has_client_only_encrypted_data());
- DCHECK_EQ(meta_entry.GetModelType(), GetModelType(*sync_entry));
-}
-} // namespace
-
-SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
- commit_message_->set_share(session->context()->account_name());
- commit_message_->set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
-
- sync_pb::CommitMessage* commit_message = commit_message_->mutable_commit();
- commit_message->set_cache_guid(trans_->directory()->cache_guid());
- AddExtensionsActivityToMessage(session, commit_message);
- AddClientConfigParamsToMessage(session, commit_message);
-
- for (size_t i = 0; i < batch_commit_set_.Size(); i++) {
- int64 handle = batch_commit_set_.GetCommitHandleAt(i);
- sync_pb::SyncEntity* sync_entry = commit_message->add_entries();
-
- Entry meta_entry(trans_, syncable::GET_BY_HANDLE, handle);
- CHECK(meta_entry.good());
-
- DCHECK_NE(0UL,
- session->context()->routing_info().count(
- meta_entry.GetModelType()))
- << "Committing change to datatype that's not actively enabled.";
-
- BuildCommitItem(meta_entry, sync_entry);
- }
-
-
- return SYNCER_OK;
-}
-
-// static.
-void BuildCommitCommand::BuildCommitItem(
- const syncable::Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry) {
- syncable::Id id = meta_entry.GetId();
- sync_entry->set_id_string(SyncableIdToProto(id));
-
- string name = meta_entry.GetNonUniqueName();
- CHECK(!name.empty()); // Make sure this isn't an update.
- // Note: Truncation is also performed in WriteNode::SetTitle(..). But this
- // call is still necessary to handle any title changes that might originate
- // elsewhere, or already be persisted in the directory.
- TruncateUTF8ToByteSize(name, 255, &name);
- sync_entry->set_name(name);
-
- // Set the non_unique_name. If we do, the server ignores
- // the |name| value (using |non_unique_name| instead), and will return
- // in the CommitResponse a unique name if one is generated.
- // We send both because it may aid in logging.
- sync_entry->set_non_unique_name(name);
-
- if (!meta_entry.GetUniqueClientTag().empty()) {
- sync_entry->set_client_defined_unique_tag(
- meta_entry.GetUniqueClientTag());
- }
-
- // Deleted items with server-unknown parent ids can be a problem so we set
- // the parent to 0. (TODO(sync): Still true in protocol?).
- Id new_parent_id;
- if (meta_entry.GetIsDel() &&
- !meta_entry.GetParentId().ServerKnows()) {
- new_parent_id = syncable::BaseTransaction::root_id();
- } else {
- new_parent_id = meta_entry.GetParentId();
- }
- sync_entry->set_parent_id_string(SyncableIdToProto(new_parent_id));
-
- // If our parent has changed, send up the old one so the server
- // can correctly deal with multiple parents.
- // TODO(nick): With the server keeping track of the primary sync parent,
- // it should not be necessary to provide the old_parent_id: the version
- // number should suffice.
- if (new_parent_id != meta_entry.GetServerParentId() &&
- 0 != meta_entry.GetBaseVersion() &&
- syncable::CHANGES_VERSION != meta_entry.GetBaseVersion()) {
- sync_entry->set_old_parent_id(
- SyncableIdToProto(meta_entry.GetServerParentId()));
- }
-
- int64 version = meta_entry.GetBaseVersion();
- if (syncable::CHANGES_VERSION == version || 0 == version) {
- // Undeletions are only supported for items that have a client tag.
- DCHECK(!id.ServerKnows() ||
- !meta_entry.GetUniqueClientTag().empty())
- << meta_entry;
-
- // Version 0 means to create or undelete an object.
- sync_entry->set_version(0);
- } else {
- DCHECK(id.ServerKnows()) << meta_entry;
- sync_entry->set_version(meta_entry.GetBaseVersion());
- }
- sync_entry->set_ctime(TimeToProtoTime(meta_entry.GetCtime()));
- sync_entry->set_mtime(TimeToProtoTime(meta_entry.GetMtime()));
-
- // Deletion is final on the server, let's move things and then delete them.
- if (meta_entry.GetIsDel()) {
- sync_entry->set_deleted(true);
- } else {
- if (meta_entry.GetSpecifics().has_bookmark()) {
- // Both insert_after_item_id and position_in_parent fields are set only
- // for legacy reasons. See comments in sync.proto for more information.
- const Id& prev_id = meta_entry.GetPredecessorId();
- string prev_id_string =
- prev_id.IsRoot() ? string() : prev_id.GetServerId();
- sync_entry->set_insert_after_item_id(prev_id_string);
- sync_entry->set_position_in_parent(
- meta_entry.GetUniquePosition().ToInt64());
- meta_entry.GetUniquePosition().ToProto(
- sync_entry->mutable_unique_position());
- }
- SetEntrySpecifics(meta_entry, sync_entry);
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/build_commit_command.h b/chromium/sync/engine/build_commit_command.h
deleted file mode 100644
index a47c62afe0c..00000000000
--- a/chromium/sync/engine/build_commit_command.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
-#define SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/syncer_command.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/util/extensions_activity.h"
-
-namespace syncer {
-
-namespace sessions {
-class OrderedCommitSet;
-}
-
-namespace syncable {
-class Entry;
-class BaseTransaction;
-}
-
-// A class that contains the code used to serialize a set of sync items into a
-// protobuf commit message. This conversion process references the
-// syncable::Directory, which is why it must be called within the same
-// transaction as the GetCommitIdsCommand that fetched the set of items to be
-// committed.
-//
-// See SyncerCommand documentation for more info.
-class SYNC_EXPORT_PRIVATE BuildCommitCommand : public SyncerCommand {
- public:
- // The batch_commit_set parameter contains a set of references to the items
- // that should be committed.
- //
- // The commit_message parameter is an output parameter which will contain the
- // fully initialized commit message once ExecuteImpl() has been called.
- BuildCommitCommand(
- syncable::BaseTransaction* trans,
- const sessions::OrderedCommitSet& batch_commit_set,
- sync_pb::ClientToServerMessage* commit_message,
- ExtensionsActivity::Records* extensions_activity_buffer);
- virtual ~BuildCommitCommand();
-
- // SyncerCommand implementation.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
-
- // Helper function that takes a snapshot of |meta_entry| and puts it into a
- // protobuf suitable for use in a commit request message.
- static void BuildCommitItem(const syncable::Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry);
-
- private:
- FRIEND_TEST_ALL_PREFIXES(BuildCommitCommandTest, InterpolatePosition);
-
- void AddExtensionsActivityToMessage(sessions::SyncSession* session,
- sync_pb::CommitMessage* message);
-
- // Fills the config_params field of |message|.
- void AddClientConfigParamsToMessage(sessions::SyncSession* session,
- sync_pb::CommitMessage* message);
-
- DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
-
- // A pointer to a valid transaction not owned by this class.
- syncable::BaseTransaction* trans_;
-
- // Input parameter; see constructor comment.
- const sessions::OrderedCommitSet& batch_commit_set_;
-
- // Output parameter; see constructor comment.
- sync_pb::ClientToServerMessage* commit_message_;
-
- ExtensionsActivity::Records* extensions_activity_buffer_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
diff --git a/chromium/sync/engine/commit.cc b/chromium/sync/engine/commit.cc
index 415c608def6..a8db1a41ee3 100644
--- a/chromium/sync/engine/commit.cc
+++ b/chromium/sync/engine/commit.cc
@@ -5,184 +5,176 @@
#include "sync/engine/commit.h"
#include "base/debug/trace_event.h"
-#include "sync/engine/build_commit_command.h"
-#include "sync/engine/get_commit_ids.h"
-#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/commit_util.h"
+#include "sync/engine/sync_directory_commit_contribution.h"
#include "sync/engine/syncer.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/sessions/sync_session.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_write_transaction.h"
namespace syncer {
-using sessions::SyncSession;
-using sessions::StatusController;
-using syncable::SYNCER;
-using syncable::WriteTransaction;
-
-namespace {
-
-// Sets the SYNCING bits of all items in the commit set to value_to_set.
-void SetAllSyncingBitsToValue(WriteTransaction* trans,
- const sessions::OrderedCommitSet& commit_set,
- bool value_to_set) {
- const std::vector<int64>& commit_handles = commit_set.GetAllCommitHandles();
- for (std::vector<int64>::const_iterator it = commit_handles.begin();
- it != commit_handles.end(); ++it) {
- syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *it);
- if (entry.good()) {
- entry.PutSyncing(value_to_set);
- }
- }
-}
-
-// Sets the SYNCING bits for all items in the OrderedCommitSet.
-void SetSyncingBits(WriteTransaction* trans,
- const sessions::OrderedCommitSet& commit_set) {
- SetAllSyncingBitsToValue(trans, commit_set, true);
+Commit::Commit(
+ const std::map<ModelType, SyncDirectoryCommitContribution*>& contributions,
+ const sync_pb::ClientToServerMessage& message,
+ ExtensionsActivity::Records extensions_activity_buffer)
+ : contributions_(contributions),
+ deleter_(&contributions_),
+ message_(message),
+ extensions_activity_buffer_(extensions_activity_buffer),
+ cleaned_up_(false) {
}
-// Clears the SYNCING bits for all items in the OrderedCommitSet.
-void ClearSyncingBits(syncable::Directory* dir,
- const sessions::OrderedCommitSet& commit_set) {
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- SetAllSyncingBitsToValue(&trans, commit_set, false);
+Commit::~Commit() {
+ DCHECK(cleaned_up_);
}
-// Helper function that finds sync items that are ready to be committed to the
-// server and serializes them into a commit message protobuf. It will return
-// false iff there are no entries ready to be committed at this time.
-//
-// The OrderedCommitSet parameter is an output parameter which will contain
-// the set of all items which are to be committed. The number of items in
-// the set shall not exceed the maximum batch size. (The default batch size
-// is currently 25, though it can be overwritten by the server.)
-//
-// The ClientToServerMessage parameter is an output parameter which will contain
-// the commit message which should be sent to the server. It is valid iff the
-// return value of this function is true.
-bool PrepareCommitMessage(
- sessions::SyncSession* session,
+Commit* Commit::Init(
ModelTypeSet requested_types,
- sessions::OrderedCommitSet* commit_set,
- sync_pb::ClientToServerMessage* commit_message,
- ExtensionsActivity::Records* extensions_activity_buffer) {
- TRACE_EVENT0("sync", "PrepareCommitMessage");
-
- commit_set->Clear();
- commit_message->Clear();
-
- WriteTransaction trans(FROM_HERE, SYNCER, session->context()->directory());
+ size_t max_entries,
+ const std::string& account_name,
+ const std::string& cache_guid,
+ CommitContributorMap* contributor_map,
+ ExtensionsActivity* extensions_activity) {
+ // Gather per-type contributions.
+ ContributionMap contributions;
+ size_t num_entries = 0;
+ for (ModelTypeSet::Iterator it = requested_types.First();
+ it.Good(); it.Inc()) {
+ CommitContributorMap::iterator cm_it = contributor_map->find(it.Get());
+ if (cm_it == contributor_map->end()) {
+ NOTREACHED()
+ << "Could not find requested type " << ModelTypeToString(it.Get())
+ << " in contributor map.";
+ continue;
+ }
+ size_t spaces_remaining = max_entries - num_entries;
+ SyncDirectoryCommitContribution* contribution =
+ cm_it->second->GetContribution(spaces_remaining);
+ if (contribution) {
+ num_entries += contribution->GetNumEntries();
+ contributions.insert(std::make_pair(it.Get(), contribution));
+ }
+ if (num_entries == max_entries) {
+ break; // No point in continuing to iterate in this case.
+ }
+ }
- // Fetch the items to commit.
- const size_t batch_size = session->context()->max_commit_batch_size();
- GetCommitIds(&trans, requested_types, batch_size, commit_set);
+ // Give up if no one had anything to commit.
+ if (contributions.empty())
+ return NULL;
+
+ sync_pb::ClientToServerMessage message;
+ message.set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
+ message.set_share(account_name);
+
+ sync_pb::CommitMessage* commit_message = message.mutable_commit();
+ commit_message->set_cache_guid(cache_guid);
+
+ // Set extensions activity if bookmark commits are present.
+ ExtensionsActivity::Records extensions_activity_buffer;
+ ContributionMap::iterator it = contributions.find(syncer::BOOKMARKS);
+ if (it != contributions.end() && it->second->GetNumEntries() != 0) {
+ commit_util::AddExtensionsActivityToMessage(
+ extensions_activity,
+ &extensions_activity_buffer,
+ commit_message);
+ }
- DVLOG(1) << "Commit message will contain " << commit_set->Size() << " items.";
- if (commit_set->Empty()) {
- return false;
+ // Set the client config params.
+ ModelTypeSet enabled_types;
+ for (CommitContributorMap::iterator it = contributor_map->begin();
+ it != contributor_map->end(); ++it) {
+ enabled_types.Put(it->first);
}
+ commit_util::AddClientConfigParamsToMessage(enabled_types,
+ commit_message);
- // Serialize the message.
- BuildCommitCommand build_commit_command(&trans,
- *commit_set,
- commit_message,
- extensions_activity_buffer);
- build_commit_command.Execute(session);
+ // Finally, serialize all our contributions.
+ for (std::map<ModelType, SyncDirectoryCommitContribution*>::iterator it =
+ contributions.begin(); it != contributions.end(); ++it) {
+ it->second->AddToCommitMessage(&message);
+ }
- SetSyncingBits(&trans, *commit_set);
- return true;
+ // If we made it this far, then we've successfully prepared a commit message.
+ return new Commit(contributions, message, extensions_activity_buffer);
}
-SyncerError BuildAndPostCommitsImpl(ModelTypeSet requested_types,
- Syncer* syncer,
- sessions::SyncSession* session,
- sessions::OrderedCommitSet* commit_set) {
- ModelTypeSet commit_request_types;
- while (!syncer->ExitRequested()) {
- sync_pb::ClientToServerMessage commit_message;
- ExtensionsActivity::Records extensions_activity_buffer;
-
- if (!PrepareCommitMessage(session,
- requested_types,
- commit_set,
- &commit_message,
- &extensions_activity_buffer)) {
- break;
- }
-
- commit_request_types.PutAll(commit_set->Types());
- session->mutable_status_controller()->set_commit_request_types(
- commit_request_types);
+SyncerError Commit::PostAndProcessResponse(
+ sessions::SyncSession* session,
+ sessions::StatusController* status,
+ ExtensionsActivity* extensions_activity) {
+ ModelTypeSet request_types;
+ for (ContributionMap::const_iterator it = contributions_.begin();
+ it != contributions_.end(); ++it) {
+ request_types.Put(it->first);
+ }
+ session->mutable_status_controller()->set_commit_request_types(request_types);
- sync_pb::ClientToServerResponse commit_response;
+ if (session->context()->debug_info_getter()) {
+ sync_pb::DebugInfo* debug_info = message_.mutable_debug_info();
+ session->context()->debug_info_getter()->GetDebugInfo(debug_info);
+ }
- DVLOG(1) << "Sending commit message.";
- TRACE_EVENT_BEGIN0("sync", "PostCommit");
- const SyncerError post_result = SyncerProtoUtil::PostClientToServerMessage(
- &commit_message, &commit_response, session);
- TRACE_EVENT_END0("sync", "PostCommit");
+ DVLOG(1) << "Sending commit message.";
+ TRACE_EVENT_BEGIN0("sync", "PostCommit");
+ const SyncerError post_result = SyncerProtoUtil::PostClientToServerMessage(
+ &message_, &response_, session);
+ TRACE_EVENT_END0("sync", "PostCommit");
- // TODO(rlarocque): Put all the post-commit logic in one place.
- // See crbug.com/196338.
+ if (post_result != SYNCER_OK) {
+ LOG(WARNING) << "Post commit failed";
+ return post_result;
+ }
- if (post_result != SYNCER_OK) {
- LOG(WARNING) << "Post commit failed";
- return post_result;
- }
+ if (!response_.has_commit()) {
+ LOG(WARNING) << "Commit response has no commit body!";
+ return SERVER_RESPONSE_VALIDATION_FAILED;
+ }
- if (!commit_response.has_commit()) {
- LOG(WARNING) << "Commit response has no commit body!";
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
+ size_t message_entries = message_.commit().entries_size();
+ size_t response_entries = response_.commit().entryresponse_size();
+ if (message_entries != response_entries) {
+ LOG(ERROR)
+ << "Commit response has wrong number of entries! "
+ << "Expected: " << message_entries << ", "
+ << "Got: " << response_entries;
+ return SERVER_RESPONSE_VALIDATION_FAILED;
+ }
- const size_t num_responses = commit_response.commit().entryresponse_size();
- if (num_responses != commit_set->Size()) {
- LOG(ERROR)
- << "Commit response has wrong number of entries! "
- << "Expected: " << commit_set->Size() << ", "
- << "Got: " << num_responses;
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
+ if (session->context()->debug_info_getter()) {
+ // Clear debug info now that we have successfully sent it to the server.
+ DVLOG(1) << "Clearing client debug info.";
+ session->context()->debug_info_getter()->ClearDebugInfo();
+ }
- TRACE_EVENT_BEGIN0("sync", "ProcessCommitResponse");
- ProcessCommitResponseCommand process_response_command(
- *commit_set, commit_message, commit_response);
- const SyncerError processing_result =
- process_response_command.Execute(session);
- TRACE_EVENT_END0("sync", "ProcessCommitResponse");
-
- // If the commit failed, return the data to the ExtensionsActivityMonitor.
- if (session->status_controller().
- model_neutral_state().num_successful_bookmark_commits == 0) {
- ExtensionsActivity* extensions_activity =
- session->context()->extensions_activity();
- extensions_activity->PutRecords(extensions_activity_buffer);
+ // Let the contributors process the responses to each of their requests.
+ SyncerError processing_result = SYNCER_OK;
+ for (std::map<ModelType, SyncDirectoryCommitContribution*>::iterator it =
+ contributions_.begin(); it != contributions_.end(); ++it) {
+ TRACE_EVENT1("sync", "ProcessCommitResponse",
+ "type", ModelTypeToString(it->first));
+ SyncerError type_result =
+ it->second->ProcessCommitResponse(response_, status);
+ if (processing_result == SYNCER_OK && type_result != SYNCER_OK) {
+ processing_result = type_result;
}
+ }
- if (processing_result != SYNCER_OK) {
- return processing_result;
- }
- session->SendEventNotification(SyncEngineEvent::STATUS_CHANGED);
+ // Handle bookmarks' special extensions activity stats.
+ if (session->status_controller().
+ model_neutral_state().num_successful_bookmark_commits == 0) {
+ extensions_activity->PutRecords(extensions_activity_buffer_);
}
- return SYNCER_OK;
+ return processing_result;
}
-} // namespace
-
-
-SyncerError BuildAndPostCommits(ModelTypeSet requested_types,
- Syncer* syncer,
- sessions::SyncSession* session) {
- sessions::OrderedCommitSet commit_set(session->context()->routing_info());
- SyncerError result =
- BuildAndPostCommitsImpl(requested_types, syncer, session, &commit_set);
- if (result != SYNCER_OK) {
- ClearSyncingBits(session->context()->directory(), commit_set);
+void Commit::CleanUp() {
+ for (ContributionMap::iterator it = contributions_.begin();
+ it != contributions_.end(); ++it) {
+ it->second->CleanUp();
}
- return result;
+ cleaned_up_ = true;
}
} // namespace syncer
diff --git a/chromium/sync/engine/commit.h b/chromium/sync/engine/commit.h
index 168d950c276..4750971bc7c 100644
--- a/chromium/sync/engine/commit.h
+++ b/chromium/sync/engine/commit.h
@@ -5,33 +5,76 @@
#ifndef SYNC_ENGINE_COMMIT_H_
#define SYNC_ENGINE_COMMIT_H_
+#include <map>
+
+#include "base/stl_util.h"
+#include "sync/base/sync_export.h"
+#include "sync/engine/sync_directory_commit_contribution.h"
+#include "sync/engine/sync_directory_commit_contributor.h"
#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/util/syncer_error.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/util/extensions_activity.h"
namespace syncer {
namespace sessions {
+class StatusController;
class SyncSession;
}
class Syncer;
-// This function will commit batches of unsynced items to the server until the
-// number of unsynced and ready to commit items reaches zero or an error is
-// encountered. A request to exit early will be treated as an error and will
-// abort any blocking operations.
-//
-// The Syncer parameter is provided only for access to its ExitRequested()
-// method. This is technically unnecessary since an early exit request should
-// be detected as we attempt to contact the sync server.
+// This class wraps the actions related to building and executing a single
+// commit operation.
//
-// The SyncSession parameter contains pointers to various bits of state,
-// including the syncable::Directory that contains all sync items and the
-// ServerConnectionManager used to contact the server.
-SyncerError BuildAndPostCommits(
- ModelTypeSet request_types,
- Syncer* syncer,
- sessions::SyncSession* session);
+// This class's most important responsibility is to manage its ContributionMap;
+// it serves as a container for those per-type contribution objects. Although
+// it would have been acceptable to keep this class a dumb container, there was
+// no other convenient place to put the Init() and PostAndProcessResponse()
+// functions, so they ended up here.
+class SYNC_EXPORT_PRIVATE Commit {
+ public:
+ Commit(
+ const std::map<ModelType, SyncDirectoryCommitContribution*>&
+ contributions,
+ const sync_pb::ClientToServerMessage& message,
+ ExtensionsActivity::Records extensions_activity_buffer);
+
+ // This destructor will DCHECK if CleanUp() has not been called.
+ ~Commit();
+
+ static Commit* Init(
+ ModelTypeSet requested_types,
+ size_t max_entries,
+ const std::string& account_name,
+ const std::string& cache_guid,
+ CommitContributorMap* contributor_map,
+ ExtensionsActivity* extensions_activity);
+
+ SyncerError PostAndProcessResponse(
+ sessions::SyncSession* session,
+ sessions::StatusController* status,
+ ExtensionsActivity* extensions_activity);
+
+ // Cleans up state associated with this commit. Must be called before the
+ // destructor.
+ void CleanUp();
+
+ private:
+ typedef std::map<ModelType, SyncDirectoryCommitContribution*> ContributionMap;
+
+ ContributionMap contributions_;
+ STLValueDeleter<ContributionMap> deleter_;
+
+ sync_pb::ClientToServerMessage message_;
+ sync_pb::ClientToServerResponse response_;
+ ExtensionsActivity::Records extensions_activity_buffer_;
+
+ // Debug-only flag used to indicate whether it's safe to destroy this object.
+ bool cleaned_up_;
+};
} // namespace syncer
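A caller-side sketch of the new Commit lifecycle declared above (Init(),
PostAndProcessResponse(), CleanUp()). This is a hypothetical helper for
illustration only; the contributor map, session, status controller, and
extensions activity are assumed to be supplied by the sync session context.

// Hypothetical usage sketch; CommitOnce is an invented name.
#include "sync/engine/commit.h"
#include "sync/sessions/status_controller.h"
#include "sync/sessions/sync_session.h"

namespace syncer {

SyncerError CommitOnce(ModelTypeSet requested_types,
                       size_t max_entries,
                       const std::string& account_name,
                       const std::string& cache_guid,
                       CommitContributorMap* contributor_map,
                       ExtensionsActivity* extensions_activity,
                       sessions::SyncSession* session,
                       sessions::StatusController* status) {
  // Init() returns NULL when no contributor has anything to commit.
  Commit* commit = Commit::Init(requested_types, max_entries, account_name,
                                cache_guid, contributor_map,
                                extensions_activity);
  if (!commit)
    return SYNCER_OK;

  SyncerError result =
      commit->PostAndProcessResponse(session, status, extensions_activity);

  // CleanUp() must run before destruction; ~Commit() DCHECKs that it did.
  commit->CleanUp();
  delete commit;
  return result;
}

}  // namespace syncer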
diff --git a/chromium/sync/engine/process_commit_response_command.cc b/chromium/sync/engine/commit_util.cc
index 2c38e98615b..1081446b7a8 100644
--- a/chromium/sync/engine/process_commit_response_command.cc
+++ b/chromium/sync/engine/commit_util.cc
@@ -1,162 +1,187 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/commit_util.h"
-#include <cstddef>
+#include <limits>
#include <set>
#include <string>
#include <vector>
-#include "base/basictypes.h"
-#include "base/location.h"
+#include "base/strings/string_util.h"
#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncer_util.h"
#include "sync/internal_api/public/base/unique_position.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
#include "sync/sessions/sync_session.h"
+#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
+#include "sync/syncable/syncable_base_transaction.h"
+#include "sync/syncable/syncable_base_write_transaction.h"
+#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/time.h"
using std::set;
using std::string;
using std::vector;
-using sync_pb::CommitResponse;
namespace syncer {
-using sessions::OrderedCommitSet;
-using sessions::StatusController;
using sessions::SyncSession;
-using syncable::WriteTransaction;
-using syncable::MutableEntry;
using syncable::Entry;
-using syncable::BASE_VERSION;
-using syncable::GET_BY_ID;
-using syncable::GET_BY_HANDLE;
-using syncable::ID;
using syncable::IS_DEL;
-using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
-using syncable::PARENT_ID;
-using syncable::SERVER_IS_DEL;
-using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_VERSION;
-using syncable::SYNCER;
-using syncable::SYNCING;
-
-ProcessCommitResponseCommand::ProcessCommitResponseCommand(
- const sessions::OrderedCommitSet& commit_set,
- const sync_pb::ClientToServerMessage& commit_message,
- const sync_pb::ClientToServerResponse& commit_response)
- : commit_set_(commit_set),
- commit_message_(commit_message),
- commit_response_(commit_response) {
+using syncable::Id;
+using syncable::SPECIFICS;
+using syncable::UNIQUE_POSITION;
+
+namespace commit_util {
+
+void AddExtensionsActivityToMessage(
+ ExtensionsActivity* activity,
+ ExtensionsActivity::Records* extensions_activity_buffer,
+ sync_pb::CommitMessage* message) {
+ // This isn't perfect, since the set of extensions activity may not correlate
+ // exactly with the items being committed. That's OK as long as we're looking
+ // for a rough estimate of extensions activity, not a precise mapping of
+ // which commits were triggered by which extension.
+ //
+ // We will push this list of extensions activity back into the
+ // ExtensionsActivityMonitor if this commit fails. That's why we must keep a
+ // copy of these records in the session.
+ activity->GetAndClearRecords(extensions_activity_buffer);
+
+ const ExtensionsActivity::Records& records = *extensions_activity_buffer;
+ for (ExtensionsActivity::Records::const_iterator it =
+ records.begin();
+ it != records.end(); ++it) {
+ sync_pb::ChromiumExtensionsActivity* activity_message =
+ message->add_extensions_activity();
+ activity_message->set_extension_id(it->second.extension_id);
+ activity_message->set_bookmark_writes_since_last_commit(
+ it->second.bookmark_write_count);
+ }
}
-ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
+void AddClientConfigParamsToMessage(
+ ModelTypeSet enabled_types,
+ sync_pb::CommitMessage* message) {
+ sync_pb::ClientConfigParams* config_params = message->mutable_config_params();
+ for (ModelTypeSet::Iterator it = enabled_types.First(); it.Good(); it.Inc()) {
+ if (ProxyTypes().Has(it.Get()))
+ continue;
+ int field_number = GetSpecificsFieldNumberFromModelType(it.Get());
+ config_params->mutable_enabled_type_ids()->Add(field_number);
+ }
+ config_params->set_tabs_datatype_enabled(
+ enabled_types.Has(syncer::PROXY_TABS));
+}
-std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
- const sessions::SyncSession& session) const {
- std::set<ModelSafeGroup> groups_with_commits;
+namespace {
+void SetEntrySpecifics(const Entry& meta_entry,
+ sync_pb::SyncEntity* sync_entry) {
+ // Add the new style extension and the folder bit.
+ sync_entry->mutable_specifics()->CopyFrom(meta_entry.GetSpecifics());
+ sync_entry->set_folder(meta_entry.GetIsDir());
- syncable::Directory* dir = session.context()->directory();
- syncable::ReadTransaction trans(FROM_HERE, dir);
- for (size_t i = 0; i < commit_set_.Size(); ++i) {
- groups_with_commits.insert(
- GetGroupForModelType(commit_set_.GetModelTypeAt(i),
- session.context()->routing_info()));
+ CHECK(!sync_entry->specifics().password().has_client_only_encrypted_data());
+ DCHECK_EQ(meta_entry.GetModelType(), GetModelType(*sync_entry));
+}
+} // namespace
+
+void BuildCommitItem(
+ const syncable::Entry& meta_entry,
+ sync_pb::SyncEntity* sync_entry) {
+ syncable::Id id = meta_entry.GetId();
+ sync_entry->set_id_string(SyncableIdToProto(id));
+
+ string name = meta_entry.GetNonUniqueName();
+ CHECK(!name.empty()); // Make sure this isn't an update.
+ // Note: Truncation is also performed in WriteNode::SetTitle(..). But this
+ // call is still necessary to handle any title changes that might originate
+ // elsewhere, or already be persisted in the directory.
+ base::TruncateUTF8ToByteSize(name, 255, &name);
+ sync_entry->set_name(name);
+
+ // Set the non_unique_name. If we do, the server ignores
+ // the |name| value (using |non_unique_name| instead), and will return
+ // in the CommitResponse a unique name if one is generated.
+ // We send both because it may aid in logging.
+ sync_entry->set_non_unique_name(name);
+
+ if (!meta_entry.GetUniqueClientTag().empty()) {
+ sync_entry->set_client_defined_unique_tag(
+ meta_entry.GetUniqueClientTag());
}
- return groups_with_commits;
-}
+ // Deleted items with server-unknown parent ids can be a problem so we set
+ // the parent to 0. (TODO(sync): Still true in protocol?).
+ Id new_parent_id;
+ if (meta_entry.GetIsDel() &&
+ !meta_entry.GetParentId().ServerKnows()) {
+ new_parent_id = syncable::BaseTransaction::root_id();
+ } else {
+ new_parent_id = meta_entry.GetParentId();
+ }
+ sync_entry->set_parent_id_string(SyncableIdToProto(new_parent_id));
+
+ // If our parent has changed, send up the old one so the server
+ // can correctly deal with multiple parents.
+ // TODO(nick): With the server keeping track of the primary sync parent,
+ // it should not be necessary to provide the old_parent_id: the version
+ // number should suffice.
+ if (new_parent_id != meta_entry.GetServerParentId() &&
+ 0 != meta_entry.GetBaseVersion() &&
+ syncable::CHANGES_VERSION != meta_entry.GetBaseVersion()) {
+ sync_entry->set_old_parent_id(
+ SyncableIdToProto(meta_entry.GetServerParentId()));
+ }
+ int64 version = meta_entry.GetBaseVersion();
+ if (syncable::CHANGES_VERSION == version || 0 == version) {
+ // Undeletions are only supported for items that have a client tag.
+ DCHECK(!id.ServerKnows() ||
+ !meta_entry.GetUniqueClientTag().empty())
+ << meta_entry;
-SyncerError ProcessCommitResponseCommand::ModelChangingExecuteImpl(
- SyncSession* session) {
- syncable::Directory* dir = session->context()->directory();
- StatusController* status = session->mutable_status_controller();
- const CommitResponse& cr = commit_response_.commit();
- const sync_pb::CommitMessage& commit_message = commit_message_.commit();
-
- int transient_error_commits = 0;
- int conflicting_commits = 0;
- int error_commits = 0;
- int successes = 0;
-
- set<syncable::Id> deleted_folders;
- OrderedCommitSet::Projection proj = status->commit_id_projection(
- commit_set_);
-
- if (!proj.empty()) { // Scope for WriteTransaction.
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- for (size_t i = 0; i < proj.size(); i++) {
- CommitResponse::ResponseType response_type = ProcessSingleCommitResponse(
- &trans,
- cr.entryresponse(proj[i]),
- commit_message.entries(proj[i]),
- commit_set_.GetCommitHandleAt(proj[i]),
- &deleted_folders);
- switch (response_type) {
- case CommitResponse::INVALID_MESSAGE:
- ++error_commits;
- break;
- case CommitResponse::CONFLICT:
- ++conflicting_commits;
- status->increment_num_server_conflicts();
- break;
- case CommitResponse::SUCCESS:
- // TODO(sync): worry about sync_rate_ rate calc?
- ++successes;
- if (commit_set_.GetModelTypeAt(proj[i]) == BOOKMARKS)
- status->increment_num_successful_bookmark_commits();
- status->increment_num_successful_commits();
- break;
- case CommitResponse::OVER_QUOTA:
- // We handle over quota like a retry, which is same as transient.
- case CommitResponse::RETRY:
- case CommitResponse::TRANSIENT_ERROR:
- ++transient_error_commits;
- break;
- default:
- LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
- }
- }
+ // Version 0 means to create or undelete an object.
+ sync_entry->set_version(0);
+ } else {
+ DCHECK(id.ServerKnows()) << meta_entry;
+ sync_entry->set_version(meta_entry.GetBaseVersion());
}
+ sync_entry->set_ctime(TimeToProtoTime(meta_entry.GetCtime()));
+ sync_entry->set_mtime(TimeToProtoTime(meta_entry.GetMtime()));
- MarkDeletedChildrenSynced(dir, &deleted_folders);
-
- int commit_count = static_cast<int>(proj.size());
- if (commit_count == successes) {
- return SYNCER_OK;
- } else if (error_commits > 0) {
- return SERVER_RETURN_UNKNOWN_ERROR;
- } else if (transient_error_commits > 0) {
- return SERVER_RETURN_TRANSIENT_ERROR;
- } else if (conflicting_commits > 0) {
- // This means that the server already has an item with this version, but
- // we haven't seen that update yet.
- //
- // A well-behaved client should respond to this by proceeding to the
- // download updates phase, fetching the conflicting items, then attempting
- // to resolve the conflict. That's not what this client does.
- //
- // We don't currently have any code to support that exceptional control
- // flow. Instead, we abort the current sync cycle and start a new one. The
- // end result is the same.
- return SERVER_RETURN_CONFLICT;
+ // Deletion is final on the server, let's move things and then delete them.
+ if (meta_entry.GetIsDel()) {
+ sync_entry->set_deleted(true);
} else {
- LOG(FATAL) << "Inconsistent counts when processing commit response";
- return SYNCER_OK;
+ if (meta_entry.GetSpecifics().has_bookmark()) {
+ // Both insert_after_item_id and position_in_parent fields are set only
+ // for legacy reasons. See comments in sync.proto for more information.
+ const Id& prev_id = meta_entry.GetPredecessorId();
+ string prev_id_string =
+ prev_id.IsRoot() ? string() : prev_id.GetServerId();
+ sync_entry->set_insert_after_item_id(prev_id_string);
+ sync_entry->set_position_in_parent(
+ meta_entry.GetUniquePosition().ToInt64());
+ meta_entry.GetUniquePosition().ToProto(
+ sync_entry->mutable_unique_position());
+ }
+ SetEntrySpecifics(meta_entry, sync_entry);
}
}
+
+// Helpers for ProcessSingleCommitResponse.
+namespace {
+
void LogServerError(const sync_pb::CommitResponse_EntryResponse& res) {
if (res.has_error_message())
LOG(WARNING) << " " << res.error_message();
@@ -164,79 +189,7 @@ void LogServerError(const sync_pb::CommitResponse_EntryResponse& res) {
LOG(WARNING) << " No detailed error message returned from server";
}
-CommitResponse::ResponseType
-ProcessCommitResponseCommand::ProcessSingleCommitResponse(
- syncable::WriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& server_entry,
- const sync_pb::SyncEntity& commit_request_entry,
- const int64 metahandle,
- set<syncable::Id>* deleted_folders) {
- MutableEntry local_entry(trans, GET_BY_HANDLE, metahandle);
- CHECK(local_entry.good());
- bool syncing_was_set = local_entry.GetSyncing();
- local_entry.PutSyncing(false);
-
- CommitResponse::ResponseType response = (CommitResponse::ResponseType)
- server_entry.response_type();
- if (!CommitResponse::ResponseType_IsValid(response)) {
- LOG(ERROR) << "Commit response has unknown response type! Possibly out "
- "of date client?";
- return CommitResponse::INVALID_MESSAGE;
- }
- if (CommitResponse::TRANSIENT_ERROR == response) {
- DVLOG(1) << "Transient Error Committing: " << local_entry;
- LogServerError(server_entry);
- return CommitResponse::TRANSIENT_ERROR;
- }
- if (CommitResponse::INVALID_MESSAGE == response) {
- LOG(ERROR) << "Error Commiting: " << local_entry;
- LogServerError(server_entry);
- return response;
- }
- if (CommitResponse::CONFLICT == response) {
- DVLOG(1) << "Conflict Committing: " << local_entry;
- return response;
- }
- if (CommitResponse::RETRY == response) {
- DVLOG(1) << "Retry Committing: " << local_entry;
- return response;
- }
- if (CommitResponse::OVER_QUOTA == response) {
- LOG(WARNING) << "Hit deprecated OVER_QUOTA Committing: " << local_entry;
- return response;
- }
- if (!server_entry.has_id_string()) {
- LOG(ERROR) << "Commit response has no id";
- return CommitResponse::INVALID_MESSAGE;
- }
-
- // Implied by the IsValid call above, but here for clarity.
- DCHECK_EQ(CommitResponse::SUCCESS, response) << response;
- // Check to see if we've been given the ID of an existing entry. If so treat
- // it as an error response and retry later.
- const syncable::Id& server_entry_id =
- SyncableIdFromProto(server_entry.id_string());
- if (local_entry.GetId() != server_entry_id) {
- Entry e(trans, GET_BY_ID, server_entry_id);
- if (e.good()) {
- LOG(ERROR)
- << "Got duplicate id when commiting id: "
- << local_entry.GetId()
- << ". Treating as an error return";
- return CommitResponse::INVALID_MESSAGE;
- }
- }
-
- if (server_entry.version() == 0) {
- LOG(WARNING) << "Server returned a zero version on a commit response.";
- }
-
- ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
- local_entry.GetId(), &local_entry, syncing_was_set, deleted_folders);
- return response;
-}
-
-const string& ProcessCommitResponseCommand::GetResultingPostCommitName(
+const string& GetResultingPostCommitName(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response) {
const string& response_name =
@@ -246,11 +199,11 @@ const string& ProcessCommitResponseCommand::GetResultingPostCommitName(
return SyncerProtoUtil::NameFromSyncEntity(committed_entry);
}
-bool ProcessCommitResponseCommand::UpdateVersionAfterCommit(
+bool UpdateVersionAfterCommit(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response,
const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry) {
+ syncable::ModelNeutralMutableEntry* local_entry) {
int64 old_version = local_entry->GetBaseVersion();
int64 new_version = entry_response.version();
bool bad_commit_version = false;
@@ -283,11 +236,11 @@ bool ProcessCommitResponseCommand::UpdateVersionAfterCommit(
return true;
}
-bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
+bool ChangeIdAfterCommit(
const sync_pb::CommitResponse_EntryResponse& entry_response,
const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry) {
- syncable::WriteTransaction* trans = local_entry->write_transaction();
+ syncable::ModelNeutralMutableEntry* local_entry) {
+ syncable::BaseWriteTransaction* trans = local_entry->base_write_transaction();
const syncable::Id& entry_response_id =
SyncableIdFromProto(entry_response.id_string());
if (entry_response_id != pre_commit_id) {
@@ -297,7 +250,10 @@ bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
DVLOG(1) << " ID changed while committing an old entry. "
<< pre_commit_id << " became " << entry_response_id << ".";
}
- MutableEntry same_id(trans, GET_BY_ID, entry_response_id);
+ syncable::ModelNeutralMutableEntry same_id(
+ trans,
+ syncable::GET_BY_ID,
+ entry_response_id);
// We should trap this before this function.
if (same_id.good()) {
LOG(ERROR) << "ID clash with id " << entry_response_id
@@ -310,10 +266,10 @@ bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
return true;
}
-void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
+void UpdateServerFieldsAfterCommit(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response,
- syncable::MutableEntry* local_entry) {
+ syncable::ModelNeutralMutableEntry* local_entry) {
// We just committed an entry successfully, and now we want to make our view
// of the server state consistent with the server state. We must be careful;
@@ -361,10 +317,11 @@ void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
}
}
-void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
+void ProcessSuccessfulCommitResponse(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+ const syncable::Id& pre_commit_id,
+ syncable::ModelNeutralMutableEntry* local_entry,
bool syncing_was_set, set<syncable::Id>* deleted_folders) {
DCHECK(local_entry->GetIsUnsynced());
@@ -402,4 +359,82 @@ void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
}
}
+} // namespace
+
+sync_pb::CommitResponse::ResponseType
+ProcessSingleCommitResponse(
+ syncable::BaseWriteTransaction* trans,
+ const sync_pb::CommitResponse_EntryResponse& server_entry,
+ const sync_pb::SyncEntity& commit_request_entry,
+ int64 metahandle,
+ set<syncable::Id>* deleted_folders) {
+ syncable::ModelNeutralMutableEntry local_entry(
+ trans,
+ syncable::GET_BY_HANDLE,
+ metahandle);
+ CHECK(local_entry.good());
+ bool syncing_was_set = local_entry.GetSyncing();
+ local_entry.PutSyncing(false);
+
+ sync_pb::CommitResponse::ResponseType response = server_entry.response_type();
+ if (!sync_pb::CommitResponse::ResponseType_IsValid(response)) {
+ LOG(ERROR) << "Commit response has unknown response type! Possibly out "
+ "of date client?";
+ return sync_pb::CommitResponse::INVALID_MESSAGE;
+ }
+ if (sync_pb::CommitResponse::TRANSIENT_ERROR == response) {
+ DVLOG(1) << "Transient Error Committing: " << local_entry;
+ LogServerError(server_entry);
+ return sync_pb::CommitResponse::TRANSIENT_ERROR;
+ }
+ if (sync_pb::CommitResponse::INVALID_MESSAGE == response) {
+ LOG(ERROR) << "Error Commiting: " << local_entry;
+ LogServerError(server_entry);
+ return response;
+ }
+ if (sync_pb::CommitResponse::CONFLICT == response) {
+ DVLOG(1) << "Conflict Committing: " << local_entry;
+ return response;
+ }
+ if (sync_pb::CommitResponse::RETRY == response) {
+ DVLOG(1) << "Retry Committing: " << local_entry;
+ return response;
+ }
+ if (sync_pb::CommitResponse::OVER_QUOTA == response) {
+ LOG(WARNING) << "Hit deprecated OVER_QUOTA Committing: " << local_entry;
+ return response;
+ }
+ if (!server_entry.has_id_string()) {
+ LOG(ERROR) << "Commit response has no id";
+ return sync_pb::CommitResponse::INVALID_MESSAGE;
+ }
+
+ // Implied by the IsValid call above, but here for clarity.
+ DCHECK_EQ(sync_pb::CommitResponse::SUCCESS, response) << response;
+ // Check to see if we've been given the ID of an existing entry. If so treat
+ // it as an error response and retry later.
+ const syncable::Id& server_entry_id =
+ SyncableIdFromProto(server_entry.id_string());
+ if (local_entry.GetId() != server_entry_id) {
+ Entry e(trans, syncable::GET_BY_ID, server_entry_id);
+ if (e.good()) {
+ LOG(ERROR)
+ << "Got duplicate id when commiting id: "
+ << local_entry.GetId()
+ << ". Treating as an error return";
+ return sync_pb::CommitResponse::INVALID_MESSAGE;
+ }
+ }
+
+ if (server_entry.version() == 0) {
+ LOG(WARNING) << "Server returned a zero version on a commit response.";
+ }
+
+ ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
+ local_entry.GetId(), &local_entry, syncing_was_set, deleted_folders);
+ return response;
+}
+
+} // namespace commit_util
+
} // namespace syncer
diff --git a/chromium/sync/engine/commit_util.h b/chromium/sync/engine/commit_util.h
new file mode 100644
index 00000000000..387bdcf95e2
--- /dev/null
+++ b/chromium/sync/engine/commit_util.h
@@ -0,0 +1,64 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
+#define SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/util/extensions_activity.h"
+
+namespace sync_pb {
+class CommitMessage;
+class SyncEntity;
+}
+
+namespace syncer {
+
+namespace syncable {
+class BaseTransaction;
+class Entry;
+class Id;
+class BaseWriteTransaction;
+}
+
+namespace commit_util {
+
+// Adds the bookmark extensions activity report to |message|.
+SYNC_EXPORT_PRIVATE void AddExtensionsActivityToMessage(
+ ExtensionsActivity* activity,
+ ExtensionsActivity::Records* extensions_activity_buffer,
+ sync_pb::CommitMessage* message);
+
+// Fills the config_params field of |message|.
+SYNC_EXPORT_PRIVATE void AddClientConfigParamsToMessage(
+ ModelTypeSet enabled_types,
+ sync_pb::CommitMessage* message);
+
+// Takes a snapshot of |meta_entry| and puts it into a protobuf suitable for use
+// in a commit request message.
+SYNC_EXPORT_PRIVATE void BuildCommitItem(
+ const syncable::Entry& meta_entry,
+ sync_pb::SyncEntity* sync_entry);
+
+// Processes a single commit response. Updates the entry's SERVER fields using
+// |server_entry| and |commit_request_entry|.
+//
+// The |deleted_folders| parameter is a set of IDs that represent deleted
+// folders. This function will add its entry's ID to this set if it finds
+// itself processing a folder deletion.
+SYNC_EXPORT_PRIVATE
+sync_pb::CommitResponse::ResponseType ProcessSingleCommitResponse(
+ syncable::BaseWriteTransaction* trans,
+ const sync_pb::CommitResponse_EntryResponse& server_entry,
+ const sync_pb::SyncEntity& commit_request_entry,
+ int64 metahandle,
+ std::set<syncable::Id>* deleted_folders);
+
+} // namespace commit_util
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
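As a rough illustration of the commit_util helpers declared above, the
hypothetical sketch below assembles the shared portions of a commit message.
The enabled-type set, ExtensionsActivity instance, account name, and cache
GUID are assumed inputs; BuildCommitPreamble is an invented name.

// Hypothetical sketch for illustration.
#include <string>

#include "sync/engine/commit_util.h"
#include "sync/protocol/sync.pb.h"

namespace syncer {

void BuildCommitPreamble(ModelTypeSet enabled_types,
                         ExtensionsActivity* activity,
                         const std::string& account_name,
                         const std::string& cache_guid,
                         sync_pb::ClientToServerMessage* message) {
  message->set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
  message->set_share(account_name);

  sync_pb::CommitMessage* commit_message = message->mutable_commit();
  commit_message->set_cache_guid(cache_guid);

  // Report which (non-proxy) enabled types this client is configured with.
  commit_util::AddClientConfigParamsToMessage(enabled_types, commit_message);

  // Attach bookmark extensions activity; the records are buffered so they can
  // be restored to the monitor if the commit later fails.
  ExtensionsActivity::Records buffer;
  commit_util::AddExtensionsActivityToMessage(activity, &buffer,
                                              commit_message);
}

}  // namespace syncer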
diff --git a/chromium/sync/engine/download.cc b/chromium/sync/engine/download.cc
index acda8349edc..2bc7f7a0a9d 100644
--- a/chromium/sync/engine/download.cc
+++ b/chromium/sync/engine/download.cc
@@ -7,8 +7,8 @@
#include <string>
#include "base/command_line.h"
-#include "sync/engine/process_updates_command.h"
-#include "sync/engine/store_timestamps_command.h"
+#include "sync/engine/process_updates_util.h"
+#include "sync/engine/sync_directory_update_handler.h"
#include "sync/engine/syncer.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/sessions/nudge_tracker.h"
@@ -16,8 +16,6 @@
#include "sync/syncable/nigori_handler.h"
#include "sync/syncable/syncable_read_transaction.h"
-using sync_pb::DebugInfo;
-
namespace syncer {
using sessions::StatusController;
@@ -25,8 +23,12 @@ using sessions::SyncSession;
using sessions::SyncSessionContext;
using std::string;
+namespace download {
+
namespace {
+typedef std::map<ModelType, size_t> TypeToIndexMap;
+
SyncerError HandleGetEncryptionKeyResponse(
const sync_pb::ClientToServerResponse& update_response,
syncable::Directory* dir) {
@@ -78,27 +80,10 @@ bool ShouldRequestEncryptionKey(
return need_encryption_key;
}
-void AppendClientDebugInfoIfNeeded(
- SyncSession* session,
- DebugInfo* debug_info) {
- // We want to send the debug info only once per sync cycle. Check if it has
- // already been sent.
- if (!session->status_controller().debug_info_sent()) {
- DVLOG(1) << "Sending client debug info ...";
- // Could be null in some unit tests.
- if (session->context()->debug_info_getter()) {
- session->context()->debug_info_getter()->GetAndClearDebugInfo(
- debug_info);
- }
- session->mutable_status_controller()->set_debug_info_sent();
- }
-}
-
-void InitDownloadUpdatesRequest(
+void InitDownloadUpdatesContext(
SyncSession* session,
bool create_mobile_bookmarks_folder,
- sync_pb::ClientToServerMessage* message,
- ModelTypeSet request_types) {
+ sync_pb::ClientToServerMessage* message) {
message->set_share(session->context()->account_name());
message->set_message_contents(sync_pb::ClientToServerMessage::GET_UPDATES);
@@ -109,9 +94,6 @@ void InitDownloadUpdatesRequest(
// (e.g. Bookmark URLs but not their containing folders).
get_updates->set_fetch_folders(true);
- DebugInfo* debug_info = message->mutable_debug_info();
- AppendClientDebugInfoIfNeeded(session, debug_info);
-
get_updates->set_create_mobile_bookmarks_folder(
create_mobile_bookmarks_folder);
bool need_encryption_key = ShouldRequestEncryptionKey(session->context());
@@ -120,21 +102,95 @@ void InitDownloadUpdatesRequest(
// Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
get_updates->mutable_caller_info()->set_notifications_enabled(
session->context()->notifications_enabled());
+}
- StatusController* status = session->mutable_status_controller();
- status->set_updates_request_types(request_types);
-
- syncable::Directory* dir = session->context()->directory();
- for (ModelTypeSet::Iterator it = request_types.First();
+void InitDownloadUpdatesProgress(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* handler_map,
+ sync_pb::GetUpdatesMessage* get_updates) {
+ for (ModelTypeSet::Iterator it = proto_request_types.First();
it.Good(); it.Inc()) {
- if (ProxyTypes().Has(it.Get()))
- continue;
+ UpdateHandlerMap::iterator handler_it = handler_map->find(it.Get());
+ DCHECK(handler_it != handler_map->end());
sync_pb::DataTypeProgressMarker* progress_marker =
get_updates->add_from_progress_marker();
- dir->GetDownloadProgress(it.Get(), progress_marker);
+ handler_it->second->GetDownloadProgress(progress_marker);
+ }
+}
+
+// Builds a map of ModelTypes to indices to progress markers in the given
+// |gu_response| message. The map is returned in the |index_map| parameter.
+void PartitionProgressMarkersByType(
+ const sync_pb::GetUpdatesResponse& gu_response,
+ ModelTypeSet request_types,
+ TypeToIndexMap* index_map) {
+ for (int i = 0; i < gu_response.new_progress_marker_size(); ++i) {
+ int field_number = gu_response.new_progress_marker(i).data_type_id();
+ ModelType model_type = GetModelTypeFromSpecificsFieldNumber(field_number);
+ if (!IsRealDataType(model_type)) {
+ DLOG(WARNING) << "Unknown field number " << field_number;
+ continue;
+ }
+ if (!request_types.Has(model_type)) {
+ DLOG(WARNING)
+ << "Skipping unexpected progress marker for non-enabled type "
+ << ModelTypeToString(model_type);
+ continue;
+ }
+ index_map->insert(std::make_pair(model_type, i));
}
}
+// Examines the contents of the GetUpdates response message and forwards
+// relevant data to the UpdateHandlers for processing and persisting.
+bool ProcessUpdateResponseContents(
+ const sync_pb::GetUpdatesResponse& gu_response,
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* handler_map,
+ StatusController* status) {
+ TypeSyncEntityMap updates_by_type;
+ PartitionUpdatesByType(gu_response, proto_request_types, &updates_by_type);
+ DCHECK_EQ(proto_request_types.Size(), updates_by_type.size());
+
+ TypeToIndexMap progress_index_by_type;
+ PartitionProgressMarkersByType(gu_response,
+ proto_request_types,
+ &progress_index_by_type);
+ if (proto_request_types.Size() != progress_index_by_type.size()) {
+ NOTREACHED() << "Missing progress markers in GetUpdates response.";
+ return false;
+ }
+
+ // Iterate over these maps in parallel, processing updates for each type.
+ TypeToIndexMap::iterator progress_marker_iter =
+ progress_index_by_type.begin();
+ TypeSyncEntityMap::iterator updates_iter = updates_by_type.begin();
+ for ( ; (progress_marker_iter != progress_index_by_type.end()
+ && updates_iter != updates_by_type.end());
+ ++progress_marker_iter, ++updates_iter) {
+ DCHECK_EQ(progress_marker_iter->first, updates_iter->first);
+ ModelType type = progress_marker_iter->first;
+
+ UpdateHandlerMap::iterator update_handler_iter = handler_map->find(type);
+
+ if (update_handler_iter != handler_map->end()) {
+ update_handler_iter->second->ProcessGetUpdatesResponse(
+ gu_response.new_progress_marker(progress_marker_iter->second),
+ updates_iter->second,
+ status);
+ } else {
+ DLOG(WARNING)
+ << "Ignoring received updates of a type we can't handle. "
+ << "Type is: " << ModelTypeToString(type);
+ continue;
+ }
+ }
+ DCHECK(progress_marker_iter == progress_index_by_type.end()
+ && updates_iter == updates_by_type.end());
+
+ return true;
+}
+
} // namespace
void BuildNormalDownloadUpdates(
@@ -143,19 +199,35 @@ void BuildNormalDownloadUpdates(
ModelTypeSet request_types,
const sessions::NudgeTracker& nudge_tracker,
sync_pb::ClientToServerMessage* client_to_server_message) {
- InitDownloadUpdatesRequest(
- session,
- create_mobile_bookmarks_folder,
- client_to_server_message,
- request_types);
- sync_pb::GetUpdatesMessage* get_updates =
- client_to_server_message->mutable_get_updates();
-
// Request updates for all requested types.
DVLOG(1) << "Getting updates for types "
<< ModelTypeSetToString(request_types);
DCHECK(!request_types.Empty());
+ InitDownloadUpdatesContext(
+ session,
+ create_mobile_bookmarks_folder,
+ client_to_server_message);
+
+ BuildNormalDownloadUpdatesImpl(
+ Intersection(request_types, ProtocolTypes()),
+ session->context()->update_handler_map(),
+ nudge_tracker,
+ client_to_server_message->mutable_get_updates());
+}
+
+void BuildNormalDownloadUpdatesImpl(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* update_handler_map,
+ const sessions::NudgeTracker& nudge_tracker,
+ sync_pb::GetUpdatesMessage* get_updates) {
+ DCHECK(!proto_request_types.Empty());
+
+ InitDownloadUpdatesProgress(
+ proto_request_types,
+ update_handler_map,
+ get_updates);
+
// Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
get_updates->mutable_caller_info()->set_source(
nudge_tracker.updates_source());
@@ -186,18 +258,32 @@ void BuildDownloadUpdatesForConfigure(
sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
ModelTypeSet request_types,
sync_pb::ClientToServerMessage* client_to_server_message) {
- InitDownloadUpdatesRequest(
- session,
- create_mobile_bookmarks_folder,
- client_to_server_message,
- request_types);
- sync_pb::GetUpdatesMessage* get_updates =
- client_to_server_message->mutable_get_updates();
-
// Request updates for all enabled types.
DVLOG(1) << "Initial download for types "
<< ModelTypeSetToString(request_types);
- DCHECK(!request_types.Empty());
+
+ InitDownloadUpdatesContext(
+ session,
+ create_mobile_bookmarks_folder,
+ client_to_server_message);
+ BuildDownloadUpdatesForConfigureImpl(
+ Intersection(request_types, ProtocolTypes()),
+ session->context()->update_handler_map(),
+ source,
+ client_to_server_message->mutable_get_updates());
+}
+
+void BuildDownloadUpdatesForConfigureImpl(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* update_handler_map,
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
+ sync_pb::GetUpdatesMessage* get_updates) {
+ DCHECK(!proto_request_types.Empty());
+
+ InitDownloadUpdatesProgress(
+ proto_request_types,
+ update_handler_map,
+ get_updates);
// Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
get_updates->mutable_caller_info()->set_source(source);
@@ -213,17 +299,29 @@ void BuildDownloadUpdatesForPoll(
bool create_mobile_bookmarks_folder,
ModelTypeSet request_types,
sync_pb::ClientToServerMessage* client_to_server_message) {
- InitDownloadUpdatesRequest(
+ DVLOG(1) << "Polling for types "
+ << ModelTypeSetToString(request_types);
+
+ InitDownloadUpdatesContext(
session,
create_mobile_bookmarks_folder,
- client_to_server_message,
- request_types);
- sync_pb::GetUpdatesMessage* get_updates =
- client_to_server_message->mutable_get_updates();
+ client_to_server_message);
+ BuildDownloadUpdatesForPollImpl(
+ Intersection(request_types, ProtocolTypes()),
+ session->context()->update_handler_map(),
+ client_to_server_message->mutable_get_updates());
+}
- DVLOG(1) << "Polling for types "
- << ModelTypeSetToString(request_types);
- DCHECK(!request_types.Empty());
+void BuildDownloadUpdatesForPollImpl(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* update_handler_map,
+ sync_pb::GetUpdatesMessage* get_updates) {
+ DCHECK(!proto_request_types.Empty());
+
+ InitDownloadUpdatesProgress(
+ proto_request_types,
+ update_handler_map,
+ get_updates);
// Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
get_updates->mutable_caller_info()->set_source(
@@ -234,12 +332,18 @@ void BuildDownloadUpdatesForPoll(
}
SyncerError ExecuteDownloadUpdates(
+ ModelTypeSet request_types,
SyncSession* session,
sync_pb::ClientToServerMessage* msg) {
sync_pb::ClientToServerResponse update_response;
StatusController* status = session->mutable_status_controller();
bool need_encryption_key = ShouldRequestEncryptionKey(session->context());
+ if (session->context()->debug_info_getter()) {
+ sync_pb::DebugInfo* debug_info = msg->mutable_debug_info();
+ CopyClientDebugInfo(session->context()->debug_info_getter(), debug_info);
+ }
+
SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
msg,
&update_response,
@@ -249,32 +353,74 @@ SyncerError ExecuteDownloadUpdates(
update_response);
if (result != SYNCER_OK) {
- status->mutable_updates_response()->Clear();
LOG(ERROR) << "PostClientToServerMessage() failed during GetUpdates";
- } else {
- status->mutable_updates_response()->CopyFrom(update_response);
-
- DVLOG(1) << "GetUpdates "
- << " returned " << update_response.get_updates().entries_size()
- << " updates and indicated "
- << update_response.get_updates().changes_remaining()
- << " updates left on server.";
-
- if (need_encryption_key ||
- update_response.get_updates().encryption_keys_size() > 0) {
- syncable::Directory* dir = session->context()->directory();
- status->set_last_get_key_result(
- HandleGetEncryptionKeyResponse(update_response, dir));
- }
+ return result;
+ }
+
+ DVLOG(1) << "GetUpdates "
+ << " returned " << update_response.get_updates().entries_size()
+ << " updates and indicated "
+ << update_response.get_updates().changes_remaining()
+ << " updates left on server.";
+
+ if (session->context()->debug_info_getter()) {
+ // Clear debug info now that we have successfully sent it to the server.
+ DVLOG(1) << "Clearing client debug info.";
+ session->context()->debug_info_getter()->ClearDebugInfo();
+ }
+
+ if (need_encryption_key ||
+ update_response.get_updates().encryption_keys_size() > 0) {
+ syncable::Directory* dir = session->context()->directory();
+ status->set_last_get_key_result(
+ HandleGetEncryptionKeyResponse(update_response, dir));
+ }
+
+ const ModelTypeSet proto_request_types =
+ Intersection(request_types, ProtocolTypes());
+
+ return ProcessResponse(update_response.get_updates(),
+ proto_request_types,
+ session->context()->update_handler_map(),
+ status);
+}
+
+SyncerError ProcessResponse(
+ const sync_pb::GetUpdatesResponse& gu_response,
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* handler_map,
+ StatusController* status) {
+ status->increment_num_updates_downloaded_by(gu_response.entries_size());
+
+ // The changes remaining field is used to prevent the client from looping. If
+ // that field is being set incorrectly, we're in big trouble.
+ if (!gu_response.has_changes_remaining()) {
+ return SERVER_RESPONSE_VALIDATION_FAILED;
}
+ status->set_num_server_changes_remaining(gu_response.changes_remaining());
- ProcessUpdatesCommand process_updates;
- process_updates.Execute(session);
- StoreTimestampsCommand store_timestamps;
- store_timestamps.Execute(session);
+ if (!ProcessUpdateResponseContents(gu_response,
+ proto_request_types,
+ handler_map,
+ status)) {
+ return SERVER_RESPONSE_VALIDATION_FAILED;
+ }
+
+ if (gu_response.changes_remaining() == 0) {
+ return SYNCER_OK;
+ } else {
+ return SERVER_MORE_TO_DOWNLOAD;
+ }
+}
- return result;
+void CopyClientDebugInfo(
+ sessions::DebugInfoGetter* debug_info_getter,
+ sync_pb::DebugInfo* debug_info) {
+ DVLOG(1) << "Copying client debug info to send.";
+ debug_info_getter->GetDebugInfo(debug_info);
}
+} // namespace download
+
} // namespace syncer
diff --git a/chromium/sync/engine/download.h b/chromium/sync/engine/download.h
index ddae79b115b..5bc08d434e1 100644
--- a/chromium/sync/engine/download.h
+++ b/chromium/sync/engine/download.h
@@ -6,6 +6,7 @@
#define SYNC_ENGINE_DOWNLOAD_H_
#include "sync/base/sync_export.h"
+#include "sync/engine/sync_directory_update_handler.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/util/syncer_error.h"
#include "sync/protocol/sync.pb.h"
@@ -17,11 +18,13 @@ class DebugInfo;
namespace syncer {
namespace sessions {
+class DebugInfoGetter;
class NudgeTracker;
+class StatusController;
class SyncSession;
} // namespace sessions
-class Syncer;
+namespace download {
// This function executes a single GetUpdate request and stores the response in
// the session's StatusController. It constructs the type of request used to
@@ -33,6 +36,13 @@ SYNC_EXPORT_PRIVATE void BuildNormalDownloadUpdates(
const sessions::NudgeTracker& nudge_tracker,
sync_pb::ClientToServerMessage* client_to_server_message);
+// Helper function. Defined here for testing.
+SYNC_EXPORT_PRIVATE void BuildNormalDownloadUpdatesImpl(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* update_handler_map,
+ const sessions::NudgeTracker& nudge_tracker,
+ sync_pb::GetUpdatesMessage* get_updates);
+
// This function executes a single GetUpdate request and stores the response in
// the session's StatusController. It constructs the type of request used to
// initialize a type for the first time.
@@ -43,6 +53,13 @@ SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForConfigure(
ModelTypeSet request_types,
sync_pb::ClientToServerMessage* client_to_server_message);
+// Helper function. Defined here for testing.
+SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForConfigureImpl(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* update_handler_map,
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
+ sync_pb::GetUpdatesMessage* get_updates);
+
// This function executes a single GetUpdate request and stores the response in
// the session's status controller. It constructs the type of request used for
// periodic polling.
@@ -52,12 +69,35 @@ SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForPoll(
ModelTypeSet request_types,
sync_pb::ClientToServerMessage* client_to_server_message);
+// Helper function. Defined here for testing.
+SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForPollImpl(
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* update_handler_map,
+ sync_pb::GetUpdatesMessage* get_updates);
+
// Sends the specified message to the server and stores the response in a member
// of the |session|'s StatusController.
SYNC_EXPORT_PRIVATE SyncerError
- ExecuteDownloadUpdates(sessions::SyncSession* session,
+ ExecuteDownloadUpdates(ModelTypeSet request_types,
+ sessions::SyncSession* session,
sync_pb::ClientToServerMessage* msg);
+// Helper function for processing responses from the server.
+// Defined here for testing.
+SYNC_EXPORT_PRIVATE SyncerError ProcessResponse(
+ const sync_pb::GetUpdatesResponse& gu_response,
+ ModelTypeSet proto_request_types,
+ UpdateHandlerMap* handler_map,
+ sessions::StatusController* status);
+
+// Helper function to copy client debug info from debug_info_getter to
+// debug_info. Defined here for testing.
+SYNC_EXPORT_PRIVATE void CopyClientDebugInfo(
+ sessions::DebugInfoGetter* debug_info_getter,
+ sync_pb::DebugInfo* debug_info);
+
+} // namespace download
+
} // namespace syncer
#endif // SYNC_ENGINE_DOWNLOAD_H_
diff --git a/chromium/sync/engine/download_unittest.cc b/chromium/sync/engine/download_unittest.cc
index fb4431a5c1f..eae627791ab 100644
--- a/chromium/sync/engine/download_unittest.cc
+++ b/chromium/sync/engine/download_unittest.cc
@@ -3,49 +3,102 @@
// found in the LICENSE file.
#include "sync/engine/download.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/stl_util.h"
+#include "sync/engine/sync_directory_update_handler.h"
#include "sync/internal_api/public/base/model_type_test_util.h"
#include "sync/protocol/sync.pb.h"
+#include "sync/sessions/debug_info_getter.h"
#include "sync/sessions/nudge_tracker.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/directory.h"
#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/sessions/mock_debug_info_getter.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
+using sessions::MockDebugInfoGetter;
+
// A test fixture for tests exercising download updates functions.
-class DownloadUpdatesTest : public SyncerCommandTest {
+class DownloadUpdatesTest : public ::testing::Test {
protected:
- DownloadUpdatesTest() {
+ DownloadUpdatesTest()
+ : update_handler_map_deleter_(&update_handler_map_) {
}
virtual void SetUp() {
- workers()->clear();
- mutable_routing_info()->clear();
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- (*mutable_routing_info())[AUTOFILL] = GROUP_DB;
- (*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
- (*mutable_routing_info())[PREFERENCES] = GROUP_UI;
- SyncerCommandTest::SetUp();
+ dir_maker_.SetUp();
+
+ AddUpdateHandler(AUTOFILL, GROUP_DB);
+ AddUpdateHandler(BOOKMARKS, GROUP_UI);
+ AddUpdateHandler(PREFERENCES, GROUP_UI);
+ }
+
+ virtual void TearDown() {
+ dir_maker_.TearDown();
+ }
+
+ ModelTypeSet proto_request_types() {
+ ModelTypeSet types;
+ for (UpdateHandlerMap::iterator it = update_handler_map_.begin();
+ it != update_handler_map_.end(); ++it) {
+ types.Put(it->first);
+ }
+ return types;
+ }
+
+ syncable::Directory* directory() {
+ return dir_maker_.directory();
+ }
+
+ UpdateHandlerMap* update_handler_map() {
+ return &update_handler_map_;
+ }
+
+ void InitFakeUpdateResponse(sync_pb::GetUpdatesResponse* response) {
+ ModelTypeSet types = proto_request_types();
+
+ for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
+ sync_pb::DataTypeProgressMarker* marker =
+ response->add_new_progress_marker();
+ marker->set_data_type_id(GetSpecificsFieldNumberFromModelType(it.Get()));
+ marker->set_token("foobarbaz");
+ }
+
+ response->set_changes_remaining(0);
}
private:
+ void AddUpdateHandler(ModelType type, ModelSafeGroup group) {
+ DCHECK(directory());
+ scoped_refptr<ModelSafeWorker> worker = new FakeModelWorker(group);
+ SyncDirectoryUpdateHandler* handler =
+ new SyncDirectoryUpdateHandler(directory(), type, worker);
+ update_handler_map_.insert(std::make_pair(type, handler));
+ }
+
+ base::MessageLoop loop_; // Needed for directory init.
+ TestDirectorySetterUpper dir_maker_;
+
+ UpdateHandlerMap update_handler_map_;
+ STLValueDeleter<UpdateHandlerMap> update_handler_map_deleter_;
+
DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesTest);
};
-TEST_F(DownloadUpdatesTest, ExecuteNoStates) {
+// Basic test to make sure nudges are expressed properly in the request.
+TEST_F(DownloadUpdatesTest, BookmarkNudge) {
sessions::NudgeTracker nudge_tracker;
nudge_tracker.RecordLocalChange(ModelTypeSet(BOOKMARKS));
- scoped_ptr<sessions::SyncSession> session(
- sessions::SyncSession::Build(context(), delegate()));
sync_pb::ClientToServerMessage msg;
- BuildNormalDownloadUpdates(session.get(),
- false,
- GetRoutingInfoTypes(routing_info()),
- nudge_tracker,
- &msg);
+ download::BuildNormalDownloadUpdatesImpl(proto_request_types(),
+ update_handler_map(),
+ nudge_tracker,
+ msg.mutable_get_updates());
const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::LOCAL,
@@ -54,7 +107,6 @@ TEST_F(DownloadUpdatesTest, ExecuteNoStates) {
for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
gu_msg.from_progress_marker(i).data_type_id());
- EXPECT_TRUE(GetRoutingInfoTypes(routing_info()).Has(type));
const sync_pb::DataTypeProgressMarker& progress_marker =
gu_msg.from_progress_marker(i);
@@ -76,7 +128,8 @@ TEST_F(DownloadUpdatesTest, ExecuteNoStates) {
}
}
-TEST_F(DownloadUpdatesTest, ExecuteWithStates) {
+// Basic test to ensure invalidation payloads are expressed in the request.
+TEST_F(DownloadUpdatesTest, NotifyMany) {
sessions::NudgeTracker nudge_tracker;
nudge_tracker.RecordRemoteInvalidation(
BuildInvalidationMap(AUTOFILL, 1, "autofill_payload"));
@@ -89,14 +142,11 @@ TEST_F(DownloadUpdatesTest, ExecuteWithStates) {
notified_types.Put(BOOKMARKS);
notified_types.Put(PREFERENCES);
- scoped_ptr<sessions::SyncSession> session(
- sessions::SyncSession::Build(context(), delegate()));
sync_pb::ClientToServerMessage msg;
- BuildNormalDownloadUpdates(session.get(),
- false,
- GetRoutingInfoTypes(routing_info()),
- nudge_tracker,
- &msg);
+ download::BuildNormalDownloadUpdatesImpl(proto_request_types(),
+ update_handler_map(),
+ nudge_tracker,
+ msg.mutable_get_updates());
const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
@@ -105,7 +155,6 @@ TEST_F(DownloadUpdatesTest, ExecuteWithStates) {
for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
gu_msg.from_progress_marker(i).data_type_id());
- EXPECT_TRUE(GetRoutingInfoTypes(routing_info()).Has(type));
const sync_pb::DataTypeProgressMarker& progress_marker =
gu_msg.from_progress_marker(i);
@@ -125,45 +174,133 @@ TEST_F(DownloadUpdatesTest, ExecuteWithStates) {
}
}
-// Test that debug info is uploaded only once per sync session.
-TEST_F(DownloadUpdatesTest, VerifyAppendDebugInfo) {
- // Start by expecting that no events are uploaded.
- sessions::NudgeTracker nudge_tracker;
- nudge_tracker.RecordLocalChange(ModelTypeSet(BOOKMARKS));
+TEST_F(DownloadUpdatesTest, ConfigureTest) {
+ sync_pb::ClientToServerMessage msg;
+ download::BuildDownloadUpdatesForConfigureImpl(
+ proto_request_types(),
+ update_handler_map(),
+ sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
+ msg.mutable_get_updates());
+
+ const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
+
+ EXPECT_EQ(sync_pb::SyncEnums::RECONFIGURATION, gu_msg.get_updates_origin());
+ EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
+ gu_msg.caller_info().source());
+
+ ModelTypeSet progress_types;
+ for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
+ syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
+ gu_msg.from_progress_marker(i).data_type_id());
+ progress_types.Put(type);
+ }
+ EXPECT_TRUE(proto_request_types().Equals(progress_types));
+}
+
+TEST_F(DownloadUpdatesTest, PollTest) {
+ sync_pb::ClientToServerMessage msg;
+ download::BuildDownloadUpdatesForPollImpl(
+ proto_request_types(),
+ update_handler_map(),
+ msg.mutable_get_updates());
+
+ const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
+
+ EXPECT_EQ(sync_pb::SyncEnums::PERIODIC, gu_msg.get_updates_origin());
+ EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::PERIODIC,
+ gu_msg.caller_info().source());
+
+ ModelTypeSet progress_types;
+ for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
+ syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
+ gu_msg.from_progress_marker(i).data_type_id());
+ progress_types.Put(type);
+ }
+ EXPECT_TRUE(proto_request_types().Equals(progress_types));
+}
+
+// Verify that a bogus response message is detected.
+TEST_F(DownloadUpdatesTest, InvalidResponse) {
+ sync_pb::GetUpdatesResponse gu_response;
+ InitFakeUpdateResponse(&gu_response);
+
+ // This field is essential for making the client stop looping. If it's unset
+ // then something is very wrong. The client should detect this.
+ gu_response.clear_changes_remaining();
+
+ sessions::StatusController status;
+ SyncerError error = download::ProcessResponse(gu_response,
+ proto_request_types(),
+ update_handler_map(),
+ &status);
+ EXPECT_EQ(error, SERVER_RESPONSE_VALIDATION_FAILED);
+}
+
+// Verify that we correctly detect when there's more work to be done.
+TEST_F(DownloadUpdatesTest, MoreToDownloadResponse) {
+ sync_pb::GetUpdatesResponse gu_response;
+ InitFakeUpdateResponse(&gu_response);
+ gu_response.set_changes_remaining(1);
+
+ sessions::StatusController status;
+ SyncerError error = download::ProcessResponse(gu_response,
+ proto_request_types(),
+ update_handler_map(),
+ &status);
+ EXPECT_EQ(error, SERVER_MORE_TO_DOWNLOAD);
+}
+
+// A simple scenario: No updates returned and nothing more to download.
+TEST_F(DownloadUpdatesTest, NormalResponseTest) {
+ sync_pb::GetUpdatesResponse gu_response;
+ InitFakeUpdateResponse(&gu_response);
+ gu_response.set_changes_remaining(0);
+
+ sessions::StatusController status;
+ SyncerError error = download::ProcessResponse(gu_response,
+ proto_request_types(),
+ update_handler_map(),
+ &status);
+ EXPECT_EQ(error, SYNCER_OK);
+}
+
+class DownloadUpdatesDebugInfoTest : public ::testing::Test {
+ public:
+ DownloadUpdatesDebugInfoTest() {}
+ virtual ~DownloadUpdatesDebugInfoTest() {}
+
+ sessions::StatusController* status() {
+ return &status_;
+ }
+
+ sessions::DebugInfoGetter* debug_info_getter() {
+ return &debug_info_getter_;
+ }
+
+ void AddDebugEvent() {
+ debug_info_getter_.AddDebugEvent();
+ }
+
+ private:
+ sessions::StatusController status_;
+ MockDebugInfoGetter debug_info_getter_;
+};
+
+
+// Verify CopyClientDebugInfo when there are no events to upload.
+TEST_F(DownloadUpdatesDebugInfoTest, VerifyCopyClientDebugInfo_Empty) {
+ sync_pb::DebugInfo debug_info;
+ download::CopyClientDebugInfo(debug_info_getter(), &debug_info);
+ EXPECT_EQ(0, debug_info.events_size());
+}
- sync_pb::ClientToServerMessage msg1;
- scoped_ptr<sessions::SyncSession> session1(
- sessions::SyncSession::Build(context(), delegate()));
- BuildNormalDownloadUpdates(session1.get(),
- false,
- GetRoutingInfoTypes(routing_info()),
- nudge_tracker,
- &msg1);
- EXPECT_EQ(0, msg1.debug_info().events_size());
-
- // Create a new session, record an event, and try again.
- scoped_ptr<sessions::SyncSession> session2(
- sessions::SyncSession::Build(context(), delegate()));
- DataTypeConfigurationStats stats;
- stats.model_type = BOOKMARKS;
- debug_info_event_listener()->OnDataTypeConfigureComplete(
- std::vector<DataTypeConfigurationStats>(1, stats));
- sync_pb::ClientToServerMessage msg2;
- BuildNormalDownloadUpdates(session2.get(),
- false,
- GetRoutingInfoTypes(routing_info()),
- nudge_tracker,
- &msg2);
- EXPECT_EQ(1, msg2.debug_info().events_size());
-
- // Events should never be sent up more than once per session.
- sync_pb::ClientToServerMessage msg3;
- BuildNormalDownloadUpdates(session2.get(),
- false,
- GetRoutingInfoTypes(routing_info()),
- nudge_tracker,
- &msg3);
- EXPECT_EQ(0, msg3.debug_info().events_size());
+TEST_F(DownloadUpdatesDebugInfoTest, VerifyCopyOverwrites) {
+ sync_pb::DebugInfo debug_info;
+ AddDebugEvent();
+ download::CopyClientDebugInfo(debug_info_getter(), &debug_info);
+ EXPECT_EQ(1, debug_info.events_size());
+ download::CopyClientDebugInfo(debug_info_getter(), &debug_info);
+ EXPECT_EQ(1, debug_info.events_size());
}
} // namespace syncer
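
The new fixture above keeps its SyncDirectoryUpdateHandler instances as raw pointers in update_handler_map_ and relies on an STLValueDeleter member, declared after the map, to free them on teardown. A minimal standalone analogue of that ownership pattern, using hypothetical FakeUpdateHandler and ValueDeleter types in place of the Chromium ones, looks like this:

// Standalone analogue of the update_handler_map_ / STLValueDeleter pattern
// used by the fixture above: a map owns heap-allocated handlers and a small
// scope guard deletes the values when the owner goes away. The types here
// are illustrative, not the Chromium ones.
#include <iostream>
#include <map>

struct FakeUpdateHandler {
  explicit FakeUpdateHandler(int type) : type(type) {}
  int type;
};

typedef std::map<int, FakeUpdateHandler*> HandlerMap;

// Minimal stand-in for STLValueDeleter<HandlerMap>.
class ValueDeleter {
 public:
  explicit ValueDeleter(HandlerMap* map) : map_(map) {}
  ~ValueDeleter() {
    for (HandlerMap::iterator it = map_->begin(); it != map_->end(); ++it)
      delete it->second;
  }
 private:
  HandlerMap* map_;
};

int main() {
  HandlerMap handlers;
  ValueDeleter deleter(&handlers);  // Declared after the map, destroyed first.
  handlers[1] = new FakeUpdateHandler(1);  // e.g. AUTOFILL
  handlers[2] = new FakeUpdateHandler(2);  // e.g. BOOKMARKS
  std::cout << handlers.size() << " handlers registered\n";
  return 0;  // deleter frees both handlers before the map is destroyed.
}
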
diff --git a/chromium/sync/engine/get_commit_ids.cc b/chromium/sync/engine/get_commit_ids.cc
index cc437ce7d2f..42faf21d435 100644
--- a/chromium/sync/engine/get_commit_ids.cc
+++ b/chromium/sync/engine/get_commit_ids.cc
@@ -73,9 +73,8 @@ void GetCommitIdsForType(
// We filter out all unready entries from the set of unsynced handles. This
// new set of ready and unsynced items is then what we use to determine what
- // is a candidate for commit. The caller of this SyncerCommand is responsible
- // for ensuring that no throttled types are included among the
- // requested_types.
+ // is a candidate for commit. The caller is responsible for ensuring that no
+ // throttled types are included among the requested_types.
FilterUnreadyEntries(trans,
ModelTypeSet(type),
encrypted_types,
@@ -506,25 +505,4 @@ void OrderCommitIds(
} // namespace
-void GetCommitIds(
- syncable::BaseTransaction* trans,
- ModelTypeSet requested_types,
- size_t commit_batch_size,
- sessions::OrderedCommitSet* ordered_commit_set) {
- for (ModelTypeSet::Iterator it = requested_types.First();
- it.Good(); it.Inc()) {
- DCHECK_LE(ordered_commit_set->Size(), commit_batch_size);
- if (ordered_commit_set->Size() >= commit_batch_size)
- break;
- size_t space_remaining = commit_batch_size - ordered_commit_set->Size();
- syncable::Directory::Metahandles out;
- GetCommitIdsForType(
- trans,
- it.Get(),
- space_remaining,
- &out);
- ordered_commit_set->AddCommitItems(out, it.Get());
- }
-}
-
} // namespace syncer
diff --git a/chromium/sync/engine/get_commit_ids.h b/chromium/sync/engine/get_commit_ids.h
index 557853f36df..b435848e349 100644
--- a/chromium/sync/engine/get_commit_ids.h
+++ b/chromium/sync/engine/get_commit_ids.h
@@ -15,15 +15,14 @@ using std::vector;
namespace syncer {
-namespace sessions {
-class OrderedCommitSet;
-}
-
namespace syncable {
class BaseTransaction;
}
-// These functions return handles in "commit order". A valid commit ordering is
+// Returns up to |max_entries| metahandles of entries that belong to the
+// specified |type| and are ready for commit.
+//
+// This function returns handles in "commit order". A valid commit ordering is
// one where parents are placed before children, predecessors are placed before
// successors, and deletes appear after creates and moves.
//
@@ -32,25 +31,12 @@ class BaseTransaction;
// system can handle receiving the elements within a folder out of order, so we
// may be able to remove that functionality in the future.
// See crbug.com/287938.
-
-// Returns up to |max_entries| metahandles of entries that belong to the
-// specified |type| and are ready for commit. The returned handles will be
-// in a valid commit ordering.
SYNC_EXPORT_PRIVATE void GetCommitIdsForType(
syncable::BaseTransaction* trans,
ModelType type,
size_t max_entries,
std::vector<int64>* out);
-// Fills the specified |ordered_commit_set| with up to |commit_batch_size|
-// metahandles that belong to the set of types |requested_types| and are ready
-// for commit. The list will be in a valid commit ordering.
-SYNC_EXPORT_PRIVATE void GetCommitIds(
- syncable::BaseTransaction* trans,
- ModelTypeSet requested_types,
- size_t commit_batch_size,
- sessions::OrderedCommitSet* ordered_commit_set);
-
} // namespace syncer
#endif // SYNC_ENGINE_GET_COMMIT_IDS_H_
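
The header comment above defines a valid commit ordering as one where parents precede children, predecessors precede successors, and deletions follow creations and moves. As a rough standalone sketch of that property (not the real GetCommitIdsForType(), which walks syncable metahandles inside a directory transaction), one could order a flat batch as follows, assuming every in-batch parent is itself part of the batch:

// Illustrative sketch of a "valid commit ordering": parents before children,
// deletions last. Item and OrderForCommit() are hypothetical helpers.
#include <iostream>
#include <string>
#include <vector>

struct Item {
  Item(const std::string& id, const std::string& parent_id, bool is_deletion)
      : id(id), parent_id(parent_id), is_deletion(is_deletion) {}
  std::string id;
  std::string parent_id;  // Empty for items parented to the root.
  bool is_deletion;
};

void AppendSubtree(const std::string& parent,
                   const std::vector<Item>& items,
                   std::vector<Item>* out) {
  for (size_t i = 0; i < items.size(); ++i) {
    if (items[i].parent_id == parent && !items[i].is_deletion) {
      out->push_back(items[i]);                // Parent first...
      AppendSubtree(items[i].id, items, out);  // ...then its children.
    }
  }
}

std::vector<Item> OrderForCommit(const std::vector<Item>& items) {
  std::vector<Item> ordered;
  AppendSubtree("", items, &ordered);          // Creates and moves.
  for (size_t i = 0; i < items.size(); ++i) {  // Deletions go last.
    if (items[i].is_deletion)
      ordered.push_back(items[i]);
  }
  return ordered;
}

int main() {
  std::vector<Item> items;
  items.push_back(Item("bookmark", "folder", false));
  items.push_back(Item("folder", "", false));
  items.push_back(Item("old_item", "", true));
  std::vector<Item> ordered = OrderForCommit(items);
  for (size_t i = 0; i < ordered.size(); ++i)
    std::cout << ordered[i].id << "\n";  // folder, bookmark, old_item
  return 0;
}
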
diff --git a/chromium/sync/engine/model_changing_syncer_command.cc b/chromium/sync/engine/model_changing_syncer_command.cc
deleted file mode 100644
index a79362645ff..00000000000
--- a/chromium/sync/engine/model_changing_syncer_command.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/model_changing_syncer_command.h"
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/sessions/sync_session.h"
-
-namespace syncer {
-
-SyncerError ModelChangingSyncerCommand::ExecuteImpl(
- sessions::SyncSession* session) {
- work_session_ = session;
- SyncerError result = SYNCER_OK;
-
- const std::set<ModelSafeGroup>& groups_to_change =
- GetGroupsToChange(*work_session_);
- for (size_t i = 0; i < session->context()->workers().size(); ++i) {
- ModelSafeWorker* worker = session->context()->workers()[i].get();
- ModelSafeGroup group = worker->GetModelSafeGroup();
- // Skip workers whose group isn't active.
- if (groups_to_change.count(group) == 0u) {
- DVLOG(2) << "Skipping worker for group "
- << ModelSafeGroupToString(group);
- continue;
- }
-
- sessions::StatusController* status =
- work_session_->mutable_status_controller();
- sessions::ScopedModelSafeGroupRestriction r(status, group);
- WorkCallback c = base::Bind(
- &ModelChangingSyncerCommand::StartChangingModel,
- // We wait until the callback is executed. So it is safe to use
- // unretained.
- base::Unretained(this));
-
- SyncerError this_worker_result = worker->DoWorkAndWaitUntilDone(c);
- // TODO(rlarocque): Figure out a better way to deal with errors from
- // multiple models at once. See also: crbug.com/109422.
- if (this_worker_result != SYNCER_OK)
- result = this_worker_result;
- }
-
- return result;
-}
-
-} // namespace syncer
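
The ExecuteImpl() removed above dispatched work to each model-safe worker whose group had something to change and kept the last non-OK result. A standalone sketch of that dispatch-and-aggregate loop, with an illustrative Worker class and error enum standing in for ModelSafeWorker and SyncerError, might look like:

// Sketch of the removed ModelChangingSyncerCommand dispatch loop: run the
// work once per active model-safe group and remember the last failure.
#include <iostream>
#include <set>
#include <vector>

enum Error { OK, FAILED };
enum Group { GROUP_UI, GROUP_DB, GROUP_PASSIVE };

class Worker {
 public:
  Worker(Group group, Error result) : group_(group), result_(result) {}
  Group group() const { return group_; }
  // Stands in for DoWorkAndWaitUntilDone(callback).
  Error DoWork() const { return result_; }
 private:
  Group group_;
  Error result_;
};

Error RunOnActiveGroups(const std::vector<Worker>& workers,
                        const std::set<Group>& groups_to_change) {
  Error result = OK;
  for (size_t i = 0; i < workers.size(); ++i) {
    if (groups_to_change.count(workers[i].group()) == 0)
      continue;  // Skip workers whose group has no work this cycle.
    Error this_worker_result = workers[i].DoWork();
    if (this_worker_result != OK)
      result = this_worker_result;  // Remember that something failed.
  }
  return result;
}

int main() {
  std::vector<Worker> workers;
  workers.push_back(Worker(GROUP_UI, OK));
  workers.push_back(Worker(GROUP_DB, FAILED));
  std::set<Group> active;
  active.insert(GROUP_UI);
  std::cout << RunOnActiveGroups(workers, active) << "\n";  // 0: DB skipped.
  active.insert(GROUP_DB);
  std::cout << RunOnActiveGroups(workers, active) << "\n";  // 1: DB failure.
  return 0;
}
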
diff --git a/chromium/sync/engine/model_changing_syncer_command.h b/chromium/sync/engine/model_changing_syncer_command.h
deleted file mode 100644
index f9ffe3722be..00000000000
--- a/chromium/sync/engine/model_changing_syncer_command.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
-#define SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/syncer_command.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-
-namespace syncer {
-namespace sessions {
-class SyncSession;
-}
-
-// An abstract SyncerCommand which dispatches its Execute step to the
-// model-safe worker thread. Classes derived from ModelChangingSyncerCommand
-// instead of SyncerCommand must implement ModelChangingExecuteImpl instead of
-// ExecuteImpl, but otherwise, the contract is the same.
-//
-// A command should derive from ModelChangingSyncerCommand instead of
-// SyncerCommand whenever the operation might change any client-visible
-// fields on any syncable::Entry. If the operation involves creating a
-// WriteTransaction, this is a sign that ModelChangingSyncerCommand is likely
-// necessary.
-class SYNC_EXPORT_PRIVATE ModelChangingSyncerCommand : public SyncerCommand {
- public:
- ModelChangingSyncerCommand() : work_session_(NULL) { }
- virtual ~ModelChangingSyncerCommand() { }
-
- // SyncerCommand implementation. Sets work_session to session.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
-
- // Wrapper so implementations don't worry about storing work_session.
- SyncerError StartChangingModel() {
- return ModelChangingExecuteImpl(work_session_);
- }
-
- std::set<ModelSafeGroup> GetGroupsToChangeForTest(
- const sessions::SyncSession& session) const {
- return GetGroupsToChange(session);
- }
-
- protected:
- // This should return the set of groups in |session| that need to be
- // changed. The returned set should be a subset of
- // session.GetEnabledGroups(). Subclasses can guarantee this either
- // by calling one of the session.GetEnabledGroups*() functions and
- // filtering that, or using GetGroupForModelType() (which handles
- // top-level/unspecified nodes) to project from model types to
- // groups.
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const = 0;
-
- // Abstract method to be implemented by subclasses to handle logic that
- // operates on the model. This is invoked with a SyncSession ModelSafeGroup
- // restriction in place so that bits of state belonging to data types
- // running on an unsafe thread are siloed away.
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) = 0;
-
- private:
- // ExecuteImpl is expected to be run by SyncerCommand to set work_session.
- // StartChangingModel is called to start this command running.
- // Implementations will implement ModelChangingExecuteImpl and not
- // worry about storing the session or setting it. They are given work_session.
- sessions::SyncSession* work_session_;
-
- DISALLOW_COPY_AND_ASSIGN(ModelChangingSyncerCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
diff --git a/chromium/sync/engine/model_changing_syncer_command_unittest.cc b/chromium/sync/engine/model_changing_syncer_command_unittest.cc
deleted file mode 100644
index 71345eff777..00000000000
--- a/chromium/sync/engine/model_changing_syncer_command_unittest.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "sync/engine/model_changing_syncer_command.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class FakeModelChangingSyncerCommand : public ModelChangingSyncerCommand {
- public:
- FakeModelChangingSyncerCommand() {}
- virtual ~FakeModelChangingSyncerCommand() {}
-
- const std::set<ModelSafeGroup>& changed_groups() const {
- return changed_groups_;
- }
-
- protected:
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const OVERRIDE {
- // This command doesn't actually make changes, so the empty set might be an
- // appropriate response. That would not be very interesting, so instead we
- // return the set of groups with active types.
- std::set<ModelSafeGroup> enabled_groups;
- const ModelSafeRoutingInfo& routing_info =
- session.context()->routing_info();
- for (ModelSafeRoutingInfo::const_iterator it = routing_info.begin();
- it != routing_info.end(); ++it) {
- enabled_groups.insert(it->second);
- }
- enabled_groups.insert(GROUP_PASSIVE);
- return enabled_groups;
- }
-
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) OVERRIDE {
- changed_groups_.insert(session->status_controller().group_restriction());
- return SYNCER_OK;
- }
-
- private:
- std::set<ModelSafeGroup> changed_groups_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeModelChangingSyncerCommand);
-};
-
-class ModelChangingSyncerCommandTest : public SyncerCommandTest {
- protected:
- ModelChangingSyncerCommandTest() {}
- virtual ~ModelChangingSyncerCommandTest() {}
-
- virtual void SetUp() {
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_PASSWORD)));
- (*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
- (*mutable_routing_info())[PASSWORDS] = GROUP_PASSWORD;
- SyncerCommandTest::SetUp();
- }
-
- FakeModelChangingSyncerCommand command_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ModelChangingSyncerCommandTest);
-};
-
-TEST_F(ModelChangingSyncerCommandTest, Basic) {
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_PASSWORD, GROUP_PASSIVE);
- EXPECT_TRUE(command_.changed_groups().empty());
- command_.ExecuteImpl(session());
- EXPECT_EQ(command_.GetGroupsToChangeForTest(*session()),
- command_.changed_groups());
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/engine/net/server_connection_manager.cc b/chromium/sync/engine/net/server_connection_manager.cc
index 99e62b0659e..2cb781098a1 100644
--- a/chromium/sync/engine/net/server_connection_manager.cc
+++ b/chromium/sync/engine/net/server_connection_manager.cc
@@ -178,12 +178,10 @@ ServerConnectionManager::ServerConnectionManager(
const string& server,
int port,
bool use_ssl,
- bool use_oauth2_token,
CancelationSignal* cancelation_signal)
: sync_server_(server),
sync_server_port_(port),
use_ssl_(use_ssl),
- use_oauth2_token_(use_oauth2_token),
proto_sync_path_(kSyncServerSyncPath),
server_status_(HttpResponse::NONE),
terminated_(false),
@@ -235,6 +233,14 @@ bool ServerConnectionManager::SetAuthToken(const std::string& auth_token) {
previously_invalidated_token = std::string();
return true;
}
+
+  // This can happen in cases such as a server outage or bug. For example, the
+  // token returned by the first request is considered invalid by the sync
+  // server, but because of the token server's caching policy the same token is
+  // returned on the second request. The sync frontend must be notified again
+  // to request a new token; otherwise the backend stays in SYNC_AUTH_ERROR
+  // while the frontend thinks everything is fine and takes no action.
+ SetServerStatus(HttpResponse::SYNC_AUTH_ERROR);
return false;
}
@@ -254,8 +260,12 @@ void ServerConnectionManager::InvalidateAndClearAuthToken() {
void ServerConnectionManager::SetServerStatus(
HttpResponse::ServerConnectionCode server_status) {
- if (server_status_ == server_status)
+  // SYNC_AUTH_ERROR is a permanent error. Observers must be notified so they
+  // can take action externally to resolve it.
+ if (server_status != HttpResponse::SYNC_AUTH_ERROR &&
+ server_status_ == server_status) {
return;
+ }
server_status_ = server_status;
NotifyStatusChanged();
}
@@ -287,6 +297,8 @@ bool ServerConnectionManager::PostBufferToPath(PostBufferParams* params,
// to clean it.
if (auth_token.empty() || auth_token == "credentials_lost") {
params->response.server_status = HttpResponse::SYNC_AUTH_ERROR;
+ // Print a log to distinguish this "known failure" from others.
+ LOG(WARNING) << "ServerConnectionManager forcing SYNC_AUTH_ERROR";
return false;
}
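
The SetServerStatus() change above keeps the usual suppression of duplicate notifications but makes SYNC_AUTH_ERROR always re-notify, since observers must react to it externally even when the status value has not changed. A minimal standalone sketch of that behaviour, using a hypothetical ConnectionStatusTracker in place of ServerConnectionManager:

// Sketch of the SetServerStatus() behaviour: duplicate statuses are ignored,
// except SYNC_AUTH_ERROR, which always notifies. Types are illustrative.
#include <iostream>

enum Status { NONE, CONNECTION_OK, SYNC_AUTH_ERROR };

class ConnectionStatusTracker {
 public:
  ConnectionStatusTracker() : status_(NONE), notifications_(0) {}

  void SetServerStatus(Status status) {
    // Suppress duplicate notifications, except for the permanent auth error,
    // which observers must see every time so they can act on it.
    if (status != SYNC_AUTH_ERROR && status_ == status)
      return;
    status_ = status;
    NotifyStatusChanged();
  }

  int notifications() const { return notifications_; }

 private:
  void NotifyStatusChanged() { ++notifications_; }

  Status status_;
  int notifications_;
};

int main() {
  ConnectionStatusTracker tracker;
  tracker.SetServerStatus(CONNECTION_OK);
  tracker.SetServerStatus(CONNECTION_OK);      // Duplicate: no notification.
  tracker.SetServerStatus(SYNC_AUTH_ERROR);
  tracker.SetServerStatus(SYNC_AUTH_ERROR);    // Still notifies.
  std::cout << tracker.notifications() << "\n";  // Prints 3.
  return 0;
}
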
diff --git a/chromium/sync/engine/net/server_connection_manager.h b/chromium/sync/engine/net/server_connection_manager.h
index 7496a5338b2..e6a48f0e659 100644
--- a/chromium/sync/engine/net/server_connection_manager.h
+++ b/chromium/sync/engine/net/server_connection_manager.h
@@ -81,9 +81,6 @@ struct SYNC_EXPORT_PRIVATE HttpResponse {
// The size of a download request's payload.
int64 payload_length;
- // Value of the Update-Client-Auth header.
- std::string update_client_auth_header;
-
// Identifies the type of failure, if any.
ServerConnectionCode server_status;
@@ -167,12 +164,10 @@ class SYNC_EXPORT_PRIVATE ServerConnectionManager : public CancelationObserver {
void GetServerParams(std::string* server,
int* server_port,
- bool* use_ssl,
- bool* use_oauth2_token) const {
+ bool* use_ssl) const {
server->assign(scm_->sync_server_);
*server_port = scm_->sync_server_port_;
*use_ssl = scm_->use_ssl_;
- *use_oauth2_token = scm_->use_oauth2_token_;
}
std::string buffer_;
@@ -186,7 +181,6 @@ class SYNC_EXPORT_PRIVATE ServerConnectionManager : public CancelationObserver {
ServerConnectionManager(const std::string& server,
int port,
bool use_ssl,
- bool use_oauth2_token,
CancelationSignal* cancelation_signal);
virtual ~ServerConnectionManager();
@@ -292,11 +286,6 @@ class SYNC_EXPORT_PRIVATE ServerConnectionManager : public CancelationObserver {
// Indicates whether or not requests should be made using HTTPS.
bool use_ssl_;
- // Indicates if token should be handled as OAuth2 token. Connection should set
- // auth header appropriately.
- // TODO(pavely): Remove once sync on android switches to oauth2 tokens.
- bool use_oauth2_token_;
-
// The paths we post to.
std::string proto_sync_path_;
diff --git a/chromium/sync/engine/process_commit_response_command.h b/chromium/sync/engine/process_commit_response_command.h
deleted file mode 100644
index 72b5963438e..00000000000
--- a/chromium/sync/engine/process_commit_response_command.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
-#define SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
-
-#include <set>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/model_changing_syncer_command.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-namespace sessions {
-class OrderedCommitSet;
-}
-
-namespace syncable {
-class Id;
-class WriteTransaction;
-class MutableEntry;
-class Directory;
-}
-
-// A class that processes the server's response to our commit attempt. Note
-// that some of the preliminary processing is performed in
-// PostClientToServerMessage command.
-//
-// As part of processing the commit response, this command will modify sync
-// entries. It can rename items, update their versions, etc.
-//
-// This command will return a non-SYNCER_OK value if an error occurred while
-// processing the response, or if the server's response indicates that it had
-// trouble processing the request.
-//
-// See SyncerCommand documentation for more info.
-class SYNC_EXPORT_PRIVATE ProcessCommitResponseCommand
- : public ModelChangingSyncerCommand {
- public:
-
- // The commit_set parameter contains references to all the items which were
- // to be committed in this batch.
- //
-  // The commit_message parameter contains the message that was sent to the
- // server.
- //
- // The commit_response parameter contains the response received from the
- // server. This may be uninitialized if we were unable to contact the server
- // or a serious error was encountered.
- ProcessCommitResponseCommand(
- const sessions::OrderedCommitSet& commit_set,
- const sync_pb::ClientToServerMessage& commit_message,
- const sync_pb::ClientToServerResponse& commit_response);
- virtual ~ProcessCommitResponseCommand();
-
- protected:
- // ModelChangingSyncerCommand implementation.
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const OVERRIDE;
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) OVERRIDE;
-
- private:
- sync_pb::CommitResponse::ResponseType ProcessSingleCommitResponse(
- syncable::WriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& pb_commit_response,
- const sync_pb::SyncEntity& pb_committed_entry,
- int64 metahandle,
- std::set<syncable::Id>* deleted_folders);
-
- void ProcessSuccessfulCommitResponse(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
- bool syncing_was_set, std::set<syncable::Id>* deleted_folders);
-
- // Update the BASE_VERSION and SERVER_VERSION, post-commit.
- // Helper for ProcessSuccessfulCommitResponse.
- bool UpdateVersionAfterCommit(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry);
-
- // If the server generated an ID for us during a commit, apply the new ID.
- // Helper for ProcessSuccessfulCommitResponse.
- bool ChangeIdAfterCommit(
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry);
-
- // Update the SERVER_ fields to reflect the server state after committing.
- // Helper for ProcessSuccessfulCommitResponse.
- void UpdateServerFieldsAfterCommit(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- syncable::MutableEntry* local_entry);
-
- // Helper to extract the final name from the protobufs.
- const std::string& GetResultingPostCommitName(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response);
-
- // Helper to clean up in case of failure.
- void ClearSyncingBits(
- syncable::Directory *dir,
- const std::vector<syncable::Id>& commit_ids);
-
- const sessions::OrderedCommitSet& commit_set_;
- const sync_pb::ClientToServerMessage& commit_message_;
- const sync_pb::ClientToServerResponse& commit_response_;
-
- DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
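
The removed header above describes the post-commit bookkeeping that now lives in commit_util.cc: on a successful commit the entry adopts the server-assigned ID, its base and server versions move to the new revision, and it is no longer unsynced. A simplified standalone sketch of that step, with LocalEntry and EntryResponse as illustrative stand-ins for the syncable and protobuf types:

// Sketch of applying a successful commit response entry to a local item.
#include <iostream>
#include <string>

struct LocalEntry {
  std::string id;         // Client-generated until the first commit succeeds.
  long long base_version;
  long long server_version;
  bool is_unsynced;
};

struct EntryResponse {
  std::string id;         // Server-assigned id.
  long long version;      // New revision assigned by the server.
};

void ApplySuccessfulCommit(const EntryResponse& response, LocalEntry* entry) {
  if (entry->id != response.id)
    entry->id = response.id;                // Adopt the server-generated id.
  entry->base_version = response.version;   // Post-commit base revision.
  entry->server_version = response.version;
  entry->is_unsynced = false;               // Nothing left to commit.
}

int main() {
  LocalEntry entry = {"c-local-1", -1, 0, true};
  EntryResponse response = {"s-42", 4000};
  ApplySuccessfulCommit(response, &entry);
  std::cout << entry.id << " @ " << entry.base_version << "\n";  // s-42 @ 4000
  return 0;
}
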
diff --git a/chromium/sync/engine/process_commit_response_command_unittest.cc b/chromium/sync/engine/process_commit_response_command_unittest.cc
deleted file mode 100644
index de2f5ec47a0..00000000000
--- a/chromium/sync/engine/process_commit_response_command_unittest.cc
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/process_commit_response_command.h"
-
-#include <vector>
-
-#include "base/location.h"
-#include "base/strings/stringprintf.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using std::string;
-using sync_pb::ClientToServerMessage;
-using sync_pb::CommitResponse;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using syncable::BASE_VERSION;
-using syncable::Entry;
-using syncable::ID;
-using syncable::IS_DIR;
-using syncable::IS_UNSYNCED;
-using syncable::Id;
-using syncable::MutableEntry;
-using syncable::NON_UNIQUE_NAME;
-using syncable::UNIQUE_POSITION;
-using syncable::UNITTEST;
-using syncable::WriteTransaction;
-
-// A test fixture for tests exercising ProcessCommitResponseCommand.
-class ProcessCommitResponseCommandTest : public SyncerCommandTest {
- public:
- virtual void SetUp() {
- workers()->clear();
- mutable_routing_info()->clear();
-
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- (*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
- (*mutable_routing_info())[PREFERENCES] = GROUP_UI;
- (*mutable_routing_info())[AUTOFILL] = GROUP_DB;
-
- SyncerCommandTest::SetUp();
-
- test_entry_factory_.reset(new TestEntryFactory(directory()));
- }
-
- protected:
-
- ProcessCommitResponseCommandTest()
- : next_new_revision_(4000),
- next_server_position_(10000) {
- }
-
- void CheckEntry(Entry* e, const std::string& name,
- ModelType model_type, const Id& parent_id) {
- EXPECT_TRUE(e->good());
- ASSERT_EQ(name, e->GetNonUniqueName());
- ASSERT_EQ(model_type, e->GetModelType());
- ASSERT_EQ(parent_id, e->GetParentId());
- ASSERT_LT(0, e->GetBaseVersion())
- << "Item should have a valid (positive) server base revision";
- }
-
- // Create a new unsynced item in the database, and synthesize a commit record
- // and a commit response for it in the syncer session. If item_id is a local
- // ID, the item will be a create operation. Otherwise, it will be an edit.
- // Returns the metahandle of the newly created item.
- int CreateUnprocessedCommitResult(
- const Id& item_id,
- const Id& parent_id,
- const string& name,
- bool is_folder,
- ModelType model_type,
- sessions::OrderedCommitSet *commit_set,
- sync_pb::ClientToServerMessage *commit,
- sync_pb::ClientToServerResponse *response) {
- int64 metahandle = 0;
- test_entry_factory_->CreateUnsyncedItem(item_id, parent_id, name,
- is_folder, model_type, &metahandle);
-
- // ProcessCommitResponseCommand consumes commit_ids from the session
- // state, so we need to update that. O(n^2) because it's a test.
- commit_set->AddCommitItem(metahandle, model_type);
-
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
- EXPECT_TRUE(entry.good());
- entry.PutSyncing(true);
-
- // Add to the commit message.
- // TODO(sync): Use the real commit-building code to construct this.
- commit->set_message_contents(ClientToServerMessage::COMMIT);
- sync_pb::SyncEntity* entity = commit->mutable_commit()->add_entries();
- entity->set_non_unique_name(entry.GetNonUniqueName());
- entity->set_folder(entry.GetIsDir());
- entity->set_parent_id_string(
- SyncableIdToProto(entry.GetParentId()));
- entity->set_version(entry.GetBaseVersion());
- entity->mutable_specifics()->CopyFrom(entry.GetSpecifics());
- entity->set_id_string(SyncableIdToProto(item_id));
-
- if (!entry.GetUniqueClientTag().empty()) {
- entity->set_client_defined_unique_tag(
- entry.GetUniqueClientTag());
- }
-
- // Add to the response message.
- response->set_error_code(sync_pb::SyncEnums::SUCCESS);
- sync_pb::CommitResponse_EntryResponse* entry_response =
- response->mutable_commit()->add_entryresponse();
- entry_response->set_response_type(CommitResponse::SUCCESS);
- entry_response->set_name("Garbage.");
- entry_response->set_non_unique_name(entity->name());
- if (item_id.ServerKnows())
- entry_response->set_id_string(entity->id_string());
- else
- entry_response->set_id_string(id_factory_.NewServerId().GetServerId());
- entry_response->set_version(next_new_revision_++);
-
- // If the ID of our parent item committed earlier in the batch was
- // rewritten, rewrite it in the entry response. This matches
- // the server behavior.
- entry_response->set_parent_id_string(entity->parent_id_string());
- for (int i = 0; i < commit->commit().entries_size(); ++i) {
- if (commit->commit().entries(i).id_string() ==
- entity->parent_id_string()) {
- entry_response->set_parent_id_string(
- response->commit().entryresponse(i).id_string());
- }
- }
-
- return metahandle;
- }
-
- void SetLastErrorCode(sync_pb::CommitResponse::ResponseType error_code,
- sync_pb::ClientToServerResponse* response) {
- sync_pb::CommitResponse_EntryResponse* entry_response =
- response->mutable_commit()->mutable_entryresponse(
- response->mutable_commit()->entryresponse_size() - 1);
- entry_response->set_response_type(error_code);
- }
-
- TestIdFactory id_factory_;
- scoped_ptr<TestEntryFactory> test_entry_factory_;
- private:
- int64 next_new_revision_;
- int64 next_server_position_;
- DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommandTest);
-};
-
-TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
- sessions::OrderedCommitSet commit_set(session()->context()->routing_info());
- sync_pb::ClientToServerMessage request;
- sync_pb::ClientToServerResponse response;
-
- Id bookmark_folder_id = id_factory_.NewLocalId();
- int bookmark_folder_handle = CreateUnprocessedCommitResult(
- bookmark_folder_id, id_factory_.root(), "A bookmark folder", true,
- BOOKMARKS, &commit_set, &request, &response);
- int bookmark1_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), bookmark_folder_id, "bookmark 1", false,
- BOOKMARKS, &commit_set, &request, &response);
- int bookmark2_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), bookmark_folder_id, "bookmark 2", false,
- BOOKMARKS, &commit_set, &request, &response);
- int pref1_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Pref 1", false,
- PREFERENCES, &commit_set, &request, &response);
- int pref2_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Pref 2", false,
- PREFERENCES, &commit_set, &request, &response);
- int autofill1_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Autofill 1", false,
- AUTOFILL, &commit_set, &request, &response);
- int autofill2_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Autofill 2", false,
- AUTOFILL, &commit_set, &request, &response);
-
- ProcessCommitResponseCommand command(commit_set, request, response);
- ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
- command.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry b_folder(&trans, syncable::GET_BY_HANDLE, bookmark_folder_handle);
- ASSERT_TRUE(b_folder.good());
-
- Id new_fid = b_folder.GetId();
- ASSERT_FALSE(new_fid.IsRoot());
- EXPECT_TRUE(new_fid.ServerKnows());
- EXPECT_FALSE(bookmark_folder_id.ServerKnows());
- EXPECT_FALSE(new_fid == bookmark_folder_id);
-
- ASSERT_EQ("A bookmark folder", b_folder.GetNonUniqueName())
- << "Name of bookmark folder should not change.";
- ASSERT_LT(0, b_folder.GetBaseVersion())
- << "Bookmark folder should have a valid (positive) server base revision";
-
- // Look at the two bookmarks in bookmark_folder.
- Entry b1(&trans, syncable::GET_BY_HANDLE, bookmark1_handle);
- Entry b2(&trans, syncable::GET_BY_HANDLE, bookmark2_handle);
- CheckEntry(&b1, "bookmark 1", BOOKMARKS, new_fid);
- CheckEntry(&b2, "bookmark 2", BOOKMARKS, new_fid);
-
- // Look at the prefs and autofill items.
- Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
- Entry p2(&trans, syncable::GET_BY_HANDLE, pref2_handle);
- CheckEntry(&p1, "Pref 1", PREFERENCES, id_factory_.root());
- CheckEntry(&p2, "Pref 2", PREFERENCES, id_factory_.root());
-
- Entry a1(&trans, syncable::GET_BY_HANDLE, autofill1_handle);
- Entry a2(&trans, syncable::GET_BY_HANDLE, autofill2_handle);
- CheckEntry(&a1, "Autofill 1", AUTOFILL, id_factory_.root());
- CheckEntry(&a2, "Autofill 2", AUTOFILL, id_factory_.root());
-}
-
-// In this test, we test processing a commit response for a commit batch that
-// includes a newly created folder and some (but not all) of its children.
-// In particular, the folder has 50 children, which alternate between being
-// new items and preexisting items. This mixture of new and old is meant to
-// be a torture test of the code in ProcessCommitResponseCommand that changes
-// an item's ID from a local ID to a server-generated ID on the first commit.
-// We commit only the first 25 children in the sibling order, leaving the
-// second 25 children as unsynced items. http://crbug.com/33081 describes
-// how this scenario used to fail, reversing the order for the second half
-// of the children.
-TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
- sessions::OrderedCommitSet commit_set(session()->context()->routing_info());
- sync_pb::ClientToServerMessage request;
- sync_pb::ClientToServerResponse response;
-
- // Create the parent folder, a new item whose ID will change on commit.
- Id folder_id = id_factory_.NewLocalId();
- CreateUnprocessedCommitResult(folder_id, id_factory_.root(),
- "A", true, BOOKMARKS,
- &commit_set, &request, &response);
-
- // Verify that the item is reachable.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
- ASSERT_TRUE(root.good());
- Id child_id = root.GetFirstChildId();
- ASSERT_EQ(folder_id, child_id);
- }
-
- // The first 25 children of the parent folder will be part of the commit
- // batch. They will be placed left to right in order of creation.
- int batch_size = 25;
- int i = 0;
- Id prev_id = TestIdFactory::root();
- for (; i < batch_size; ++i) {
- // Alternate between new and old child items, just for kicks.
- Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
- int64 handle = CreateUnprocessedCommitResult(
- id, folder_id, base::StringPrintf("Item %d", i), false,
- BOOKMARKS, &commit_set, &request, &response);
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- e.PutPredecessor(prev_id);
- }
- prev_id = id;
- }
- // The second 25 children will be unsynced items but NOT part of the commit
- // batch. When the ID of the parent folder changes during the commit,
-  // these items' PARENT_ID should be updated, and their ordering should be
- // preserved.
- for (; i < 2*batch_size; ++i) {
- // Alternate between new and old child items, just for kicks.
- Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
- int64 handle = -1;
- test_entry_factory_->CreateUnsyncedItem(
- id, folder_id, base::StringPrintf("Item %d", i),
- false, BOOKMARKS, &handle);
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- e.PutPredecessor(prev_id);
- }
- prev_id = id;
- }
-
- // Process the commit response for the parent folder and the first
- // 25 items. This should apply the values indicated by
- // each CommitResponse_EntryResponse to the syncable Entries. All new
- // items in the commit batch should have their IDs changed to server IDs.
- ProcessCommitResponseCommand command(commit_set, request, response);
- ExpectGroupToChange(command, GROUP_UI);
- command.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- // Lookup the parent folder by finding a child of the root. We can't use
- // folder_id here, because it changed during the commit.
- syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
- ASSERT_TRUE(root.good());
- Id new_fid = root.GetFirstChildId();
- ASSERT_FALSE(new_fid.IsRoot());
- EXPECT_TRUE(new_fid.ServerKnows());
- EXPECT_FALSE(folder_id.ServerKnows());
- EXPECT_TRUE(new_fid != folder_id);
- Entry parent(&trans, syncable::GET_BY_ID, new_fid);
- ASSERT_TRUE(parent.good());
- ASSERT_EQ("A", parent.GetNonUniqueName())
- << "Name of parent folder should not change.";
- ASSERT_LT(0, parent.GetBaseVersion())
- << "Parent should have a valid (positive) server base revision";
-
- Id cid = parent.GetFirstChildId();
-
- int child_count = 0;
- // Now loop over all the children of the parent folder, verifying
- // that they are in their original order by checking to see that their
- // names are still sequential.
- while (!cid.IsRoot()) {
- SCOPED_TRACE(::testing::Message("Examining item #") << child_count);
- Entry c(&trans, syncable::GET_BY_ID, cid);
- DCHECK(c.good());
- ASSERT_EQ(base::StringPrintf("Item %d", child_count),
- c.GetNonUniqueName());
- ASSERT_EQ(new_fid, c.GetParentId());
- if (child_count < batch_size) {
- ASSERT_FALSE(c.GetIsUnsynced()) << "Item should be committed";
- ASSERT_TRUE(cid.ServerKnows());
- ASSERT_LT(0, c.GetBaseVersion());
- } else {
- ASSERT_TRUE(c.GetIsUnsynced()) << "Item should be uncommitted";
- // We alternated between creates and edits; double check that these items
- // have been preserved.
- if (child_count % 4 < 2) {
- ASSERT_FALSE(cid.ServerKnows());
- ASSERT_GE(0, c.GetBaseVersion());
- } else {
- ASSERT_TRUE(cid.ServerKnows());
- ASSERT_LT(0, c.GetBaseVersion());
- }
- }
- cid = c.GetSuccessorId();
- child_count++;
- }
- ASSERT_EQ(batch_size*2, child_count)
- << "Too few or too many children in parent folder after commit.";
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/process_updates_command.h b/chromium/sync/engine/process_updates_command.h
deleted file mode 100644
index ce793879b36..00000000000
--- a/chromium/sync/engine/process_updates_command.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
-#define SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/model_changing_syncer_command.h"
-#include "sync/engine/syncer_types.h"
-
-namespace sync_pb {
-class SyncEntity;
-}
-
-namespace syncer {
-
-namespace syncable {
-class WriteTransaction;
-}
-
-class Cryptographer;
-
-// A syncer command for verifying and processing updates.
-//
-// Preconditions - Updates in the SyncerSession have been downloaded.
-//
-// Postconditions - All of the verified SyncEntity data will be copied to
-// the server fields of the corresponding syncable entries.
-class SYNC_EXPORT_PRIVATE ProcessUpdatesCommand
- : public ModelChangingSyncerCommand {
- public:
- ProcessUpdatesCommand();
- virtual ~ProcessUpdatesCommand();
-
- protected:
- // ModelChangingSyncerCommand implementation.
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const OVERRIDE;
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) OVERRIDE;
-
- private:
- VerifyResult VerifyUpdate(
- syncable::WriteTransaction* trans,
- const sync_pb::SyncEntity& entry,
- ModelTypeSet requested_types,
- const ModelSafeRoutingInfo& routes);
- ServerUpdateProcessingResult ProcessUpdate(
- const sync_pb::SyncEntity& proto_update,
- const Cryptographer* cryptographer,
- syncable::WriteTransaction* const trans);
- DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
diff --git a/chromium/sync/engine/process_updates_command_unittest.cc b/chromium/sync/engine/process_updates_command_unittest.cc
deleted file mode 100644
index b46dc7846c0..00000000000
--- a/chromium/sync/engine/process_updates_command_unittest.cc
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "sync/engine/process_updates_command.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using sync_pb::SyncEntity;
-using syncable::Id;
-using syncable::MutableEntry;
-using syncable::UNITTEST;
-using syncable::WriteTransaction;
-
-namespace {
-
-class ProcessUpdatesCommandTest : public SyncerCommandTest {
- protected:
- ProcessUpdatesCommandTest() {}
- virtual ~ProcessUpdatesCommandTest() {}
-
- virtual void SetUp() {
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
- (*mutable_routing_info())[PREFERENCES] = GROUP_UI;
- (*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
- (*mutable_routing_info())[AUTOFILL] = GROUP_DB;
- SyncerCommandTest::SetUp();
- test_entry_factory_.reset(new TestEntryFactory(directory()));
- }
-
- void CreateLocalItem(const std::string& item_id,
- const std::string& parent_id,
- const ModelType& type) {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
- Id::CreateFromServerId(item_id));
- ASSERT_TRUE(entry.good());
-
- entry.PutBaseVersion(1);
- entry.PutServerVersion(1);
- entry.PutNonUniqueName(item_id);
- entry.PutParentId(Id::CreateFromServerId(parent_id));
- sync_pb::EntitySpecifics default_specifics;
- AddDefaultFieldValue(type, &default_specifics);
- entry.PutServerSpecifics(default_specifics);
- }
-
- SyncEntity* AddUpdate(sync_pb::GetUpdatesResponse* updates,
- const std::string& id, const std::string& parent,
- const ModelType& type) {
- sync_pb::SyncEntity* e = updates->add_entries();
- e->set_id_string(id);
- e->set_parent_id_string(parent);
- e->set_non_unique_name(id);
- e->set_name(id);
- e->set_version(1000);
- AddDefaultFieldValue(type, e->mutable_specifics());
- return e;
- }
-
- ProcessUpdatesCommand command_;
- scoped_ptr<TestEntryFactory> test_entry_factory_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommandTest);
-};
-
-TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
- std::string root = syncable::GetNullId().GetServerId();
-
- CreateLocalItem("p1", root, PREFERENCES);
- CreateLocalItem("a1", root, AUTOFILL);
-
- ExpectNoGroupsToChange(command_);
-
- sync_pb::GetUpdatesResponse* updates =
- session()->mutable_status_controller()->
- mutable_updates_response()->mutable_get_updates();
- AddUpdate(updates, "p1", root, PREFERENCES);
- AddUpdate(updates, "a1", root, AUTOFILL);
-
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
-
- command_.ExecuteImpl(session());
-}
-
-static const char kCacheGuid[] = "IrcjZ2jyzHDV9Io4+zKcXQ==";
-
-// Test that the bookmark tag is set on newly downloaded items.
-TEST_F(ProcessUpdatesCommandTest, NewBookmarkTag) {
- std::string root = syncable::GetNullId().GetServerId();
- sync_pb::GetUpdatesResponse* updates =
- session()->mutable_status_controller()->
- mutable_updates_response()->mutable_get_updates();
- Id server_id = Id::CreateFromServerId("b1");
- SyncEntity* e =
- AddUpdate(updates, SyncableIdToProto(server_id), root, BOOKMARKS);
-
- e->set_originator_cache_guid(
- std::string(kCacheGuid, arraysize(kCacheGuid)-1));
- Id client_id = Id::CreateFromClientString("-2");
- e->set_originator_client_item_id(client_id.GetServerId());
- e->set_position_in_parent(0);
-
- command_.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(
- UniquePosition::IsValidSuffix(entry.GetUniqueBookmarkTag()));
- EXPECT_TRUE(entry.GetServerUniquePosition().IsValid());
-
- // If this assertion fails, that might indicate that the algorithm used to
- // generate bookmark tags has been modified. This could have implications for
- // bookmark ordering. Please make sure you know what you're doing if you
- // intend to make such a change.
- EXPECT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=",
- entry.GetUniqueBookmarkTag());
-}
-
-TEST_F(ProcessUpdatesCommandTest, ReceiveServerCreatedBookmarkFolders) {
- Id server_id = Id::CreateFromServerId("xyz");
- std::string root = syncable::GetNullId().GetServerId();
- sync_pb::GetUpdatesResponse* updates =
- session()->mutable_status_controller()->
- mutable_updates_response()->mutable_get_updates();
-
- // Create an update that mimics the bookmark root.
- SyncEntity* e =
- AddUpdate(updates, SyncableIdToProto(server_id), root, BOOKMARKS);
- e->set_server_defined_unique_tag("google_chrome_bookmarks");
- e->set_folder(true);
-
- EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
-
- command_.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
- ASSERT_TRUE(entry.good());
-
- EXPECT_FALSE(entry.ShouldMaintainPosition());
- EXPECT_FALSE(entry.GetUniquePosition().IsValid());
- EXPECT_FALSE(entry.GetServerUniquePosition().IsValid());
- EXPECT_TRUE(entry.GetUniqueBookmarkTag().empty());
-}
-
-TEST_F(ProcessUpdatesCommandTest, ReceiveNonBookmarkItem) {
- Id server_id = Id::CreateFromServerId("xyz");
- std::string root = syncable::GetNullId().GetServerId();
- sync_pb::GetUpdatesResponse* updates =
- session()->mutable_status_controller()->
- mutable_updates_response()->mutable_get_updates();
-
- SyncEntity* e =
- AddUpdate(updates, SyncableIdToProto(server_id), root, AUTOFILL);
- e->set_server_defined_unique_tag("9PGRuKdX5sHyGMB17CvYTXuC43I=");
-
- EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
-
- command_.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
- ASSERT_TRUE(entry.good());
-
- EXPECT_FALSE(entry.ShouldMaintainPosition());
- EXPECT_FALSE(entry.GetUniquePosition().IsValid());
- EXPECT_FALSE(entry.GetServerUniquePosition().IsValid());
- EXPECT_TRUE(entry.GetUniqueBookmarkTag().empty());
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/engine/process_updates_command.cc b/chromium/sync/engine/process_updates_util.cc
index 4854852558c..49e40b366d0 100644
--- a/chromium/sync/engine/process_updates_command.cc
+++ b/chromium/sync/engine/process_updates_util.cc
@@ -1,51 +1,25 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "sync/engine/process_updates_command.h"
+#include "sync/engine/process_updates_util.h"
-#include <vector>
-
-#include "base/basictypes.h"
#include "base/location.h"
-#include "sync/engine/syncer.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_util.h"
-#include "sync/sessions/sync_session.h"
#include "sync/syncable/directory.h"
-#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
-using std::vector;
-
namespace syncer {
-using sessions::SyncSession;
using sessions::StatusController;
using syncable::GET_BY_ID;
-ProcessUpdatesCommand::ProcessUpdatesCommand() {}
-ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
-
-std::set<ModelSafeGroup> ProcessUpdatesCommand::GetGroupsToChange(
- const sessions::SyncSession& session) const {
- std::set<ModelSafeGroup> groups_with_updates;
-
- const sync_pb::GetUpdatesResponse& updates =
- session.status_controller().updates_response().get_updates();
- for (int i = 0; i < updates.entries().size(); i++) {
- groups_with_updates.insert(
- GetGroupForModelType(GetModelType(updates.entries(i)),
- session.context()->routing_info()));
- }
-
- return groups_with_updates;
-}
-
namespace {
// This function attempts to determine whether or not this update is genuinely
@@ -102,55 +76,52 @@ bool UpdateContainsNewVersion(syncable::BaseTransaction *trans,
} // namespace
-SyncerError ProcessUpdatesCommand::ModelChangingExecuteImpl(
- SyncSession* session) {
- syncable::Directory* dir = session->context()->directory();
-
- syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
-
- sessions::StatusController* status = session->mutable_status_controller();
- const sync_pb::GetUpdatesResponse& updates =
- status->updates_response().get_updates();
+void PartitionUpdatesByType(
+ const sync_pb::GetUpdatesResponse& updates,
+ ModelTypeSet requested_types,
+ TypeSyncEntityMap* updates_by_type) {
int update_count = updates.entries().size();
-
- ModelTypeSet requested_types = GetRoutingInfoTypes(
- session->context()->routing_info());
-
- DVLOG(1) << update_count << " entries to verify";
- for (int i = 0; i < update_count; i++) {
+ for (ModelTypeSet::Iterator it = requested_types.First();
+ it.Good(); it.Inc()) {
+ updates_by_type->insert(std::make_pair(it.Get(), SyncEntityList()));
+ }
+ for (int i = 0; i < update_count; ++i) {
const sync_pb::SyncEntity& update = updates.entries(i);
+ ModelType type = GetModelType(update);
+ if (!IsRealDataType(type)) {
+ NOTREACHED() << "Received update with invalid type.";
+ continue;
+ }
- // The current function gets executed on several different threads, but
- // every call iterates over the same list of items that the server returned
- // to us. We're not allowed to process items unless we're on the right
- // thread for that type. This check will ensure we only touch the items
- // that live on our current thread.
- // TODO(tim): Don't allow access to objects in other ModelSafeGroups.
- // See crbug.com/121521 .
- ModelSafeGroup g = GetGroupForModelType(GetModelType(update),
- session->context()->routing_info());
- if (g != status->group_restriction())
+ TypeSyncEntityMap::iterator it = updates_by_type->find(type);
+ if (it == updates_by_type->end()) {
+ DLOG(WARNING) << "Skipping update for unexpected type "
+ << ModelTypeToString(type);
continue;
+ }
+
+ it->second.push_back(&update);
+ }
+}
- VerifyResult verify_result = VerifyUpdate(
- &trans, update, requested_types, session->context()->routing_info());
- status->increment_num_updates_downloaded_by(1);
- if (!UpdateContainsNewVersion(&trans, update))
+void ProcessDownloadedUpdates(
+ syncable::Directory* dir,
+ syncable::ModelNeutralWriteTransaction* trans,
+ ModelType type,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status) {
+ for (SyncEntityList::const_iterator update_it = applicable_updates.begin();
+ update_it != applicable_updates.end(); ++update_it) {
+ DCHECK_EQ(type, GetModelType(**update_it));
+ if (!UpdateContainsNewVersion(trans, **update_it))
status->increment_num_reflected_updates_downloaded_by(1);
- if (update.deleted())
+ if ((*update_it)->deleted())
status->increment_num_tombstone_updates_downloaded_by(1);
-
+ VerifyResult verify_result = VerifyUpdate(trans, **update_it, type);
if (verify_result != VERIFY_SUCCESS && verify_result != VERIFY_UNDELETE)
continue;
-
- ServerUpdateProcessingResult process_result =
- ProcessUpdate(update, dir->GetCryptographer(&trans), &trans);
-
- DCHECK(process_result == SUCCESS_PROCESSED ||
- process_result == SUCCESS_STORED);
+ ProcessUpdate(**update_it, dir->GetCryptographer(trans), trans);
}
-
- return SYNCER_OK;
}
namespace {
@@ -159,8 +130,9 @@ namespace {
// will have refused to unify the update.
// We should not attempt to apply it at all since it violates consistency
// rules.
-VerifyResult VerifyTagConsistency(const sync_pb::SyncEntity& entry,
- const syncable::MutableEntry& same_id) {
+VerifyResult VerifyTagConsistency(
+ const sync_pb::SyncEntity& entry,
+ const syncable::ModelNeutralMutableEntry& same_id) {
if (entry.has_client_defined_unique_tag() &&
entry.client_defined_unique_tag() !=
same_id.GetUniqueClientTag()) {
@@ -171,10 +143,10 @@ VerifyResult VerifyTagConsistency(const sync_pb::SyncEntity& entry,
} // namespace
-VerifyResult ProcessUpdatesCommand::VerifyUpdate(
- syncable::WriteTransaction* trans, const sync_pb::SyncEntity& entry,
- ModelTypeSet requested_types,
- const ModelSafeRoutingInfo& routes) {
+VerifyResult VerifyUpdate(
+ syncable::ModelNeutralWriteTransaction* trans,
+ const sync_pb::SyncEntity& entry,
+ ModelType requested_type) {
syncable::Id id = SyncableIdFromProto(entry.id_string());
VerifyResult result = VERIFY_FAIL;
@@ -194,7 +166,7 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
}
}
- syncable::MutableEntry same_id(trans, GET_BY_ID, id);
+ syncable::ModelNeutralMutableEntry same_id(trans, GET_BY_ID, id);
result = VerifyNewEntry(entry, &same_id, deleted);
ModelType placement_type = !deleted ? GetModelType(entry)
@@ -208,8 +180,7 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
if (deleted) {
// For deletes the server could send tombstones for items that
// the client did not request. If so, ignore those items.
- if (IsRealDataType(placement_type) &&
- !requested_types.Has(placement_type)) {
+ if (IsRealDataType(placement_type) && requested_type != placement_type) {
result = VERIFY_SKIP;
} else {
result = VERIFY_SUCCESS;
@@ -220,8 +191,8 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
// If we have an existing entry, we check here for updates that break
// consistency rules.
if (VERIFY_UNDECIDED == result) {
- result = VerifyUpdateConsistency(trans, entry, &same_id,
- deleted, is_directory, model_type);
+ result = VerifyUpdateConsistency(trans, entry, deleted,
+ is_directory, model_type, &same_id);
}
if (VERIFY_UNDECIDED == result)
@@ -232,9 +203,9 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
namespace {
// Returns true if the entry is still ok to process.
-bool ReverifyEntry(syncable::WriteTransaction* trans,
+bool ReverifyEntry(syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& entry,
- syncable::MutableEntry* same_id) {
+ syncable::ModelNeutralMutableEntry* same_id) {
const bool deleted = entry.has_deleted() && entry.deleted();
const bool is_directory = IsFolder(entry);
@@ -242,18 +213,18 @@ bool ReverifyEntry(syncable::WriteTransaction* trans,
return VERIFY_SUCCESS == VerifyUpdateConsistency(trans,
entry,
- same_id,
deleted,
is_directory,
- model_type);
+ model_type,
+ same_id);
}
} // namespace
// Process a single update. Will avoid touching global state.
-ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
+void ProcessUpdate(
const sync_pb::SyncEntity& update,
const Cryptographer* cryptographer,
- syncable::WriteTransaction* const trans) {
+ syncable::ModelNeutralWriteTransaction* const trans) {
const syncable::Id& server_id = SyncableIdFromProto(update.id_string());
const std::string name = SyncerProtoUtil::NameFromSyncEntity(update);
@@ -263,19 +234,19 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// FindLocalEntryToUpdate has veto power.
if (local_id.IsNull()) {
- return SUCCESS_PROCESSED; // The entry has become irrelevant.
+ return; // The entry has become irrelevant.
}
CreateNewEntry(trans, local_id);
// We take a two step approach. First we store the entries data in the
// server fields of a local entry and then move the data to the local fields
- syncable::MutableEntry target_entry(trans, GET_BY_ID, local_id);
+ syncable::ModelNeutralMutableEntry target_entry(trans, GET_BY_ID, local_id);
// We need to run the Verify checks again; the world could have changed
// since we last verified.
if (!ReverifyEntry(trans, update, &target_entry)) {
- return SUCCESS_PROCESSED; // The entry has become irrelevant.
+ return; // The entry has become irrelevant.
}
// If we're repurposing an existing local entry with a new server ID,
@@ -352,7 +323,7 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
UpdateServerFieldsFromUpdate(&target_entry, update, name);
- return SUCCESS_PROCESSED;
+ return;
}
} // namespace syncer
diff --git a/chromium/sync/engine/process_updates_util.h b/chromium/sync/engine/process_updates_util.h
new file mode 100644
index 00000000000..6e8bc71b859
--- /dev/null
+++ b/chromium/sync/engine/process_updates_util.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_PROCESS_UPDATES_UTIL_H_
+#define SYNC_ENGINE_PROCESS_UPDATES_UTIL_H_
+
+#include <map>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "sync/base/sync_export.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/internal_api/public/base/model_type.h"
+
+namespace sync_pb {
+class GetUpdatesResponse;
+class SyncEntity;
+}
+
+namespace syncer {
+
+namespace sessions {
+class StatusController;
+}
+
+namespace syncable {
+class ModelNeutralWriteTransaction;
+class Directory;
+}
+
+class Cryptographer;
+
+// TODO(rlarocque): Move these definitions somewhere else?
+typedef std::vector<const sync_pb::SyncEntity*> SyncEntityList;
+typedef std::map<ModelType, SyncEntityList> TypeSyncEntityMap;
+
+// Given a GetUpdates response, iterates over all the returned items and
+// divides them according to their type. Outputs a map from model types to
+// received SyncEntities. The output map will have entries (possibly empty)
+// for all types in |requested_types|.
+void PartitionUpdatesByType(
+ const sync_pb::GetUpdatesResponse& updates,
+ ModelTypeSet requested_types,
+ TypeSyncEntityMap* updates_by_type);
+
+// Processes all the updates associated with a single ModelType.
+void ProcessDownloadedUpdates(
+ syncable::Directory* dir,
+ syncable::ModelNeutralWriteTransaction* trans,
+ ModelType type,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status);
+
+// Checks whether or not an update is fit for processing.
+//
+// The answer may be "no" if the update appears invalid, if it's not relevant
+// (i.e. a delete for an item we've never heard of), or for other reasons.
+VerifyResult VerifyUpdate(
+ syncable::ModelNeutralWriteTransaction* trans,
+ const sync_pb::SyncEntity& entry,
+ ModelType requested_type);
+
+// If the update passes a series of checks, this function will copy
+// the SyncEntity's data into the SERVER side of the syncable::Directory.
+void ProcessUpdate(
+ const sync_pb::SyncEntity& proto_update,
+ const Cryptographer* cryptographer,
+ syncable::ModelNeutralWriteTransaction* const trans);
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_PROCESS_UPDATES_UTIL_H_
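The helpers declared above replace the old command's per-group iteration. A minimal sketch (not part of the patch; the function name and wiring are illustrative assumptions) of how a caller might chain them, assuming the directory, transaction, status controller and GetUpdates response already exist:

  // Illustrative glue code only; everything outside process_updates_util.h is
  // an assumption for this sketch.
  #include "sync/engine/process_updates_util.h"
  #include "sync/protocol/sync.pb.h"
  #include "sync/sessions/status_controller.h"
  #include "sync/syncable/directory.h"
  #include "sync/syncable/syncable_model_neutral_write_transaction.h"

  void HandleDownloadedUpdates(
      syncer::syncable::Directory* dir,
      syncer::syncable::ModelNeutralWriteTransaction* trans,
      const sync_pb::GetUpdatesResponse& updates,
      syncer::ModelTypeSet requested_types,
      syncer::sessions::StatusController* status) {
    // Split the flat list of entities from the server into one bucket per
    // requested type (empty buckets are created for types with no updates).
    syncer::TypeSyncEntityMap updates_by_type;
    syncer::PartitionUpdatesByType(updates, requested_types, &updates_by_type);

    // Verify each bucket and store its entities on the SERVER side of the
    // directory.
    for (syncer::TypeSyncEntityMap::const_iterator it = updates_by_type.begin();
         it != updates_by_type.end(); ++it) {
      syncer::ProcessDownloadedUpdates(dir, trans, it->first, it->second,
                                       status);
    }
  }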
diff --git a/chromium/sync/engine/store_timestamps_command.cc b/chromium/sync/engine/store_timestamps_command.cc
deleted file mode 100644
index cbff68fec50..00000000000
--- a/chromium/sync/engine/store_timestamps_command.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/store_timestamps_command.h"
-
-#include "base/logging.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-
-namespace syncer {
-
-ModelTypeSet ProcessNewProgressMarkers(
- const sync_pb::GetUpdatesResponse& response,
- syncable::Directory* dir) {
- ModelTypeSet forward_progress_types;
- // If a marker was omitted for any one type, that indicates no
- // change from the previous state.
- for (int i = 0; i < response.new_progress_marker_size(); ++i) {
- int field_number = response.new_progress_marker(i).data_type_id();
- ModelType model_type = GetModelTypeFromSpecificsFieldNumber(field_number);
- if (!IsRealDataType(model_type)) {
- DLOG(WARNING) << "Unknown field number " << field_number;
- continue;
- }
- forward_progress_types.Put(model_type);
- dir->SetDownloadProgress(model_type, response.new_progress_marker(i));
- }
- return forward_progress_types;
-}
-
-StoreTimestampsCommand::StoreTimestampsCommand() {}
-StoreTimestampsCommand::~StoreTimestampsCommand() {}
-
-SyncerError StoreTimestampsCommand::ExecuteImpl(
- sessions::SyncSession* session) {
- const sync_pb::GetUpdatesResponse& updates =
- session->status_controller().updates_response().get_updates();
-
- sessions::StatusController* status = session->mutable_status_controller();
-
- ModelTypeSet forward_progress_types =
- ProcessNewProgressMarkers(updates, session->context()->directory());
- DCHECK(!forward_progress_types.Empty() ||
- updates.changes_remaining() == 0);
- if (VLOG_IS_ON(1)) {
- DVLOG_IF(1, !forward_progress_types.Empty())
- << "Get Updates got new progress marker for types: "
- << ModelTypeSetToString(forward_progress_types)
- << " out of possible: "
- << ModelTypeSetToString(status->updates_request_types());
- }
- if (updates.has_changes_remaining()) {
- int64 changes_left = updates.changes_remaining();
- DVLOG(1) << "Changes remaining: " << changes_left;
- status->set_num_server_changes_remaining(changes_left);
- }
-
- return SYNCER_OK;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/store_timestamps_command.h b/chromium/sync/engine/store_timestamps_command.h
deleted file mode 100644
index a6426a8937b..00000000000
--- a/chromium/sync/engine/store_timestamps_command.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_STORE_TIMESTAMPS_COMMAND_H_
-#define SYNC_ENGINE_STORE_TIMESTAMPS_COMMAND_H_
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/syncer_command.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace sync_pb {
-class GetUpdatesResponse;
-} // namespace sync_pb
-
-namespace syncer {
-
-namespace syncable {
-class Directory;
-} // namespace syncable
-
-// Sets |dir|'s progress markers from the data in |response|. Returns
-// the set of model types with new progress markers.
-SYNC_EXPORT_PRIVATE ModelTypeSet ProcessNewProgressMarkers(
- const sync_pb::GetUpdatesResponse& response,
- syncable::Directory* dir);
-
-// A syncer command that extracts the changelog timestamp information from
-// a GetUpdatesResponse (fetched in DownloadUpdatesCommand) and stores
-// it in the directory. This is meant to run immediately after
-// ProcessUpdatesCommand.
-//
-// Preconditions - all updates in the SyncerSesssion have been stored in the
-// database, meaning it is safe to update the persisted
-// timestamps.
-//
-// Postconditions - The next_timestamp returned by the server will be
-// saved into the directory (where it will be used
-// the next time that DownloadUpdatesCommand runs).
-class StoreTimestampsCommand : public SyncerCommand {
- public:
- StoreTimestampsCommand();
- virtual ~StoreTimestampsCommand();
-
- // SyncerCommand implementation.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StoreTimestampsCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_STORE_TIMESTAMPS_COMMAND_H_
diff --git a/chromium/sync/engine/store_timestamps_command_unittest.cc b/chromium/sync/engine/store_timestamps_command_unittest.cc
deleted file mode 100644
index e49eb3126ee..00000000000
--- a/chromium/sync/engine/store_timestamps_command_unittest.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "sync/engine/store_timestamps_command.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/test/engine/syncer_command_test.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-// Adds a progress marker to |response| for the given field number and
-// token.
-void AddProgressMarkerForFieldNumber(
- sync_pb::GetUpdatesResponse* response,
- int field_number, const std::string& token) {
- sync_pb::DataTypeProgressMarker* marker =
- response->add_new_progress_marker();
- marker->set_data_type_id(field_number);
- marker->set_token(token);
-}
-
-// Adds a progress marker to |response| for the given model type and
-// token.
-void AddProgressMarkerForModelType(
- sync_pb::GetUpdatesResponse* response,
- ModelType model_type, const std::string& token) {
- AddProgressMarkerForFieldNumber(
- response, GetSpecificsFieldNumberFromModelType(model_type), token);
-}
-
-class StoreTimestampsCommandTest : public SyncerCommandTest {
- protected:
- // Gets the directory's progress marker's token for the given model
- // type.
- std::string GetProgessMarkerToken(ModelType model_type) {
- sync_pb::DataTypeProgressMarker progress_marker;
- session()->context()->directory()->GetDownloadProgress(
- model_type, &progress_marker);
- EXPECT_EQ(
- GetSpecificsFieldNumberFromModelType(model_type),
- progress_marker.data_type_id());
- return progress_marker.token();
- }
-};
-
-// Builds a GetUpdatesResponse with some progress markers, including
-// invalid ones. ProcessNewProgressMarkers() should return the model
-// types for the valid progress markers and fill in the progress
-// markers in the directory.
-TEST_F(StoreTimestampsCommandTest, ProcessNewProgressMarkers) {
- sync_pb::GetUpdatesResponse response;
- AddProgressMarkerForModelType(&response, BOOKMARKS, "token1");
- AddProgressMarkerForModelType(&response,
- HISTORY_DELETE_DIRECTIVES, "token2");
- AddProgressMarkerForFieldNumber(&response, -1, "bad token");
-
- ModelTypeSet forward_progress_types =
- ProcessNewProgressMarkers(
- response, session()->context()->directory());
-
- EXPECT_TRUE(
- forward_progress_types.Equals(
- ModelTypeSet(BOOKMARKS, HISTORY_DELETE_DIRECTIVES)));
-
- EXPECT_EQ("token1", GetProgessMarkerToken(BOOKMARKS));
- EXPECT_EQ("token2", GetProgessMarkerToken(HISTORY_DELETE_DIRECTIVES));
-
- ModelTypeSet non_forward_progress_types =
- Difference(ProtocolTypes(), forward_progress_types);
- for (ModelTypeSet::Iterator it = non_forward_progress_types.First();
- it.Good(); it.Inc()) {
- EXPECT_TRUE(GetProgessMarkerToken(it.Get()).empty());
- }
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contribution.cc b/chromium/sync/engine/sync_directory_commit_contribution.cc
new file mode 100644
index 00000000000..f43131e300f
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_commit_contribution.cc
@@ -0,0 +1,164 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_commit_contribution.h"
+
+#include "sync/engine/commit_util.h"
+#include "sync/engine/get_commit_ids.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
+
+namespace syncer {
+
+using syncable::GET_BY_HANDLE;
+using syncable::SYNCER;
+
+SyncDirectoryCommitContribution::~SyncDirectoryCommitContribution() {
+ DCHECK(!syncing_bits_set_);
+}
+
+// static.
+SyncDirectoryCommitContribution* SyncDirectoryCommitContribution::Build(
+ syncable::Directory* dir,
+ ModelType type,
+ size_t max_entries) {
+ std::vector<int64> metahandles;
+
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir);
+ GetCommitIdsForType(&trans, type, max_entries, &metahandles);
+
+ if (metahandles.empty())
+ return NULL;
+
+ google::protobuf::RepeatedPtrField<sync_pb::SyncEntity> entities;
+ for (std::vector<int64>::iterator it = metahandles.begin();
+ it != metahandles.end(); ++it) {
+ sync_pb::SyncEntity* entity = entities.Add();
+ syncable::ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ commit_util::BuildCommitItem(entry, entity);
+ entry.PutSyncing(true);
+ }
+
+ return new SyncDirectoryCommitContribution(metahandles, entities, dir);
+}
+
+void SyncDirectoryCommitContribution::AddToCommitMessage(
+ sync_pb::ClientToServerMessage* msg) {
+ DCHECK(syncing_bits_set_);
+ sync_pb::CommitMessage* commit_message = msg->mutable_commit();
+ entries_start_index_ = commit_message->entries_size();
+ std::copy(entities_.begin(),
+ entities_.end(),
+ RepeatedPtrFieldBackInserter(commit_message->mutable_entries()));
+}
+
+SyncerError SyncDirectoryCommitContribution::ProcessCommitResponse(
+ const sync_pb::ClientToServerResponse& response,
+ sessions::StatusController* status) {
+ DCHECK(syncing_bits_set_);
+ const sync_pb::CommitResponse& commit_response = response.commit();
+
+ int transient_error_commits = 0;
+ int conflicting_commits = 0;
+ int error_commits = 0;
+ int successes = 0;
+
+ std::set<syncable::Id> deleted_folders;
+ {
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
+ for (size_t i = 0; i < metahandles_.size(); ++i) {
+ sync_pb::CommitResponse::ResponseType response_type =
+ commit_util::ProcessSingleCommitResponse(
+ &trans,
+ commit_response.entryresponse(entries_start_index_ + i),
+ entities_.Get(i),
+ metahandles_[i],
+ &deleted_folders);
+ switch (response_type) {
+ case sync_pb::CommitResponse::INVALID_MESSAGE:
+ ++error_commits;
+ break;
+ case sync_pb::CommitResponse::CONFLICT:
+ ++conflicting_commits;
+ status->increment_num_server_conflicts();
+ break;
+ case sync_pb::CommitResponse::SUCCESS:
+ ++successes;
+ {
+ syncable::Entry e(&trans, GET_BY_HANDLE, metahandles_[i]);
+ if (e.GetModelType() == BOOKMARKS)
+ status->increment_num_successful_bookmark_commits();
+ }
+ status->increment_num_successful_commits();
+ break;
+ case sync_pb::CommitResponse::OVER_QUOTA:
+ // We handle over quota like a retry, which is the same as transient.
+ case sync_pb::CommitResponse::RETRY:
+ case sync_pb::CommitResponse::TRANSIENT_ERROR:
+ ++transient_error_commits;
+ break;
+ default:
+ LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
+ }
+ }
+ MarkDeletedChildrenSynced(dir_, &trans, &deleted_folders);
+ }
+
+ int commit_count = static_cast<int>(metahandles_.size());
+ if (commit_count == successes) {
+ return SYNCER_OK;
+ } else if (error_commits > 0) {
+ return SERVER_RETURN_UNKNOWN_ERROR;
+ } else if (transient_error_commits > 0) {
+ return SERVER_RETURN_TRANSIENT_ERROR;
+ } else if (conflicting_commits > 0) {
+ // This means that the server already has an item with this version, but
+ // we haven't seen that update yet.
+ //
+ // A well-behaved client should respond to this by proceeding to the
+ // download updates phase, fetching the conflicting items, then attempting
+ // to resolve the conflict. That's not what this client does.
+ //
+ // We don't currently have any code to support that exceptional control
+ // flow. Instead, we abort the current sync cycle and start a new one. The
+ // end result is the same.
+ return SERVER_RETURN_CONFLICT;
+ } else {
+ LOG(FATAL) << "Inconsistent counts when processing commit response";
+ return SYNCER_OK;
+ }
+}
+
+void SyncDirectoryCommitContribution::CleanUp() {
+ DCHECK(syncing_bits_set_);
+ UnsetSyncingBits();
+}
+
+size_t SyncDirectoryCommitContribution::GetNumEntries() const {
+ return metahandles_.size();
+}
+
+SyncDirectoryCommitContribution::SyncDirectoryCommitContribution(
+ const std::vector<int64>& metahandles,
+ const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
+ syncable::Directory* dir)
+ : dir_(dir),
+ metahandles_(metahandles),
+ entities_(entities),
+ entries_start_index_(0xDEADBEEF),
+ syncing_bits_set_(true) {
+}
+
+void SyncDirectoryCommitContribution::UnsetSyncingBits() {
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
+ for (std::vector<int64>::const_iterator it = metahandles_.begin();
+ it != metahandles_.end(); ++it) {
+ syncable::ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ entry.PutSyncing(false);
+ }
+ syncing_bits_set_ = false;
+}
+
+} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contribution.h b/chromium/sync/engine/sync_directory_commit_contribution.h
new file mode 100644
index 00000000000..89340566755
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_commit_contribution.h
@@ -0,0 +1,102 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
+#define SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
+
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/util/syncer_error.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/sessions/status_controller.h"
+
+namespace syncer {
+
+namespace sessions {
+class StatusController;
+} // namespace sessions
+
+namespace syncable {
+class Directory;
+} // namespace syncable
+
+// This class represents a set of items belonging to a particular data type that
+// have been selected from the syncable Directory and prepared for commit.
+//
+// This class handles the bookkeeping related to the commit of these items,
+// including processing the commit response message and setting and unsetting
+// the SYNCING bits.
+class SYNC_EXPORT_PRIVATE SyncDirectoryCommitContribution {
+ public:
+ // This destructor will DCHECK if UnsetSyncingBits() has not been called yet.
+ ~SyncDirectoryCommitContribution();
+
+ // Build a CommitContribution from the IS_UNSYNCED items in |dir| with the
+ // given |type|. The contribution will include at most |max_items| entries.
+ //
+ // This function may return NULL if this type has no items ready for and
+ // requiring commit. This function may make model-neutral changes to the
+ // directory.
+ static SyncDirectoryCommitContribution* Build(
+ syncable::Directory* dir,
+ ModelType type,
+ size_t max_items);
+
+ // Serialize this contribution's entries to the given commit request |msg|.
+ //
+ // This function is not const. It will update some state in this contribution
+ // that will be used when processing the associated commit response. This
+ // function should not be called more than once.
+ void AddToCommitMessage(sync_pb::ClientToServerMessage* msg);
+
+ // Updates this contribution's contents in accordance with the provided
+ // |response|.
+ //
+ // This function may make model-neutral changes to the directory. It is not
+ // valid to call this function unless AddToCommitMessage() was called earlier.
+ // This function should not be called more than once.
+ SyncerError ProcessCommitResponse(
+ const sync_pb::ClientToServerResponse& response,
+ sessions::StatusController* status);
+
+ // Cleans up any temporary state associated with the commit. Must be called
+ // before destruction.
+ void CleanUp();
+
+ // Returns the number of entries included in this contribution.
+ size_t GetNumEntries() const;
+
+ private:
+ class SyncDirectoryCommitContributionTest;
+ FRIEND_TEST_ALL_PREFIXES(SyncDirectoryCommitContributionTest, GatherByTypes);
+ FRIEND_TEST_ALL_PREFIXES(SyncDirectoryCommitContributionTest,
+ GatherAndTruncate);
+
+ SyncDirectoryCommitContribution(
+ const std::vector<int64>& metahandles,
+ const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
+ syncable::Directory* directory);
+
+ void UnsetSyncingBits();
+
+ syncable::Directory* dir_;
+ const std::vector<int64> metahandles_;
+ const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity> entities_;
+ size_t entries_start_index_;
+
+ // This flag tracks whether or not the directory entries associated with
+ // this commit still have their SYNCING bits set. These bits will be set when
+ // the CommitContribution is created with Build() and unset when CleanUp() is
+ // called. This flag must be unset by the time our destructor is called.
+ bool syncing_bits_set_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncDirectoryCommitContribution);
+};
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
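The Build() / AddToCommitMessage() / ProcessCommitResponse() / CleanUp() sequence described above is exercised in the unit tests that follow; a condensed sketch of the intended call order, assuming |dir| is an initialized syncable::Directory and the server round trip happens outside this snippet:

  scoped_ptr<syncer::SyncDirectoryCommitContribution> contribution(
      syncer::SyncDirectoryCommitContribution::Build(dir, syncer::PREFERENCES,
                                                     25));
  if (contribution) {  // NULL when PREFERENCES has nothing to commit.
    sync_pb::ClientToServerMessage request;
    contribution->AddToCommitMessage(&request);

    // |response| is assumed to be the server's reply to |request|.
    sync_pb::ClientToServerResponse response;
    syncer::sessions::StatusController status;
    syncer::SyncerError result =
        contribution->ProcessCommitResponse(response, &status);

    // CleanUp() must run before destruction so the SYNCING bits set by
    // Build() are cleared; |result| tells the caller how the commit went.
    contribution->CleanUp();
  }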
diff --git a/chromium/sync/engine/sync_directory_commit_contribution_unittest.cc b/chromium/sync/engine/sync_directory_commit_contribution_unittest.cc
new file mode 100644
index 00000000000..75f88bd8550
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_commit_contribution_unittest.cc
@@ -0,0 +1,235 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_commit_contribution.h"
+
+#include "base/message_loop/message_loop.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/entry.h"
+#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/syncable_read_transaction.h"
+#include "sync/syncable/syncable_write_transaction.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/engine/test_syncable_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+class SyncDirectoryCommitContributionTest : public ::testing::Test {
+ public:
+ virtual void SetUp() OVERRIDE {
+ dir_maker_.SetUp();
+
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ CreateTypeRoot(&trans, dir(), PREFERENCES);
+ CreateTypeRoot(&trans, dir(), EXTENSIONS);
+ }
+
+ virtual void TearDown() OVERRIDE {
+ dir_maker_.TearDown();
+ }
+
+ protected:
+ int64 CreateUnsyncedItem(syncable::WriteTransaction* trans,
+ ModelType type,
+ const std::string& tag) {
+ syncable::Entry parent_entry(
+ trans,
+ syncable::GET_BY_SERVER_TAG,
+ ModelTypeToRootTag(type));
+ syncable::MutableEntry entry(
+ trans,
+ syncable::CREATE,
+ type,
+ parent_entry.GetId(),
+ tag);
+ entry.PutIsUnsynced(true);
+ return entry.GetMetahandle();
+ }
+
+ void CreateSuccessfulCommitResponse(
+ const sync_pb::SyncEntity& entity,
+ sync_pb::CommitResponse::EntryResponse* response) {
+ response->set_response_type(sync_pb::CommitResponse::SUCCESS);
+ response->set_non_unique_name(entity.name());
+ response->set_version(entity.version() + 1);
+ response->set_parent_id_string(entity.parent_id_string());
+
+ if (entity.id_string()[0] == '-') // Look for the - in 'c-1234' style IDs.
+ response->set_id_string(id_factory_.NewServerId().GetServerId());
+ else
+ response->set_id_string(entity.id_string());
+ }
+
+ syncable::Directory* dir() {
+ return dir_maker_.directory();
+ }
+
+ TestIdFactory id_factory_;
+
+ private:
+ base::MessageLoop loop_; // Needed to initialize the directory.
+ TestDirectorySetterUpper dir_maker_;
+};
+
+// Verify that the SyncDirectoryCommitContribution contains only entries of its
+// specified type.
+TEST_F(SyncDirectoryCommitContributionTest, GatherByTypes) {
+ int64 pref1;
+ int64 pref2;
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ pref1 = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ pref2 = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 5));
+ ASSERT_EQ(2U, cc->GetNumEntries());
+
+ const std::vector<int64>& metahandles = cc->metahandles_;
+ EXPECT_TRUE(std::find(metahandles.begin(), metahandles.end(), pref1) !=
+ metahandles.end());
+ EXPECT_TRUE(std::find(metahandles.begin(), metahandles.end(), pref2) !=
+ metahandles.end());
+
+ cc->CleanUp();
+}
+
+// Verify that the SyncDirectoryCommitContributionTest builder function
+// truncates if necessary.
+TEST_F(SyncDirectoryCommitContributionTest, GatherAndTruncate) {
+ int64 pref1;
+ int64 pref2;
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ pref1 = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ pref2 = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 1));
+ ASSERT_EQ(1U, cc->GetNumEntries());
+
+ int64 only_metahandle = cc->metahandles_[0];
+ EXPECT_TRUE(only_metahandle == pref1 || only_metahandle == pref2);
+
+ cc->CleanUp();
+}
+
+// Sanity check for building commits from SyncDirectoryCommitContributions.
+// This test makes two CommitContribution objects of different types and uses
+// them to initialize a commit message. Then it checks that the contents of the
+// commit message match those of the directory they came from.
+TEST_F(SyncDirectoryCommitContributionTest, PrepareCommit) {
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> pref_cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 25));
+ scoped_ptr<SyncDirectoryCommitContribution> ext_cc(
+ SyncDirectoryCommitContribution::Build(dir(), EXTENSIONS, 25));
+
+ sync_pb::ClientToServerMessage message;
+ pref_cc->AddToCommitMessage(&message);
+ ext_cc->AddToCommitMessage(&message);
+
+ const sync_pb::CommitMessage& commit_message = message.commit();
+
+ std::set<syncable::Id> ids_for_commit;
+ ASSERT_EQ(3, commit_message.entries_size());
+ for (int i = 0; i < commit_message.entries_size(); ++i) {
+ const sync_pb::SyncEntity& entity = commit_message.entries(i);
+ // The entities in this test have client-style IDs since they've never been
+ // committed before, so we must use CreateFromClientString to re-create them
+ // from the commit message.
+ ids_for_commit.insert(syncable::Id::CreateFromClientString(
+ entity.id_string()));
+ }
+
+ ASSERT_EQ(3U, ids_for_commit.size());
+ {
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ for (std::set<syncable::Id>::iterator it = ids_for_commit.begin();
+ it != ids_for_commit.end(); ++it) {
+ SCOPED_TRACE(it->value());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, *it);
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.GetSyncing());
+ }
+ }
+
+ pref_cc->CleanUp();
+ ext_cc->CleanUp();
+}
+
+// Creates some unsynced items, pretends to commit them, and hands back a
+// specially crafted response to the syncer in order to test commit response
+// processing. The response simulates a successful commit scenario.
+TEST_F(SyncDirectoryCommitContributionTest, ProcessCommitResponse) {
+ int64 pref1_handle;
+ int64 pref2_handle;
+ int64 ext1_handle;
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ pref1_handle = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ pref2_handle = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ ext1_handle = CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> pref_cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 25));
+ scoped_ptr<SyncDirectoryCommitContribution> ext_cc(
+ SyncDirectoryCommitContribution::Build(dir(), EXTENSIONS, 25));
+
+ sync_pb::ClientToServerMessage message;
+ pref_cc->AddToCommitMessage(&message);
+ ext_cc->AddToCommitMessage(&message);
+
+ const sync_pb::CommitMessage& commit_message = message.commit();
+ ASSERT_EQ(3, commit_message.entries_size());
+
+ sync_pb::ClientToServerResponse response;
+ for (int i = 0; i < commit_message.entries_size(); ++i) {
+ sync_pb::SyncEntity entity = commit_message.entries(i);
+ sync_pb::CommitResponse_EntryResponse* entry_response =
+ response.mutable_commit()->add_entryresponse();
+ CreateSuccessfulCommitResponse(entity, entry_response);
+ }
+
+ sessions::StatusController status;
+
+ // Process these in reverse order. Just because we can.
+ ext_cc->ProcessCommitResponse(response, &status);
+ pref_cc->ProcessCommitResponse(response, &status);
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ syncable::Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
+ EXPECT_TRUE(p1.GetId().ServerKnows());
+ EXPECT_FALSE(p1.GetSyncing());
+ EXPECT_LT(0, p1.GetServerVersion());
+
+ syncable::Entry p2(&trans, syncable::GET_BY_HANDLE, pref2_handle);
+ EXPECT_TRUE(p2.GetId().ServerKnows());
+ EXPECT_FALSE(p2.GetSyncing());
+ EXPECT_LT(0, p2.GetServerVersion());
+
+ syncable::Entry e1(&trans, syncable::GET_BY_HANDLE, ext1_handle);
+ EXPECT_TRUE(e1.GetId().ServerKnows());
+ EXPECT_FALSE(e1.GetSyncing());
+ EXPECT_LT(0, e1.GetServerVersion());
+ }
+
+ pref_cc->CleanUp();
+ ext_cc->CleanUp();
+}
+
+} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contributor.cc b/chromium/sync/engine/sync_directory_commit_contributor.cc
new file mode 100644
index 00000000000..c87c8eda870
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_commit_contributor.cc
@@ -0,0 +1,24 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_commit_contributor.h"
+
+#include "sync/engine/sync_directory_commit_contribution.h"
+
+namespace syncer {
+
+SyncDirectoryCommitContributor::SyncDirectoryCommitContributor(
+ syncable::Directory* dir,
+ ModelType type)
+ : dir_(dir),
+ type_(type) {}
+
+SyncDirectoryCommitContributor::~SyncDirectoryCommitContributor() {}
+
+SyncDirectoryCommitContribution*
+SyncDirectoryCommitContributor::GetContribution(size_t max_entries) {
+ return SyncDirectoryCommitContribution::Build(dir_, type_, max_entries);
+}
+
+} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contributor.h b/chromium/sync/engine/sync_directory_commit_contributor.h
new file mode 100644
index 00000000000..6ffaeb7761a
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_commit_contributor.h
@@ -0,0 +1,45 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
+#define SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
+
+#include <map>
+
+#include "sync/internal_api/public/base/model_type.h"
+
+namespace syncer {
+
+class SyncDirectoryCommitContribution;
+
+namespace syncable {
+class Directory;
+}
+
+// This class represents the syncable::Directory as a source of items to commit
+// to the sync server.
+//
+// Each instance of this class represents a particular type within the
+// syncable::Directory. When asked, it will iterate through the directory, grab
+// any items of its type that are ready for commit, and return them in the form
+// of a SyncDirectoryCommitContribution.
+class SyncDirectoryCommitContributor {
+ public:
+ SyncDirectoryCommitContributor(syncable::Directory* dir, ModelType type);
+ ~SyncDirectoryCommitContributor();
+
+ SyncDirectoryCommitContribution* GetContribution(size_t max_entries);
+
+ private:
+ syncable::Directory* dir_;
+ ModelType type_;
+};
+
+// TODO(rlarocque): Find a better place for this definition.
+typedef std::map<ModelType, SyncDirectoryCommitContributor*>
+ CommitContributorMap;
+
+} // namespace
+
+#endif // SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
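A hedged sketch of how the CommitContributorMap defined above might be populated and polled; |dir| and the 25-entry cap are placeholders, and ownership handling is simplified:

  syncer::CommitContributorMap contributors;
  contributors[syncer::BOOKMARKS] =
      new syncer::SyncDirectoryCommitContributor(dir, syncer::BOOKMARKS);
  contributors[syncer::PREFERENCES] =
      new syncer::SyncDirectoryCommitContributor(dir, syncer::PREFERENCES);

  std::vector<syncer::SyncDirectoryCommitContribution*> contributions;
  for (syncer::CommitContributorMap::iterator it = contributors.begin();
       it != contributors.end(); ++it) {
    // GetContribution() returns NULL when the type has nothing to commit.
    syncer::SyncDirectoryCommitContribution* contribution =
        it->second->GetContribution(25);
    if (contribution)
      contributions.push_back(contribution);
  }
  // The caller owns the contributors and the contributions and must delete
  // them, calling CleanUp() on each contribution first.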
diff --git a/chromium/sync/engine/sync_directory_update_handler.cc b/chromium/sync/engine/sync_directory_update_handler.cc
new file mode 100644
index 00000000000..1a9bd1ec6de
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_update_handler.cc
@@ -0,0 +1,148 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_update_handler.h"
+
+#include "sync/engine/conflict_resolver.h"
+#include "sync/engine/process_updates_util.h"
+#include "sync/engine/update_applicator.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/directory.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
+#include "sync/syncable/syncable_write_transaction.h"
+
+namespace syncer {
+
+using syncable::SYNCER;
+
+SyncDirectoryUpdateHandler::SyncDirectoryUpdateHandler(
+ syncable::Directory* dir,
+ ModelType type,
+ scoped_refptr<ModelSafeWorker> worker)
+ : dir_(dir),
+ type_(type),
+ worker_(worker) {}
+
+SyncDirectoryUpdateHandler::~SyncDirectoryUpdateHandler() {}
+
+void SyncDirectoryUpdateHandler::GetDownloadProgress(
+ sync_pb::DataTypeProgressMarker* progress_marker) const {
+ dir_->GetDownloadProgress(type_, progress_marker);
+}
+
+void SyncDirectoryUpdateHandler::ProcessGetUpdatesResponse(
+ const sync_pb::DataTypeProgressMarker& progress_marker,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status) {
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
+ UpdateSyncEntities(&trans, applicable_updates, status);
+ UpdateProgressMarker(progress_marker);
+}
+
+void SyncDirectoryUpdateHandler::ApplyUpdates(
+ sessions::StatusController* status) {
+ if (IsControlType(type_)) {
+ return; // We don't process control types here.
+ }
+
+ if (!dir_->TypeHasUnappliedUpdates(type_)) {
+ return; // No work to do. Skip this type.
+ }
+
+ WorkCallback c = base::Bind(
+ &SyncDirectoryUpdateHandler::ApplyUpdatesImpl,
+ // We wait until the callback has been executed, so it is safe to use Unretained.
+ base::Unretained(this),
+ base::Unretained(status));
+ worker_->DoWorkAndWaitUntilDone(c);
+}
+
+SyncerError SyncDirectoryUpdateHandler::ApplyUpdatesImpl(
+ sessions::StatusController* status) {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir_);
+
+ std::vector<int64> handles;
+ dir_->GetUnappliedUpdateMetaHandles(
+ &trans,
+ FullModelTypeSet(type_),
+ &handles);
+
+ // First set of update application passes.
+ UpdateApplicator applicator(dir_->GetCryptographer(&trans));
+ applicator.AttemptApplications(&trans, handles);
+ status->increment_num_updates_applied_by(applicator.updates_applied());
+ status->increment_num_hierarchy_conflicts_by(
+ applicator.hierarchy_conflicts());
+ status->increment_num_encryption_conflicts_by(
+ applicator.encryption_conflicts());
+
+ if (applicator.simple_conflict_ids().size() != 0) {
+ // Resolve the simple conflicts we just detected.
+ ConflictResolver resolver;
+ resolver.ResolveConflicts(&trans,
+ dir_->GetCryptographer(&trans),
+ applicator.simple_conflict_ids(),
+ status);
+
+ // Conflict resolution sometimes results in more updates to apply.
+ handles.clear();
+ dir_->GetUnappliedUpdateMetaHandles(
+ &trans,
+ FullModelTypeSet(type_),
+ &handles);
+
+ UpdateApplicator conflict_applicator(dir_->GetCryptographer(&trans));
+ conflict_applicator.AttemptApplications(&trans, handles);
+
+ // We count the number of updates from both applicator passes.
+ status->increment_num_updates_applied_by(
+ conflict_applicator.updates_applied());
+
+ // Encryption conflicts should remain unchanged by the resolution of simple
+ // conflicts. Those can only be solved by updating our nigori key bag.
+ DCHECK_EQ(conflict_applicator.encryption_conflicts(),
+ applicator.encryption_conflicts());
+
+ // Hierarchy conflicts should also remain unchanged, for reasons that are
+ // more subtle. Hierarchy conflicts exist when the application of a pending
+ // update from the server would make the local folder hierarchy
+ // inconsistent. The resolution of simple conflicts could never affect the
+ // hierarchy conflicting item directly, because hierarchy conflicts are not
+ // processed by the conflict resolver. It could, in theory, modify the
+ // local hierarchy on which hierarchy conflict detection depends. However,
+ // the conflict resolution algorithm currently in use does not allow this.
+ DCHECK_EQ(conflict_applicator.hierarchy_conflicts(),
+ applicator.hierarchy_conflicts());
+
+ // There should be no simple conflicts remaining. We know this because the
+ // resolver should have resolved all the conflicts we detected last time
+ // and because, by the two previous assertions, no conflicts have been
+ // downgraded from encryption or hierarchy down to simple.
+ DCHECK(conflict_applicator.simple_conflict_ids().empty());
+ }
+
+ return SYNCER_OK;
+}
+
+void SyncDirectoryUpdateHandler::UpdateSyncEntities(
+ syncable::ModelNeutralWriteTransaction* trans,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status) {
+ ProcessDownloadedUpdates(dir_, trans, type_, applicable_updates, status);
+}
+
+void SyncDirectoryUpdateHandler::UpdateProgressMarker(
+ const sync_pb::DataTypeProgressMarker& progress_marker) {
+ int field_number = progress_marker.data_type_id();
+ ModelType model_type = GetModelTypeFromSpecificsFieldNumber(field_number);
+ if (!IsRealDataType(model_type) || type_ != model_type) {
+ NOTREACHED()
+ << "Update handler of type " << ModelTypeToString(type_)
+ << " asked to process progress marker with invalid type "
+ << field_number;
+ }
+ dir_->SetDownloadProgress(type_, progress_marker);
+}
+
+} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_update_handler.h b/chromium/sync/engine/sync_directory_update_handler.h
new file mode 100644
index 00000000000..ea4d791465f
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_update_handler.h
@@ -0,0 +1,97 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNC_DIRECTORY_UPDATE_HANDLER_H_
+#define SYNC_ENGINE_SYNC_DIRECTORY_UPDATE_HANDLER_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "sync/base/sync_export.h"
+#include "sync/engine/process_updates_util.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/util/syncer_error.h"
+
+namespace sync_pb {
+class DataTypeProgressMarker;
+class GetUpdatesResponse;
+}
+
+namespace syncer {
+
+namespace sessions {
+class StatusController;
+}
+
+namespace syncable {
+class Directory;
+}
+
+class ModelSafeWorker;
+
+// This class represents the syncable::Directory's processes for requesting and
+// processing updates from the sync server.
+//
+// Each instance of this class represents a particular type in the
+// syncable::Directory. It can store and retrieve that type's progress markers.
+// It can also process a set of received SyncEntities and store their data.
+class SYNC_EXPORT_PRIVATE SyncDirectoryUpdateHandler {
+ public:
+ SyncDirectoryUpdateHandler(syncable::Directory* dir,
+ ModelType type,
+ scoped_refptr<ModelSafeWorker> worker);
+ ~SyncDirectoryUpdateHandler();
+
+ // Fills the given parameter with the stored progress marker for this type.
+ void GetDownloadProgress(
+ sync_pb::DataTypeProgressMarker* progress_marker) const;
+
+ // Processes the contents of a GetUpdates response message.
+ //
+ // Should be invoked with the progress marker and set of SyncEntities from a
+ // single GetUpdates response message. The progress marker's type must match
+ // this update handler's type, and the set of SyncEntities must include all
+ // entities of this type found in the response message.
+ void ProcessGetUpdatesResponse(
+ const sync_pb::DataTypeProgressMarker& progress_marker,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status);
+
+ // If there are updates to apply, apply them on the proper thread.
+ // Delegates to ApplyUpdatesImpl().
+ void ApplyUpdates(sessions::StatusController* status);
+
+ private:
+ friend class SyncDirectoryUpdateHandlerApplyUpdateTest;
+ friend class SyncDirectoryUpdateHandlerProcessUpdateTest;
+
+ // Processes the given SyncEntities and stores their data in the directory.
+ // Their types must match this update handler's type.
+ void UpdateSyncEntities(
+ syncable::ModelNeutralWriteTransaction* trans,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status);
+
+ // Stores the given progress marker in the directory.
+ // Its type must match this update handler's type.
+ void UpdateProgressMarker(
+ const sync_pb::DataTypeProgressMarker& progress_marker);
+
+ // Skips all checks and goes straight to applying the updates.
+ SyncerError ApplyUpdatesImpl(sessions::StatusController* status);
+
+ syncable::Directory* dir_;
+ ModelType type_;
+ scoped_refptr<ModelSafeWorker> worker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncDirectoryUpdateHandler);
+};
+
+// TODO(rlarocque): Find a better place to define this.
+typedef std::map<ModelType, SyncDirectoryUpdateHandler*> UpdateHandlerMap;
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_SYNC_DIRECTORY_UPDATE_HANDLER_H_
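A minimal sketch tying the update handler to the PartitionUpdatesByType() helper from process_updates_util.h; |dir|, |worker| and a received |gu_response| are assumed, and picking marker index 0 is purely illustrative:

  syncer::SyncDirectoryUpdateHandler handler(dir, syncer::BOOKMARKS, worker);

  syncer::TypeSyncEntityMap updates_by_type;
  syncer::PartitionUpdatesByType(
      gu_response, syncer::ModelTypeSet(syncer::BOOKMARKS), &updates_by_type);

  syncer::sessions::StatusController status;
  // Assumes the first new progress marker in the response belongs to BOOKMARKS.
  handler.ProcessGetUpdatesResponse(gu_response.new_progress_marker(0),
                                    updates_by_type[syncer::BOOKMARKS],
                                    &status);
  handler.ApplyUpdates(&status);  // Runs ApplyUpdatesImpl() on |worker|'s thread.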
diff --git a/chromium/sync/engine/sync_directory_update_handler_unittest.cc b/chromium/sync/engine/sync_directory_update_handler_unittest.cc
new file mode 100644
index 00000000000..86d447eba38
--- /dev/null
+++ b/chromium/sync/engine/sync_directory_update_handler_unittest.cc
@@ -0,0 +1,826 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_update_handler.h"
+
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop.h"
+#include "base/stl_util.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/test/test_entry_factory.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/directory.h"
+#include "sync/syncable/entry.h"
+#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
+#include "sync/syncable/syncable_proto_util.h"
+#include "sync/syncable/syncable_read_transaction.h"
+#include "sync/syncable/syncable_write_transaction.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/engine/test_syncable_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+using syncable::UNITTEST;
+
+// A test harness for tests that focus on processing updates.
+//
+// Update processing is what occurs when we first download updates. It converts
+// the received protobuf message into information in the syncable::Directory.
+// Any invalid or redundant updates will be dropped at this point.
+class SyncDirectoryUpdateHandlerProcessUpdateTest : public ::testing::Test {
+ public:
+ SyncDirectoryUpdateHandlerProcessUpdateTest()
+ : ui_worker_(new FakeModelWorker(GROUP_UI)) {
+ }
+
+ virtual ~SyncDirectoryUpdateHandlerProcessUpdateTest() {}
+
+ virtual void SetUp() OVERRIDE {
+ dir_maker_.SetUp();
+ }
+
+ virtual void TearDown() OVERRIDE {
+ dir_maker_.TearDown();
+ }
+
+ syncable::Directory* dir() {
+ return dir_maker_.directory();
+ }
+ protected:
+ scoped_ptr<sync_pb::SyncEntity> CreateUpdate(
+ const std::string& id,
+ const std::string& parent,
+ const ModelType& type);
+
+ // This exists mostly to give tests access to the private member function.
+ // Warning: This takes the syncable directory lock.
+ void UpdateSyncEntities(
+ SyncDirectoryUpdateHandler* handler,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status);
+
+ // Another function to access private member functions.
+ void UpdateProgressMarkers(
+ SyncDirectoryUpdateHandler* handler,
+ const sync_pb::DataTypeProgressMarker& progress);
+
+ scoped_refptr<FakeModelWorker> ui_worker() {
+ return ui_worker_;
+ }
+
+ private:
+ base::MessageLoop loop_; // Needed to initialize the directory.
+ TestDirectorySetterUpper dir_maker_;
+ scoped_refptr<FakeModelWorker> ui_worker_;
+};
+
+scoped_ptr<sync_pb::SyncEntity>
+SyncDirectoryUpdateHandlerProcessUpdateTest::CreateUpdate(
+ const std::string& id,
+ const std::string& parent,
+ const ModelType& type) {
+ scoped_ptr<sync_pb::SyncEntity> e(new sync_pb::SyncEntity());
+ e->set_id_string(id);
+ e->set_parent_id_string(parent);
+ e->set_non_unique_name(id);
+ e->set_name(id);
+ e->set_version(1000);
+ AddDefaultFieldValue(type, e->mutable_specifics());
+ return e.Pass();
+}
+
+void SyncDirectoryUpdateHandlerProcessUpdateTest::UpdateSyncEntities(
+ SyncDirectoryUpdateHandler* handler,
+ const SyncEntityList& applicable_updates,
+ sessions::StatusController* status) {
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, UNITTEST, dir());
+ handler->UpdateSyncEntities(&trans, applicable_updates, status);
+}
+
+void SyncDirectoryUpdateHandlerProcessUpdateTest::UpdateProgressMarkers(
+ SyncDirectoryUpdateHandler* handler,
+ const sync_pb::DataTypeProgressMarker& progress) {
+ handler->UpdateProgressMarker(progress);
+}
+
+static const char kCacheGuid[] = "IrcjZ2jyzHDV9Io4+zKcXQ==";
+
+// Test that the bookmark tag is set on newly downloaded items.
+TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest, NewBookmarkTag) {
+ SyncDirectoryUpdateHandler handler(dir(), BOOKMARKS, ui_worker());
+ sync_pb::GetUpdatesResponse gu_response;
+ sessions::StatusController status;
+
+ // Add a bookmark item to the update message.
+ std::string root = syncable::GetNullId().GetServerId();
+ syncable::Id server_id = syncable::Id::CreateFromServerId("b1");
+ scoped_ptr<sync_pb::SyncEntity> e =
+ CreateUpdate(SyncableIdToProto(server_id), root, BOOKMARKS);
+ e->set_originator_cache_guid(
+ std::string(kCacheGuid, arraysize(kCacheGuid)-1));
+ syncable::Id client_id = syncable::Id::CreateFromClientString("-2");
+ e->set_originator_client_item_id(client_id.GetServerId());
+ e->set_position_in_parent(0);
+
+ // Add it to the applicable updates list.
+ SyncEntityList bookmark_updates;
+ bookmark_updates.push_back(e.get());
+
+ // Process the update.
+ UpdateSyncEntities(&handler, bookmark_updates, &status);
+
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(UniquePosition::IsValidSuffix(entry.GetUniqueBookmarkTag()));
+ EXPECT_TRUE(entry.GetServerUniquePosition().IsValid());
+
+ // If this assertion fails, that might indicate that the algorithm used to
+ // generate bookmark tags has been modified. This could have implications for
+ // bookmark ordering. Please make sure you know what you're doing if you
+ // intend to make such a change.
+ EXPECT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=", entry.GetUniqueBookmarkTag());
+}
+
+// Test the receipt of a type root node.
+TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest,
+ ReceiveServerCreatedBookmarkFolders) {
+ SyncDirectoryUpdateHandler handler(dir(), BOOKMARKS, ui_worker());
+ sync_pb::GetUpdatesResponse gu_response;
+ sessions::StatusController status;
+
+ // Create an update that mimics the bookmark root.
+ syncable::Id server_id = syncable::Id::CreateFromServerId("xyz");
+ std::string root = syncable::GetNullId().GetServerId();
+ scoped_ptr<sync_pb::SyncEntity> e =
+ CreateUpdate(SyncableIdToProto(server_id), root, BOOKMARKS);
+ e->set_server_defined_unique_tag("google_chrome_bookmarks");
+ e->set_folder(true);
+
+ // Add it to the applicable updates list.
+ SyncEntityList bookmark_updates;
+ bookmark_updates.push_back(e.get());
+
+ EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
+
+ // Process it.
+ UpdateSyncEntities(&handler, bookmark_updates, &status);
+
+ // Verify the results.
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+
+ EXPECT_FALSE(entry.ShouldMaintainPosition());
+ EXPECT_FALSE(entry.GetUniquePosition().IsValid());
+ EXPECT_FALSE(entry.GetServerUniquePosition().IsValid());
+ EXPECT_TRUE(entry.GetUniqueBookmarkTag().empty());
+}
+
+// Test the receipt of a non-bookmark item.
+TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest, ReceiveNonBookmarkItem) {
+ SyncDirectoryUpdateHandler handler(dir(), PREFERENCES, ui_worker());
+ sync_pb::GetUpdatesResponse gu_response;
+ sessions::StatusController status;
+
+ std::string root = syncable::GetNullId().GetServerId();
+ syncable::Id server_id = syncable::Id::CreateFromServerId("xyz");
+ scoped_ptr<sync_pb::SyncEntity> e =
+ CreateUpdate(SyncableIdToProto(server_id), root, PREFERENCES);
+ e->set_server_defined_unique_tag("9PGRuKdX5sHyGMB17CvYTXuC43I=");
+
+ // Add it to the applicable updates list.
+ SyncEntityList autofill_updates;
+ autofill_updates.push_back(e.get());
+
+ EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
+
+ // Process it.
+ UpdateSyncEntities(&handler, autofill_updates, &status);
+
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+
+ EXPECT_FALSE(entry.ShouldMaintainPosition());
+ EXPECT_FALSE(entry.GetUniquePosition().IsValid());
+ EXPECT_FALSE(entry.GetServerUniquePosition().IsValid());
+ EXPECT_TRUE(entry.GetUniqueBookmarkTag().empty());
+}
+
+// Tests the setting of progress markers.
+TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest, ProcessNewProgressMarkers) {
+ SyncDirectoryUpdateHandler handler(dir(), BOOKMARKS, ui_worker());
+
+ sync_pb::DataTypeProgressMarker progress;
+ progress.set_data_type_id(GetSpecificsFieldNumberFromModelType(BOOKMARKS));
+ progress.set_token("token");
+
+ UpdateProgressMarkers(&handler, progress);
+
+ sync_pb::DataTypeProgressMarker saved;
+ dir()->GetDownloadProgress(BOOKMARKS, &saved);
+
+ EXPECT_EQ(progress.token(), saved.token());
+ EXPECT_EQ(progress.data_type_id(), saved.data_type_id());
+}
+
+// A test harness for tests that focus on applying updates.
+//
+// Update application is performed when we want to take updates that were
+// previously downloaded, processed, and stored in our syncable::Directory
+// and use them to update our local state (both the Directory's local state
+// and the model's local state, though these tests focus only on the Directory's
+// local state).
+//
+// This is kept separate from the update processing tests in part for
+// historical reasons, and in part because these tests may require a bit more
+// infrastructure in the future.  Update application often needs to happen on a
+// different thread, so these tests may end up requiring more infrastructure
+// than the update processing tests.  Currently, we bypass most of those issues
+// by using FakeModelWorkers, so there is not much difference between the two
+// test harnesses.
+class SyncDirectoryUpdateHandlerApplyUpdateTest : public ::testing::Test {
+ public:
+ SyncDirectoryUpdateHandlerApplyUpdateTest()
+ : ui_worker_(new FakeModelWorker(GROUP_UI)),
+ password_worker_(new FakeModelWorker(GROUP_PASSWORD)),
+ passive_worker_(new FakeModelWorker(GROUP_PASSIVE)),
+ update_handler_map_deleter_(&update_handler_map_) {}
+
+ virtual void SetUp() OVERRIDE {
+ dir_maker_.SetUp();
+ entry_factory_.reset(new TestEntryFactory(directory()));
+
+ update_handler_map_.insert(std::make_pair(
+ BOOKMARKS,
+ new SyncDirectoryUpdateHandler(directory(), BOOKMARKS, ui_worker_)));
+ update_handler_map_.insert(std::make_pair(
+ PASSWORDS,
+ new SyncDirectoryUpdateHandler(directory(),
+ PASSWORDS,
+ password_worker_)));
+ }
+
+ virtual void TearDown() OVERRIDE {
+ dir_maker_.TearDown();
+ }
+
+ protected:
+ void ApplyBookmarkUpdates(sessions::StatusController* status) {
+ update_handler_map_[BOOKMARKS]->ApplyUpdates(status);
+ }
+
+ void ApplyPasswordUpdates(sessions::StatusController* status) {
+ update_handler_map_[PASSWORDS]->ApplyUpdates(status);
+ }
+
+ TestEntryFactory* entry_factory() {
+ return entry_factory_.get();
+ }
+
+ syncable::Directory* directory() {
+ return dir_maker_.directory();
+ }
+
+ private:
+ base::MessageLoop loop_; // Needed to initialize the directory.
+ TestDirectorySetterUpper dir_maker_;
+ scoped_ptr<TestEntryFactory> entry_factory_;
+
+ scoped_refptr<FakeModelWorker> ui_worker_;
+ scoped_refptr<FakeModelWorker> password_worker_;
+ scoped_refptr<FakeModelWorker> passive_worker_;
+
+ UpdateHandlerMap update_handler_map_;
+ STLValueDeleter<UpdateHandlerMap> update_handler_map_deleter_;
+};
+
+namespace {
+sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
+ sync_pb::EntitySpecifics result;
+ AddDefaultFieldValue(BOOKMARKS, &result);
+ return result;
+}
+} // namespace
+
+// Test update application for a few bookmark items.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, SimpleBookmark) {
+ sessions::StatusController status;
+
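+  // Create two unapplied items: a parent under the root and a child under it.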
+ std::string root_server_id = syncable::GetNullId().GetServerId();
+ int64 parent_handle =
+ entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "parent", DefaultBookmarkSpecifics(), root_server_id);
+ int64 child_handle =
+ entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "child", DefaultBookmarkSpecifics(), "parent");
+
+ ApplyBookmarkUpdates(&status);
+
+ EXPECT_EQ(0, status.num_encryption_conflicts())
+ << "Simple update shouldn't result in conflicts";
+ EXPECT_EQ(0, status.num_hierarchy_conflicts())
+ << "Simple update shouldn't result in conflicts";
+ EXPECT_EQ(2, status.num_updates_applied())
+ << "All items should have been successfully applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+
+ syncable::Entry parent(&trans, syncable::GET_BY_HANDLE, parent_handle);
+ syncable::Entry child(&trans, syncable::GET_BY_HANDLE, child_handle);
+
+ ASSERT_TRUE(parent.good());
+ ASSERT_TRUE(child.good());
+
+ EXPECT_FALSE(parent.GetIsUnsynced());
+ EXPECT_FALSE(parent.GetIsUnappliedUpdate());
+ EXPECT_FALSE(child.GetIsUnsynced());
+ EXPECT_FALSE(child.GetIsUnappliedUpdate());
+ }
+}
+
+// Test that the applicator can handle updates delivered out of order.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
+ BookmarkChildrenBeforeParent) {
+ // Start with some bookmarks whose parents are unknown.
+ std::string root_server_id = syncable::GetNullId().GetServerId();
+ int64 a_handle = entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "a_child_created_first", DefaultBookmarkSpecifics(), "parent");
+ int64 x_handle = entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "x_child_created_first", DefaultBookmarkSpecifics(), "parent");
+
+ // Update application will fail.
+ sessions::StatusController status1;
+ ApplyBookmarkUpdates(&status1);
+ EXPECT_EQ(0, status1.num_updates_applied());
+ EXPECT_EQ(2, status1.num_hierarchy_conflicts());
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+
+ syncable::Entry a(&trans, syncable::GET_BY_HANDLE, a_handle);
+ syncable::Entry x(&trans, syncable::GET_BY_HANDLE, x_handle);
+
+ ASSERT_TRUE(a.good());
+ ASSERT_TRUE(x.good());
+
+ EXPECT_TRUE(a.GetIsUnappliedUpdate());
+ EXPECT_TRUE(x.GetIsUnappliedUpdate());
+ }
+
+ // Now add their parent and a few siblings.
+ entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "parent", DefaultBookmarkSpecifics(), root_server_id);
+ entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "a_child_created_second", DefaultBookmarkSpecifics(), "parent");
+ entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
+ "x_child_created_second", DefaultBookmarkSpecifics(), "parent");
+
+ // Update application will succeed.
+ sessions::StatusController status2;
+ ApplyBookmarkUpdates(&status2);
+ EXPECT_EQ(5, status2.num_updates_applied())
+ << "All updates should have been successfully applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+
+ syncable::Entry a(&trans, syncable::GET_BY_HANDLE, a_handle);
+ syncable::Entry x(&trans, syncable::GET_BY_HANDLE, x_handle);
+
+ ASSERT_TRUE(a.good());
+ ASSERT_TRUE(x.good());
+
+ EXPECT_FALSE(a.GetIsUnappliedUpdate());
+ EXPECT_FALSE(x.GetIsUnappliedUpdate());
+ }
+}
+
+// Try to apply changes on an item that is both IS_UNSYNCED and
+// IS_UNAPPLIED_UPDATE. Conflict resolution should be performed.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, SimpleBookmarkConflict) {
+ int64 handle = entry_factory()->CreateUnappliedAndUnsyncedBookmarkItem("x");
+
+ int original_server_version = -10;
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
+ original_server_version = e.GetServerVersion();
+ ASSERT_NE(original_server_version, e.GetBaseVersion());
+ EXPECT_TRUE(e.GetIsUnsynced());
+ }
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+ EXPECT_EQ(1, status.num_server_overwrites())
+ << "Unsynced and unapplied item conflict should be resolved";
+ EXPECT_EQ(0, status.num_updates_applied())
+ << "Update should not be applied; we should override the server.";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ EXPECT_EQ(original_server_version, e.GetServerVersion());
+ EXPECT_EQ(original_server_version, e.GetBaseVersion());
+ EXPECT_FALSE(e.GetIsUnappliedUpdate());
+
+ // The unsynced flag will remain set until we successfully commit the item.
+ EXPECT_TRUE(e.GetIsUnsynced());
+ }
+}
+
+// Create a simple conflict that is also a hierarchy conflict. If we were to
+// follow the normal "server wins" logic, we'd end up violating hierarchy
+// constraints.  The hierarchy conflict must take precedence.  We cannot allow
+// the update to be applied.  The item must remain in the conflict state.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, HierarchyAndSimpleConflict) {
+ // Create a simply-conflicting item. It will start with valid parent ids.
+ int64 handle = entry_factory()->CreateUnappliedAndUnsyncedBookmarkItem(
+ "orphaned_by_server");
+ {
+ // Manually set the SERVER_PARENT_ID to bad value.
+ // A bad parent indicates a hierarchy conflict.
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(entry.good());
+
+ entry.PutServerParentId(TestIdFactory::MakeServer("bogus_parent"));
+ }
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+ EXPECT_EQ(0, status.num_updates_applied());
+ EXPECT_EQ(0, status.num_server_overwrites());
+ EXPECT_EQ(1, status.num_hierarchy_conflicts());
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ EXPECT_TRUE(e.GetIsUnappliedUpdate());
+ EXPECT_TRUE(e.GetIsUnsynced());
+ }
+}
+
+// Attempt to apply an update that would create a bookmark folder loop.  This
+// application should fail.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, BookmarkFolderLoop) {
+ // Item 'X' locally has parent of 'root'. Server is updating it to have
+ // parent of 'Y'.
+
+ // Create it as a child of root node.
+ int64 handle = entry_factory()->CreateSyncedItem("X", BOOKMARKS, true);
+
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(entry.good());
+
+ // Re-parent from root to "Y"
+ entry.PutServerVersion(entry_factory()->GetNextRevision());
+ entry.PutIsUnappliedUpdate(true);
+ entry.PutServerParentId(TestIdFactory::MakeServer("Y"));
+ }
+
+ // Item 'Y' is child of 'X'.
+ entry_factory()->CreateUnsyncedItem(
+ TestIdFactory::MakeServer("Y"), TestIdFactory::MakeServer("X"), "Y", true,
+ BOOKMARKS, NULL);
+
+  // If the server's update were applied, we would have X as a child of Y, and
+  // Y as a child of X.  That's a directory loop.  The UpdateApplicator should
+ // prevent the update from being applied and note that this is a hierarchy
+ // conflict.
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+
+ // This should count as a hierarchy conflict.
+ EXPECT_EQ(1, status.num_hierarchy_conflicts());
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ EXPECT_TRUE(e.GetIsUnappliedUpdate());
+ EXPECT_FALSE(e.GetIsUnsynced());
+ }
+}
+
+// Test update application where the update has been orphaned by a local folder
+// deletion. The update application attempt should fail.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
+ HierarchyConflictDeletedParent) {
+ // Create a locally deleted parent item.
+ int64 parent_handle;
+ entry_factory()->CreateUnsyncedItem(
+ syncable::Id::CreateFromServerId("parent"), TestIdFactory::root(),
+ "parent", true, BOOKMARKS, &parent_handle);
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry entry(&trans,
+ syncable::GET_BY_HANDLE,
+ parent_handle);
+ entry.PutIsDel(true);
+ }
+
+ // Create an incoming child from the server.
+ int64 child_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "child", DefaultBookmarkSpecifics(), "parent");
+
+ // The server's update may seem valid to some other client, but on this client
+ // that new item's parent no longer exists. The update should not be applied
+ // and the update applicator should indicate this is a hierarchy conflict.
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+ EXPECT_EQ(1, status.num_hierarchy_conflicts());
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry child(&trans, syncable::GET_BY_HANDLE, child_handle);
+ ASSERT_TRUE(child.good());
+ EXPECT_TRUE(child.GetIsUnappliedUpdate());
+ EXPECT_FALSE(child.GetIsUnsynced());
+ }
+}
+
+// Attempt to apply an update that deletes a folder where the folder has
+// locally-created children. The update application should fail.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
+ HierarchyConflictDeleteNonEmptyDirectory) {
+ // Create a server-deleted folder as a child of root node.
+ int64 parent_handle =
+ entry_factory()->CreateSyncedItem("parent", BOOKMARKS, true);
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry entry(&trans,
+ syncable::GET_BY_HANDLE,
+ parent_handle);
+ ASSERT_TRUE(entry.good());
+
+ // Delete it on the server.
+ entry.PutServerVersion(entry_factory()->GetNextRevision());
+ entry.PutIsUnappliedUpdate(true);
+ entry.PutServerParentId(TestIdFactory::root());
+ entry.PutServerIsDel(true);
+ }
+
+ // Create a local child of the server-deleted directory.
+ entry_factory()->CreateUnsyncedItem(
+ TestIdFactory::MakeServer("child"), TestIdFactory::MakeServer("parent"),
+ "child", false, BOOKMARKS, NULL);
+
+ // The server's request to delete the directory must be ignored, otherwise our
+ // unsynced new child would be orphaned. This is a hierarchy conflict.
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+
+ // This should count as a hierarchy conflict.
+ EXPECT_EQ(1, status.num_hierarchy_conflicts());
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry parent(&trans, syncable::GET_BY_HANDLE, parent_handle);
+ ASSERT_TRUE(parent.good());
+ EXPECT_TRUE(parent.GetIsUnappliedUpdate());
+ EXPECT_FALSE(parent.GetIsUnsynced());
+ }
+}
+
+// Attempt to apply updates where the updated item's parent is not known to this
+// client. The update application attempt should fail.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
+ HierarchyConflictUnknownParent) {
+ // We shouldn't be able to do anything with either of these items.
+ int64 x_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "some_item", DefaultBookmarkSpecifics(), "unknown_parent");
+ int64 y_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "some_other_item", DefaultBookmarkSpecifics(), "some_item");
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+
+ EXPECT_EQ(2, status.num_hierarchy_conflicts())
+ << "All updates with an unknown ancestors should be in conflict";
+ EXPECT_EQ(0, status.num_updates_applied())
+ << "No item with an unknown ancestor should be applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry x(&trans, syncable::GET_BY_HANDLE, x_handle);
+ syncable::Entry y(&trans, syncable::GET_BY_HANDLE, y_handle);
+ ASSERT_TRUE(x.good());
+ ASSERT_TRUE(y.good());
+ EXPECT_TRUE(x.GetIsUnappliedUpdate());
+ EXPECT_TRUE(y.GetIsUnappliedUpdate());
+ EXPECT_FALSE(x.GetIsUnsynced());
+ EXPECT_FALSE(y.GetIsUnsynced());
+ }
+}
+
+// Attempt application of a mix of items. Some update application attempts will
+// fail due to hierarchy conflicts. Others should succeed.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, ItemsBothKnownAndUnknown) {
+ // See what happens when there's a mixture of good and bad updates.
+ std::string root_server_id = syncable::GetNullId().GetServerId();
+ int64 u1_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "first_unknown_item", DefaultBookmarkSpecifics(), "unknown_parent");
+ int64 k1_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "first_known_item", DefaultBookmarkSpecifics(), root_server_id);
+ int64 u2_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "second_unknown_item", DefaultBookmarkSpecifics(), "unknown_parent");
+ int64 k2_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "second_known_item", DefaultBookmarkSpecifics(), "first_known_item");
+ int64 k3_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "third_known_item", DefaultBookmarkSpecifics(), "fourth_known_item");
+ int64 k4_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "fourth_known_item", DefaultBookmarkSpecifics(), root_server_id);
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+
+ EXPECT_EQ(2, status.num_hierarchy_conflicts())
+ << "The updates with unknown ancestors should be in conflict";
+ EXPECT_EQ(4, status.num_updates_applied())
+ << "The updates with known ancestors should be successfully applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry u1(&trans, syncable::GET_BY_HANDLE, u1_handle);
+ syncable::Entry u2(&trans, syncable::GET_BY_HANDLE, u2_handle);
+ syncable::Entry k1(&trans, syncable::GET_BY_HANDLE, k1_handle);
+ syncable::Entry k2(&trans, syncable::GET_BY_HANDLE, k2_handle);
+ syncable::Entry k3(&trans, syncable::GET_BY_HANDLE, k3_handle);
+ syncable::Entry k4(&trans, syncable::GET_BY_HANDLE, k4_handle);
+ ASSERT_TRUE(u1.good());
+ ASSERT_TRUE(u2.good());
+ ASSERT_TRUE(k1.good());
+ ASSERT_TRUE(k2.good());
+ ASSERT_TRUE(k3.good());
+ ASSERT_TRUE(k4.good());
+ EXPECT_TRUE(u1.GetIsUnappliedUpdate());
+ EXPECT_TRUE(u2.GetIsUnappliedUpdate());
+ EXPECT_FALSE(k1.GetIsUnappliedUpdate());
+ EXPECT_FALSE(k2.GetIsUnappliedUpdate());
+ EXPECT_FALSE(k3.GetIsUnappliedUpdate());
+ EXPECT_FALSE(k4.GetIsUnappliedUpdate());
+ }
+}
+
+// Attempt application of password updates where the passphrase is known.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, DecryptablePassword) {
+ // Decryptable password updates should be applied.
+ Cryptographer* cryptographer;
+ {
+    // Holding on to the cryptographer pointer outside the transaction is bad
+    // practice, but for this test we know it's safe.
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+ }
+
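+  // Give the cryptographer a key so the password update below is decryptable.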
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ cryptographer->AddKey(params);
+
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::PasswordSpecificsData data;
+ data.set_origin("http://example.com");
+
+ cryptographer->Encrypt(data,
+ specifics.mutable_password()->mutable_encrypted());
+ int64 handle =
+ entry_factory()->CreateUnappliedNewItem("item", specifics, false);
+
+ sessions::StatusController status;
+ ApplyPasswordUpdates(&status);
+
+ EXPECT_EQ(1, status.num_updates_applied())
+ << "The updates that can be decrypted should be applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ EXPECT_FALSE(e.GetIsUnappliedUpdate());
+ EXPECT_FALSE(e.GetIsUnsynced());
+ }
+}
+
+// Attempt application of encrypted items when the passphrase is not known.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, UndecryptableData) {
+ // Undecryptable updates should not be applied.
+ sync_pb::EntitySpecifics encrypted_bookmark;
+ encrypted_bookmark.mutable_encrypted();
+ AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
+ std::string root_server_id = syncable::GetNullId().GetServerId();
+ int64 folder_handle = entry_factory()->CreateUnappliedNewItemWithParent(
+ "folder",
+ encrypted_bookmark,
+ root_server_id);
+ int64 bookmark_handle = entry_factory()->CreateUnappliedNewItem(
+ "item2",
+ encrypted_bookmark,
+ false);
+ sync_pb::EntitySpecifics encrypted_password;
+ encrypted_password.mutable_password();
+ int64 password_handle = entry_factory()->CreateUnappliedNewItem(
+ "item3",
+ encrypted_password,
+ false);
+
+ sessions::StatusController status;
+ ApplyBookmarkUpdates(&status);
+ ApplyPasswordUpdates(&status);
+
+ EXPECT_EQ(3, status.num_encryption_conflicts())
+ << "Updates that can't be decrypted should be in encryption conflict";
+ EXPECT_EQ(0, status.num_updates_applied())
+ << "No update that can't be decrypted should be applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry folder(&trans, syncable::GET_BY_HANDLE, folder_handle);
+ syncable::Entry bm(&trans, syncable::GET_BY_HANDLE, bookmark_handle);
+ syncable::Entry pw(&trans, syncable::GET_BY_HANDLE, password_handle);
+ ASSERT_TRUE(folder.good());
+ ASSERT_TRUE(bm.good());
+ ASSERT_TRUE(pw.good());
+ EXPECT_TRUE(folder.GetIsUnappliedUpdate());
+ EXPECT_TRUE(bm.GetIsUnappliedUpdate());
+ EXPECT_TRUE(pw.GetIsUnappliedUpdate());
+ }
+}
+
+// Test a mix of decryptable and undecryptable updates.
+TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, SomeUndecryptablePassword) {
+ Cryptographer* cryptographer;
+
+ int64 decryptable_handle = -1;
+ int64 undecryptable_handle = -1;
+
+ // Only decryptable password updates should be applied.
+ {
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::PasswordSpecificsData data;
+ data.set_origin("http://example.com/1");
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ cryptographer->AddKey(params);
+
+ cryptographer->Encrypt(data,
+ specifics.mutable_password()->mutable_encrypted());
+ }
+ decryptable_handle =
+ entry_factory()->CreateUnappliedNewItem("item1", specifics, false);
+ }
+ {
+ // Create a new cryptographer, independent of the one in the session.
+ Cryptographer other_cryptographer(cryptographer->encryptor());
+ KeyParams params = {"localhost", "dummy", "bazqux"};
+ other_cryptographer.AddKey(params);
+
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::PasswordSpecificsData data;
+ data.set_origin("http://example.com/2");
+
+ other_cryptographer.Encrypt(data,
+ specifics.mutable_password()->mutable_encrypted());
+ undecryptable_handle =
+ entry_factory()->CreateUnappliedNewItem("item2", specifics, false);
+ }
+
+ sessions::StatusController status;
+ ApplyPasswordUpdates(&status);
+
+ EXPECT_EQ(1, status.num_encryption_conflicts())
+ << "The updates that can't be decrypted should be in encryption "
+ << "conflict";
+ EXPECT_EQ(1, status.num_updates_applied())
+ << "The undecryptable password update shouldn't be applied";
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry e1(&trans, syncable::GET_BY_HANDLE, decryptable_handle);
+ syncable::Entry e2(&trans, syncable::GET_BY_HANDLE, undecryptable_handle);
+ ASSERT_TRUE(e1.good());
+ ASSERT_TRUE(e2.good());
+ EXPECT_FALSE(e1.GetIsUnappliedUpdate());
+ EXPECT_TRUE(e2.GetIsUnappliedUpdate());
+ }
+}
+
+} // namespace syncer
diff --git a/chromium/sync/engine/sync_engine_event.h b/chromium/sync/engine/sync_engine_event.h
index 3328ff367ff..026d3292a7a 100644
--- a/chromium/sync/engine/sync_engine_event.h
+++ b/chromium/sync/engine/sync_engine_event.h
@@ -23,7 +23,7 @@ struct SYNC_EXPORT_PRIVATE SyncEngineEvent {
// Sent on entry of Syncer state machine
SYNC_CYCLE_BEGIN,
- // SyncerCommand generated events.
+ // Sent any time progress is made during a sync cycle.
STATUS_CHANGED,
// We have reached the SYNCER_END state in the main sync loop.
@@ -32,9 +32,6 @@ struct SYNC_EXPORT_PRIVATE SyncEngineEvent {
////////////////////////////////////////////////////////////////
// Generated in response to specific protocol actions or events.
- // New token in updated_token.
- UPDATED_TOKEN,
-
// This is sent after the Syncer (and SyncerThread) have initiated self
// halt due to no longer being permitted to communicate with the server.
// The listener should sever the sync / browser connections and delete sync
diff --git a/chromium/sync/engine/sync_scheduler.h b/chromium/sync/engine/sync_scheduler.h
index b31af827913..aef01182336 100644
--- a/chromium/sync/engine/sync_scheduler.h
+++ b/chromium/sync/engine/sync_scheduler.h
@@ -13,7 +13,6 @@
#include "base/time/time.h"
#include "sync/base/sync_export.h"
#include "sync/engine/nudge_source.h"
-#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/sessions/sync_session.h"
namespace tracked_objects {
@@ -22,6 +21,7 @@ class Location;
namespace syncer {
+class ObjectIdInvalidationMap;
struct ServerConnectionEvent;
struct SYNC_EXPORT_PRIVATE ConfigurationParams {
@@ -30,7 +30,8 @@ struct SYNC_EXPORT_PRIVATE ConfigurationParams {
const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource& source,
ModelTypeSet types_to_download,
const ModelSafeRoutingInfo& routing_info,
- const base::Closure& ready_task);
+ const base::Closure& ready_task,
+ const base::Closure& retry_task);
~ConfigurationParams();
// Source for the configuration.
@@ -41,6 +42,8 @@ struct SYNC_EXPORT_PRIVATE ConfigurationParams {
ModelSafeRoutingInfo routing_info;
// Callback to invoke on configuration completion.
base::Closure ready_task;
+ // Callback to invoke on configuration failure.
+ base::Closure retry_task;
};
class SYNC_EXPORT_PRIVATE SyncScheduler
@@ -71,9 +74,11 @@ class SYNC_EXPORT_PRIVATE SyncScheduler
- // Schedules the configuration task specified by |params|. Returns true if
- // the configuration task executed immediately, false if it had to be
- // scheduled for a later attempt. |params.ready_task| is invoked whenever the
- // configuration task executes.
+ // Schedules the configuration task specified by |params|.
+ // |params.ready_task| is invoked whenever the configuration task executes.
+ // |params.retry_task| is invoked once if the configuration task could not
+ // execute. |params.ready_task| will still be called when configuration
+ // finishes.
// Note: must already be in CONFIGURATION mode.
- virtual bool ScheduleConfiguration(const ConfigurationParams& params) = 0;
+ virtual void ScheduleConfiguration(const ConfigurationParams& params) = 0;
// Request that the syncer avoid starting any new tasks and prepare for
// shutdown.
diff --git a/chromium/sync/engine/sync_scheduler_impl.cc b/chromium/sync/engine/sync_scheduler_impl.cc
index 78010d75fe9..0aaaba0f70b 100644
--- a/chromium/sync/engine/sync_scheduler_impl.cc
+++ b/chromium/sync/engine/sync_scheduler_impl.cc
@@ -73,12 +73,15 @@ ConfigurationParams::ConfigurationParams(
const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource& source,
ModelTypeSet types_to_download,
const ModelSafeRoutingInfo& routing_info,
- const base::Closure& ready_task)
+ const base::Closure& ready_task,
+ const base::Closure& retry_task)
: source(source),
types_to_download(types_to_download),
routing_info(routing_info),
- ready_task(ready_task) {
+ ready_task(ready_task),
+ retry_task(retry_task) {
DCHECK(!ready_task.is_null());
+ DCHECK(!retry_task.is_null());
}
ConfigurationParams::~ConfigurationParams() {}
@@ -152,11 +155,7 @@ SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
BackoffDelayProvider* delay_provider,
sessions::SyncSessionContext* context,
Syncer* syncer)
- : weak_ptr_factory_(this),
- weak_ptr_factory_for_weak_handle_(this),
- weak_handle_this_(MakeWeakHandle(
- weak_ptr_factory_for_weak_handle_.GetWeakPtr())),
- name_(name),
+ : name_(name),
started_(false),
syncer_short_poll_interval_seconds_(
TimeDelta::FromSeconds(kDefaultShortPollIntervalSeconds)),
@@ -169,7 +168,11 @@ SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
syncer_(syncer),
session_context_(context),
no_scheduling_allowed_(false),
- do_poll_after_credentials_updated_(false) {
+ do_poll_after_credentials_updated_(false),
+ weak_ptr_factory_(this),
+ weak_ptr_factory_for_weak_handle_(this) {
+ weak_handle_this_ = MakeWeakHandle(
+ weak_ptr_factory_for_weak_handle_.GetWeakPtr());
}
SyncSchedulerImpl::~SyncSchedulerImpl() {
@@ -204,7 +207,7 @@ void SyncSchedulerImpl::OnServerConnectionErrorFixed() {
// 4. A nudge was scheduled + saved while in configuration mode.
//
// In all cases except (2), we want to retry contacting the server. We
- // call DoCanaryJob to achieve this, and note that nothing -- not even a
+ // call TryCanaryJob to achieve this, and note that nothing -- not even a
// canary job -- can bypass a THROTTLED WaitInterval. The only thing that
// has the authority to do that is the Unthrottle timer.
TryCanaryJob();
@@ -234,15 +237,13 @@ void SyncSchedulerImpl::Start(Mode mode) {
CanRunNudgeJobNow(NORMAL_PRIORITY)) {
// We just got back to normal mode. Let's try to run the work that was
// queued up while we were configuring.
- DoNudgeSyncSessionJob(NORMAL_PRIORITY);
+ TrySyncSessionJob(NORMAL_PRIORITY);
}
}
ModelTypeSet SyncSchedulerImpl::GetEnabledAndUnthrottledTypes() {
- ModelTypeSet enabled_types =
- GetRoutingInfoTypes(session_context_->routing_info());
- ModelTypeSet throttled_types =
- nudge_tracker_.GetThrottledTypes();
+ ModelTypeSet enabled_types = session_context_->enabled_types();
+ ModelTypeSet throttled_types = nudge_tracker_.GetThrottledTypes();
return Difference(enabled_types, throttled_types);
}
@@ -274,7 +275,7 @@ void BuildModelSafeParams(
} // namespace.
-bool SyncSchedulerImpl::ScheduleConfiguration(
+void SyncSchedulerImpl::ScheduleConfiguration(
const ConfigurationParams& params) {
DCHECK(CalledOnValidThread());
DCHECK(IsConfigRelatedUpdateSourceValue(params.source));
@@ -296,22 +297,11 @@ bool SyncSchedulerImpl::ScheduleConfiguration(
// Only reconfigure if we have types to download.
if (!params.types_to_download.Empty()) {
pending_configure_params_.reset(new ConfigurationParams(params));
- bool succeeded = DoConfigurationSyncSessionJob(NORMAL_PRIORITY);
-
- // If we failed, the job would have been saved as the pending configure
- // job and a wait interval would have been set.
- if (!succeeded) {
- DCHECK(pending_configure_params_);
- } else {
- DCHECK(!pending_configure_params_);
- }
- return succeeded;
+ TrySyncSessionJob(NORMAL_PRIORITY);
} else {
SDVLOG(2) << "No change in routing info, calling ready task directly.";
params.ready_task.Run();
}
-
- return true;
}
bool SyncSchedulerImpl::CanRunJobNow(JobPriority priority) {
@@ -344,8 +334,7 @@ bool SyncSchedulerImpl::CanRunNudgeJobNow(JobPriority priority) {
return false;
}
- const ModelTypeSet enabled_types =
- GetRoutingInfoTypes(session_context_->routing_info());
+ const ModelTypeSet enabled_types = session_context_->enabled_types();
if (nudge_tracker_.GetThrottledTypes().HasAll(enabled_types)) {
SDVLOG(1) << "Not running a nudge because we're fully type throttled.";
return false;
@@ -393,12 +382,12 @@ void SyncSchedulerImpl::ScheduleInvalidationNudge(
const ObjectIdInvalidationMap& invalidation_map,
const tracked_objects::Location& nudge_location) {
DCHECK(CalledOnValidThread());
- DCHECK(!invalidation_map.empty());
+ DCHECK(!invalidation_map.Empty());
SDVLOG_LOC(nudge_location, 2)
<< "Scheduling sync because we received invalidation for "
- << ModelTypeSetToString(ObjectIdSetToModelTypeSet(
- ObjectIdInvalidationMapToSet(invalidation_map)));
+ << ModelTypeSetToString(
+ ObjectIdSetToModelTypeSet(invalidation_map.GetObjectIds()));
nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
ScheduleNudgeImpl(desired_delay, nudge_location);
}
@@ -428,12 +417,6 @@ void SyncSchedulerImpl::ScheduleNudgeImpl(
if (!CanRunNudgeJobNow(NORMAL_PRIORITY))
return;
- if (!started_) {
- SDVLOG_LOC(nudge_location, 2)
- << "Schedule not started; not running a nudge.";
- return;
- }
-
TimeTicks incoming_run_time = TimeTicks::Now() + delay;
if (!scheduled_nudge_time_.is_null() &&
(scheduled_nudge_time_ < incoming_run_time)) {
@@ -467,8 +450,8 @@ void SyncSchedulerImpl::DoNudgeSyncSessionJob(JobPriority priority) {
DCHECK(CalledOnValidThread());
DCHECK(CanRunNudgeJobNow(priority));
- DVLOG(2) << "Will run normal mode sync cycle with routing info "
- << ModelSafeRoutingInfoToString(session_context_->routing_info());
+ DVLOG(2) << "Will run normal mode sync cycle with types "
+ << ModelTypeSetToString(session_context_->enabled_types());
scoped_ptr<SyncSession> session(SyncSession::Build(session_context_, this));
bool premature_exit = !syncer_->NormalSyncShare(
GetEnabledAndUnthrottledTypes(),
@@ -497,20 +480,25 @@ void SyncSchedulerImpl::DoNudgeSyncSessionJob(JobPriority priority) {
}
}
-bool SyncSchedulerImpl::DoConfigurationSyncSessionJob(JobPriority priority) {
+void SyncSchedulerImpl::DoConfigurationSyncSessionJob(JobPriority priority) {
DCHECK(CalledOnValidThread());
DCHECK_EQ(mode_, CONFIGURATION_MODE);
+ DCHECK(pending_configure_params_ != NULL);
if (!CanRunJobNow(priority)) {
SDVLOG(2) << "Unable to run configure job right now.";
- return false;
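+ // Let the caller know the configure job could not run, then clear the
+ // closure so it is invoked at most once.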
+ if (!pending_configure_params_->retry_task.is_null()) {
+ pending_configure_params_->retry_task.Run();
+ pending_configure_params_->retry_task.Reset();
+ }
+ return;
}
- SDVLOG(2) << "Will run configure SyncShare with routes "
- << ModelSafeRoutingInfoToString(session_context_->routing_info());
+ SDVLOG(2) << "Will run configure SyncShare with types "
+ << ModelTypeSetToString(session_context_->enabled_types());
scoped_ptr<SyncSession> session(SyncSession::Build(session_context_, this));
bool premature_exit = !syncer_->ConfigureSyncShare(
- GetRoutingInfoTypes(session_context_->routing_info()),
+ session_context_->enabled_types(),
pending_configure_params_->source,
session.get());
AdjustPolling(FORCE_RESET);
@@ -529,10 +517,14 @@ bool SyncSchedulerImpl::DoConfigurationSyncSessionJob(JobPriority priority) {
// If we're here, then we successfully reached the server. End all backoff.
wait_interval_.reset();
NotifyRetryTime(base::Time());
- return true;
} else {
HandleFailure(session->status_controller().model_neutral_state());
- return false;
+ // The sync cycle might receive a response from the server that causes the
+ // scheduler to stop, which invalidates pending_configure_params_.
+ if (started_ && !pending_configure_params_->retry_task.is_null()) {
+ pending_configure_params_->retry_task.Run();
+ pending_configure_params_->retry_task.Reset();
+ }
}
}
@@ -566,14 +558,14 @@ void SyncSchedulerImpl::DoPollSyncSessionJob() {
return;
}
- SDVLOG(2) << "Polling with routes "
- << ModelSafeRoutingInfoToString(session_context_->routing_info());
+ SDVLOG(2) << "Polling with types "
+ << ModelTypeSetToString(session_context_->enabled_types());
scoped_ptr<SyncSession> session(SyncSession::Build(session_context_, this));
syncer_->PollSyncShare(
GetEnabledAndUnthrottledTypes(),
session.get());
- AdjustPolling(UPDATE_INTERVAL);
+ AdjustPolling(FORCE_RESET);
if (IsCurrentlyThrottled()) {
SDVLOG(2) << "Poll request got us throttled.";
@@ -600,18 +592,25 @@ void SyncSchedulerImpl::UpdateNudgeTimeRecords(ModelTypeSet types) {
}
}
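+// Use the short poll interval when notifications are disabled or when updates
+// are not fetched before each commit; otherwise use the long poll interval.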
+TimeDelta SyncSchedulerImpl::GetPollInterval() {
+ return (!session_context_->notifications_enabled() ||
+ !session_context_->ShouldFetchUpdatesBeforeCommit()) ?
+ syncer_short_poll_interval_seconds_ :
+ syncer_long_poll_interval_seconds_;
+}
+
void SyncSchedulerImpl::AdjustPolling(PollAdjustType type) {
DCHECK(CalledOnValidThread());
- TimeDelta poll = (!session_context_->notifications_enabled() ||
- !session_context_->ShouldFetchUpdatesBeforeCommit()) ?
- syncer_short_poll_interval_seconds_ :
- syncer_long_poll_interval_seconds_;
+ TimeDelta poll = GetPollInterval();
bool rate_changed = !poll_timer_.IsRunning() ||
poll != poll_timer_.GetCurrentDelay();
- if (type == FORCE_RESET && !rate_changed)
- poll_timer_.Reset();
+ if (type == FORCE_RESET) {
+ last_poll_reset_ = base::TimeTicks::Now();
+ if (!rate_changed)
+ poll_timer_.Reset();
+ }
if (!rate_changed)
return;
@@ -661,25 +660,61 @@ void SyncSchedulerImpl::Stop() {
// This is the only place where we invoke DoSyncSessionJob with canary
// privileges. Everyone else should use NORMAL_PRIORITY.
void SyncSchedulerImpl::TryCanaryJob() {
- DCHECK(CalledOnValidThread());
+ TrySyncSessionJob(CANARY_PRIORITY);
+}
- if (mode_ == CONFIGURATION_MODE && pending_configure_params_) {
- SDVLOG(2) << "Found pending configure job; will run as canary";
- DoConfigurationSyncSessionJob(CANARY_PRIORITY);
- } else if (mode_ == NORMAL_MODE && nudge_tracker_.IsSyncRequired() &&
- CanRunNudgeJobNow(CANARY_PRIORITY)) {
- SDVLOG(2) << "Found pending nudge job; will run as canary";
- DoNudgeSyncSessionJob(CANARY_PRIORITY);
- } else if (mode_ == NORMAL_MODE && CanRunJobNow(CANARY_PRIORITY) &&
- do_poll_after_credentials_updated_) {
- // Retry poll if poll timer recently fired and ProfileSyncService received
- // fresh access token.
- DoPollSyncSessionJob();
+void SyncSchedulerImpl::TrySyncSessionJob(JobPriority priority) {
+ // Post a call to TrySyncSessionJobImpl on the current thread. In the future
+ // the request for an access token will happen here.
+ base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+ &SyncSchedulerImpl::TrySyncSessionJobImpl,
+ weak_ptr_factory_.GetWeakPtr(),
+ priority));
+}
+
+void SyncSchedulerImpl::TrySyncSessionJobImpl(JobPriority priority) {
+ DCHECK(CalledOnValidThread());
+ if (mode_ == CONFIGURATION_MODE) {
+ if (pending_configure_params_) {
+ SDVLOG(2) << "Found pending configure job";
+ DoConfigurationSyncSessionJob(priority);
+ }
} else {
- SDVLOG(2) << "Found no work to do; will not run a canary";
+ DCHECK(mode_ == NORMAL_MODE);
+ if (nudge_tracker_.IsSyncRequired() && CanRunNudgeJobNow(priority)) {
+ SDVLOG(2) << "Found pending nudge job";
+ DoNudgeSyncSessionJob(priority);
+ } else if (do_poll_after_credentials_updated_ ||
+ ((base::TimeTicks::Now() - last_poll_reset_) >= GetPollInterval())) {
+ DoPollSyncSessionJob();
+ // The poll timer fires infrequently. Usually by this time the access token
+ // has already expired and the poll job will fail with an auth error. Set a
+ // flag to retry the poll once ProfileSyncService gets a new access token;
+ // TryCanaryJob will be called after the access token is retrieved.
+ if (HttpResponse::SYNC_AUTH_ERROR ==
+ session_context_->connection_manager()->server_status()) {
+ do_poll_after_credentials_updated_ = true;
+ }
+ }
+ }
+
+ if (priority == CANARY_PRIORITY) {
+ // If this is a canary job then, regardless of the result, don't run the
+ // poll job until the next time the poll timer fires.
+ do_poll_after_credentials_updated_ = false;
+ }
+
+ if (IsBackingOff() && !pending_wakeup_timer_.IsRunning()) {
+ // If we succeeded, our wait interval would have been cleared. If it hasn't
+ // been cleared, then we should increase our backoff interval and schedule
+ // another retry.
+ TimeDelta length = delay_provider_->GetDelay(wait_interval_->length);
+ wait_interval_.reset(
+ new WaitInterval(WaitInterval::EXPONENTIAL_BACKOFF, length));
+ SDVLOG(2) << "Sync cycle failed. Will back off for "
+ << wait_interval_->length.InMilliseconds() << "ms.";
+ RestartWaiting();
}
- // Don't run poll job till the next time poll timer fires.
- do_poll_after_credentials_updated_ = false;
}
void SyncSchedulerImpl::PollTimerCallback() {
@@ -695,15 +730,7 @@ void SyncSchedulerImpl::PollTimerCallback() {
return;
}
- DoPollSyncSessionJob();
- // Poll timer fires infrequently. Usually by this time access token is already
- // expired and poll job will fail with auth error. Set flag to retry poll once
- // ProfileSyncService gets new access token, TryCanaryJob will be called in
- // this case.
- if (HttpResponse::SYNC_AUTH_ERROR ==
- session_context_->connection_manager()->server_status()) {
- do_poll_after_credentials_updated_ = true;
- }
+ TrySyncSessionJob(NORMAL_PRIORITY);
}
void SyncSchedulerImpl::Unthrottle() {
@@ -740,14 +767,14 @@ void SyncSchedulerImpl::TypeUnthrottle(base::TimeTicks unthrottle_time) {
// Maybe this is a good time to run a nudge job. Let's try it.
if (nudge_tracker_.IsSyncRequired() && CanRunNudgeJobNow(NORMAL_PRIORITY))
- DoNudgeSyncSessionJob(NORMAL_PRIORITY);
+ TrySyncSessionJob(NORMAL_PRIORITY);
}
void SyncSchedulerImpl::PerformDelayedNudge() {
// Circumstances may have changed since we scheduled this delayed nudge.
// We must check to see if it's OK to run the job before we do so.
if (CanRunNudgeJobNow(NORMAL_PRIORITY))
- DoNudgeSyncSessionJob(NORMAL_PRIORITY);
+ TrySyncSessionJob(NORMAL_PRIORITY);
// We're not responsible for setting up any retries here. The functions that
// first put us into a state that prevents successful sync cycles (eg. global
@@ -758,18 +785,6 @@ void SyncSchedulerImpl::PerformDelayedNudge() {
void SyncSchedulerImpl::ExponentialBackoffRetry() {
TryCanaryJob();
-
- if (IsBackingOff()) {
- // If we succeeded, our wait interval would have been cleared. If it hasn't
- // been cleared, then we should increase our backoff interval and schedule
- // another retry.
- TimeDelta length = delay_provider_->GetDelay(wait_interval_->length);
- wait_interval_.reset(
- new WaitInterval(WaitInterval::EXPONENTIAL_BACKOFF, length));
- SDVLOG(2) << "Sync cycle failed. Will back off for "
- << wait_interval_->length.InMilliseconds() << "ms.";
- RestartWaiting();
- }
}
void SyncSchedulerImpl::Notify(SyncEngineEvent::EventCause cause) {
@@ -850,13 +865,6 @@ void SyncSchedulerImpl::OnReceivedClientInvalidationHintBufferSize(int size) {
NOTREACHED() << "Hint buffer size should be > 0.";
}
-void SyncSchedulerImpl::OnShouldStopSyncingPermanently() {
- DCHECK(CalledOnValidThread());
- SDVLOG(2) << "OnShouldStopSyncingPermanently";
- Stop();
- Notify(SyncEngineEvent::STOP_SYNCING_PERMANENTLY);
-}
-
void SyncSchedulerImpl::OnActionableError(
const sessions::SyncSessionSnapshot& snap) {
DCHECK(CalledOnValidThread());
diff --git a/chromium/sync/engine/sync_scheduler_impl.h b/chromium/sync/engine/sync_scheduler_impl.h
index 8492463b530..4c0dd57016b 100644
--- a/chromium/sync/engine/sync_scheduler_impl.h
+++ b/chromium/sync/engine/sync_scheduler_impl.h
@@ -52,7 +52,7 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
virtual ~SyncSchedulerImpl();
virtual void Start(Mode mode) OVERRIDE;
- virtual bool ScheduleConfiguration(
+ virtual void ScheduleConfiguration(
const ConfigurationParams& params) OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void ScheduleLocalNudge(
@@ -87,7 +87,6 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
virtual void OnReceivedSessionsCommitDelay(
const base::TimeDelta& new_delay) OVERRIDE;
virtual void OnReceivedClientInvalidationHintBufferSize(int size) OVERRIDE;
- virtual void OnShouldStopSyncingPermanently() OVERRIDE;
virtual void OnSyncProtocolError(
const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
@@ -159,7 +158,7 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
void DoNudgeSyncSessionJob(JobPriority priority);
// Invoke the syncer to perform a configuration job.
- bool DoConfigurationSyncSessionJob(JobPriority priority);
+ void DoConfigurationSyncSessionJob(JobPriority priority);
// Helper function for Do{Nudge,Configuration}SyncSessionJob.
void HandleFailure(
@@ -168,6 +167,9 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
// Invoke the Syncer to perform a poll job.
void DoPollSyncSessionJob();
+ // Helper function to calculate poll interval.
+ base::TimeDelta GetPollInterval();
+
// Adjusts the poll timer to account for new poll interval, and possibly
// resets the poll interval, depending on the flag's value.
void AdjustPolling(PollAdjustType type);
@@ -205,6 +207,11 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
// priority.
void TryCanaryJob();
+ // At the moment TrySyncSessionJob just posts a call to TrySyncSessionJobImpl
+ // on the current thread. In the future it will request an access token here.
+ void TrySyncSessionJob(JobPriority priority);
+ void TrySyncSessionJobImpl(JobPriority priority);
+
// Transitions out of the THROTTLED WaitInterval then calls TryCanaryJob().
void Unthrottle();
@@ -239,14 +246,8 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
virtual void OnActionableError(const sessions::SyncSessionSnapshot& snapshot);
- base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_;
-
- // A second factory specially for weak_handle_this_, to allow the handle
- // to be const and alleviate threading concerns.
- base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_for_weak_handle_;
-
// For certain methods that need to worry about X-thread posting.
- const WeakHandle<SyncSchedulerImpl> weak_handle_this_;
+ WeakHandle<SyncSchedulerImpl> weak_handle_this_;
// Used for logging.
const std::string name_;
@@ -316,6 +317,17 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
// after credentials are updated.
bool do_poll_after_credentials_updated_;
+ // TrySyncSessionJob might get called for multiple reasons. It should only
+ // call DoPollSyncSessionJob if enough time has passed since the last poll.
+ // last_poll_reset_ keeps track of when the last poll attempt occurred.
+ base::TimeTicks last_poll_reset_;
+
+ base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_;
+
+ // A second factory specially for weak_handle_this_, to allow the handle
+ // to be const and alleviate threading concerns.
+ base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_for_weak_handle_;
+
DISALLOW_COPY_AND_ASSIGN(SyncSchedulerImpl);
};
diff --git a/chromium/sync/engine/sync_scheduler_unittest.cc b/chromium/sync/engine/sync_scheduler_unittest.cc
index a5a00e98bcd..e5876554e81 100644
--- a/chromium/sync/engine/sync_scheduler_unittest.cc
+++ b/chromium/sync/engine/sync_scheduler_unittest.cc
@@ -97,7 +97,7 @@ ModelSafeRoutingInfo TypesToRoutingInfo(ModelTypeSet types) {
static const size_t kMinNumSamples = 5;
class SyncSchedulerTest : public testing::Test {
public:
- SyncSchedulerTest() : weak_ptr_factory_(this), syncer_(NULL), delay_(NULL) {}
+ SyncSchedulerTest() : syncer_(NULL), delay_(NULL), weak_ptr_factory_(this) {}
class MockDelayProvider : public BackoffDelayProvider {
public:
@@ -222,7 +222,6 @@ class SyncSchedulerTest : public testing::Test {
}
base::MessageLoop loop_;
- base::WeakPtrFactory<SyncSchedulerTest> weak_ptr_factory_;
TestDirectorySetterUpper dir_maker_;
CancelationSignal cancelation_signal_;
scoped_ptr<MockConnectionManager> connection_;
@@ -233,6 +232,7 @@ class SyncSchedulerTest : public testing::Test {
std::vector<scoped_refptr<FakeModelWorker> > workers_;
scoped_refptr<ExtensionsActivity> extensions_activity_;
ModelSafeRoutingInfo routing_info_;
+ base::WeakPtrFactory<SyncSchedulerTest> weak_ptr_factory_;
};
void RecordSyncShareImpl(SyncShareTimes* times) {
@@ -256,6 +256,10 @@ ACTION_P2(RecordSyncShareMultiple, times, quit_after) {
return true;
}
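+// GMock action that stops the scheduler from inside a mocked SyncShare call.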
+ACTION_P(StopScheduler, scheduler) {
+ scheduler->Stop();
+}
+
ACTION(AddFailureAndQuitLoopNow) {
ADD_FAILURE();
QuitLoopNow();
@@ -307,14 +311,18 @@ TEST_F(SyncSchedulerTest, Config) {
StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
model_types,
TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- ASSERT_TRUE(scheduler()->ScheduleConfiguration(params));
- ASSERT_EQ(1, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ PumpLoop();
+ ASSERT_EQ(1, ready_counter.times_called());
+ ASSERT_EQ(0, retry_counter.times_called());
}
// Simulate a failure and make sure the config request is retried.
@@ -329,16 +337,28 @@ TEST_F(SyncSchedulerTest, ConfigWithBackingOff) {
EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
+ RecordSyncShare(&times)))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
RecordSyncShare(&times)));
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
model_types,
TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- ASSERT_FALSE(scheduler()->ScheduleConfiguration(params));
- ASSERT_EQ(0, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ RunLoop();
+ ASSERT_EQ(0, ready_counter.times_called());
+ ASSERT_EQ(1, retry_counter.times_called());
+
+ // RunLoop() will trigger TryCanaryJob which will retry configuration.
+ // Since retry_task was already called it shouldn't be called again.
+ RunLoop();
+ ASSERT_EQ(0, ready_counter.times_called());
+ ASSERT_EQ(1, retry_counter.times_called());
Mock::VerifyAndClearExpectations(syncer());
@@ -347,7 +367,39 @@ TEST_F(SyncSchedulerTest, ConfigWithBackingOff) {
RecordSyncShare(&times)));
RunLoop();
- ASSERT_EQ(1, counter.times_called());
+ ASSERT_EQ(1, ready_counter.times_called());
+}
+
+// Simulate SyncSchedulerImpl::Stop being called in the middle of Configure.
+// This can happen if server returns NOT_MY_BIRTHDAY.
+TEST_F(SyncSchedulerTest, ConfigWithStop) {
+ UseMockDelayProvider();
+ EXPECT_CALL(*delay(), GetDelay(_))
+ .WillRepeatedly(Return(TimeDelta::FromMilliseconds(1)));
+ SyncShareTimes times;
+ const ModelTypeSet model_types(BOOKMARKS);
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+
+ // Make ConfigureSyncShare call scheduler->Stop(). After that, the scheduler
+ // must not invoke retry_task or dereference the configuration params.
+ EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
+ StopScheduler(scheduler()),
+ RecordSyncShare(&times)));
+
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
+ ConfigurationParams params(
+ GetUpdatesCallerInfo::RECONFIGURATION,
+ model_types,
+ TypesToRoutingInfo(model_types),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ PumpLoop();
+ ASSERT_EQ(0, ready_counter.times_called());
+ ASSERT_EQ(0, retry_counter.times_called());
}
// Issue a nudge when the config has failed. Make sure both the config and
@@ -365,14 +417,18 @@ TEST_F(SyncSchedulerTest, NudgeWithConfigWithBackingOff) {
EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
RecordSyncShare(&times)));
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
model_types,
TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- ASSERT_FALSE(scheduler()->ScheduleConfiguration(params));
- ASSERT_EQ(0, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ RunLoop();
+ ASSERT_EQ(0, ready_counter.times_called());
+ ASSERT_EQ(1, retry_counter.times_called());
Mock::VerifyAndClearExpectations(syncer());
// Ask for a nudge while dealing with repeated configure failure.
@@ -385,7 +441,7 @@ TEST_F(SyncSchedulerTest, NudgeWithConfigWithBackingOff) {
// for the first retry attempt from the config job (after
// waiting ~+/- 50ms).
Mock::VerifyAndClearExpectations(syncer());
- ASSERT_EQ(0, counter.times_called());
+ ASSERT_EQ(0, ready_counter.times_called());
// Let the next configure retry succeed.
EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
@@ -398,6 +454,7 @@ TEST_F(SyncSchedulerTest, NudgeWithConfigWithBackingOff) {
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
RecordSyncShare(&times)));
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ PumpLoop();
}
// Test that nudges are coalesced.
@@ -599,14 +656,19 @@ TEST_F(SyncSchedulerTest, ThrottlingDoesThrottle) {
StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
types,
TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- ASSERT_FALSE(scheduler()->ScheduleConfiguration(params));
- ASSERT_EQ(0, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ PumpLoop();
+ ASSERT_EQ(0, ready_counter.times_called());
+ ASSERT_EQ(1, retry_counter.times_called());
+
}
TEST_F(SyncSchedulerTest, ThrottlingExpiresFromPoll) {
@@ -655,7 +717,8 @@ TEST_F(SyncSchedulerTest, ThrottlingExpiresFromNudge) {
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
scheduler()->ScheduleLocalNudge(zero(), types, FROM_HERE);
- PumpLoop();
+ PumpLoop(); // To get PerformDelayedNudge called.
+ PumpLoop(); // To get TrySyncSessionJob called.
EXPECT_TRUE(scheduler()->IsCurrentlyThrottled());
RunLoop();
EXPECT_FALSE(scheduler()->IsCurrentlyThrottled());
@@ -682,14 +745,18 @@ TEST_F(SyncSchedulerTest, ThrottlingExpiresFromConfigure) {
const ModelTypeSet types(BOOKMARKS);
StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
types,
TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- EXPECT_FALSE(scheduler()->ScheduleConfiguration(params));
- EXPECT_EQ(0, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ PumpLoop();
+ EXPECT_EQ(0, ready_counter.times_called());
+ EXPECT_EQ(1, retry_counter.times_called());
EXPECT_TRUE(scheduler()->IsCurrentlyThrottled());
RunLoop();
@@ -719,7 +786,8 @@ TEST_F(SyncSchedulerTest, TypeThrottlingBlocksNudge) {
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
scheduler()->ScheduleLocalNudge(zero(), types, FROM_HERE);
- PumpLoop();
+ PumpLoop(); // To get PerformDelayedNudge called.
+ PumpLoop(); // To get TrySyncSessionJob called
EXPECT_TRUE(GetThrottledTypes().HasAll(types));
// This won't cause a sync cycle because the types are throttled.
@@ -753,7 +821,8 @@ TEST_F(SyncSchedulerTest, TypeThrottlingDoesBlockOtherSources) {
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
scheduler()->ScheduleLocalNudge(zero(), throttled_types, FROM_HERE);
- PumpLoop();
+ PumpLoop(); // To get PerformDelayedNudge called.
+ PumpLoop(); // To get TrySyncSessionJob called
EXPECT_TRUE(GetThrottledTypes().HasAll(throttled_types));
// Ignore invalidations for throttled types.
@@ -797,14 +866,19 @@ TEST_F(SyncSchedulerTest, ConfigurationMode) {
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureSuccess),
RecordSyncShare(&times)))
.RetiresOnSaturation();
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
config_types,
TypesToRoutingInfo(config_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- ASSERT_TRUE(scheduler()->ScheduleConfiguration(params));
- ASSERT_EQ(1, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ RunLoop();
+ ASSERT_EQ(1, ready_counter.times_called());
+ ASSERT_EQ(0, retry_counter.times_called());
+
Mock::VerifyAndClearExpectations(syncer());
// Switch to NORMAL_MODE to ensure NUDGES were properly saved and run.
@@ -819,7 +893,8 @@ TEST_F(SyncSchedulerTest, ConfigurationMode) {
context()->set_routing_info(routing_info());
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- PumpLoop();
+ RunLoop();
+ Mock::VerifyAndClearExpectations(syncer());
}
class BackoffTriggersSyncSchedulerTest : public SyncSchedulerTest {
@@ -895,12 +970,14 @@ TEST_F(BackoffTriggersSyncSchedulerTest, FailGetEncryptionKey) {
StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
ModelTypeSet types(BOOKMARKS);
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
types,
TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
scheduler()->ScheduleConfiguration(params);
RunLoop();
@@ -947,14 +1024,19 @@ TEST_F(SyncSchedulerTest, BackoffDropsJobs) {
StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
types,
TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
- ASSERT_FALSE(scheduler()->ScheduleConfiguration(params));
- ASSERT_EQ(0, counter.times_called());
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
+ scheduler()->ScheduleConfiguration(params);
+ PumpLoop();
+ ASSERT_EQ(0, ready_counter.times_called());
+ ASSERT_EQ(1, retry_counter.times_called());
+
}
// Test that backoff is shaping traffic properly with consecutive errors.
@@ -1112,8 +1194,8 @@ TEST_F(SyncSchedulerTest, ServerConnectionChangeDuringBackoff) {
Return(true)));
scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
-
- PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
+ PumpLoop(); // To get PerformDelayedNudge called.
+ PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
ASSERT_TRUE(scheduler()->IsBackingOff());
// Before we run the scheduled canary, trigger a server connection change.
@@ -1145,11 +1227,13 @@ TEST_F(SyncSchedulerTest, ConnectionChangeCanaryPreemptedByNudge) {
scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
- PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
+ PumpLoop(); // To get PerformDelayedNudge called.
+ PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
ASSERT_TRUE(scheduler()->IsBackingOff());
// Before we run the scheduled canary, trigger a server connection change.
scheduler()->OnConnectionStatusChange();
+ PumpLoop();
connection()->SetServerReachable();
connection()->UpdateConnectionStatus();
scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
@@ -1168,12 +1252,14 @@ TEST_F(SyncSchedulerTest, DoubleCanaryInConfigure) {
connection()->UpdateConnectionStatus();
ModelTypeSet model_types(BOOKMARKS);
- CallbackCounter counter;
+ CallbackCounter ready_counter;
+ CallbackCounter retry_counter;
ConfigurationParams params(
GetUpdatesCallerInfo::RECONFIGURATION,
model_types,
TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&counter)));
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
+ base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
scheduler()->ScheduleConfiguration(params);
scheduler()->OnConnectionStatusChange();
@@ -1206,6 +1292,7 @@ TEST_F(SyncSchedulerTest, PollFromCanaryAfterAuthError) {
RecordSyncShare(&times)));
scheduler()->OnCredentialsUpdated();
connection()->SetServerStatus(HttpResponse::SERVER_CONNECTION_OK);
+ RunLoop();
StopSyncScheduler();
}
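The scheduler-test edits above all stem from the same interface change: ScheduleConfiguration() no longer returns a bool, and ConfigurationParams now takes two callbacks, one fired when the configuration cycle succeeds and one fired when it must be retried, with the outcome delivered only after the message loop is pumped. A condensed sketch of the new calling pattern, built only from the calls visible in these tests:

  CallbackCounter ready_counter;
  CallbackCounter retry_counter;
  ConfigurationParams params(
      GetUpdatesCallerInfo::RECONFIGURATION,
      types,
      TypesToRoutingInfo(types),
      base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
      base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
  scheduler()->ScheduleConfiguration(params);  // Returns void now.
  PumpLoop();
  // Exactly one of ready_counter / retry_counter is expected to have fired.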
diff --git a/chromium/sync/engine/syncer.cc b/chromium/sync/engine/syncer.cc
index cc379d9d82b..13e1f792618 100644
--- a/chromium/sync/engine/syncer.cc
+++ b/chromium/sync/engine/syncer.cc
@@ -11,18 +11,16 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "sync/engine/apply_control_data_updates.h"
-#include "sync/engine/apply_updates_and_resolve_conflicts_command.h"
-#include "sync/engine/build_commit_command.h"
#include "sync/engine/commit.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/download.h"
#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/process_commit_response_command.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/cancelation_signal.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/syncer_error.h"
#include "sync/sessions/nudge_tracker.h"
+#include "sync/syncable/directory.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable-inl.h"
@@ -62,8 +60,9 @@ bool Syncer::NormalSyncShare(ModelTypeSet request_types,
if (nudge_tracker.IsGetUpdatesRequired() ||
session->context()->ShouldFetchUpdatesBeforeCommit()) {
if (!DownloadAndApplyUpdates(
+ request_types,
session,
- base::Bind(&BuildNormalDownloadUpdates,
+ base::Bind(&download::BuildNormalDownloadUpdates,
session,
kCreateMobileBookmarksFolder,
request_types,
@@ -73,7 +72,7 @@ bool Syncer::NormalSyncShare(ModelTypeSet request_types,
}
VLOG(1) << "Committing from types " << ModelTypeSetToString(request_types);
- SyncerError commit_result = BuildAndPostCommits(request_types, this, session);
+ SyncerError commit_result = BuildAndPostCommits(request_types, session);
session->mutable_status_controller()->set_commit_result(commit_result);
return HandleCycleEnd(session, nudge_tracker.updates_source());
@@ -86,8 +85,9 @@ bool Syncer::ConfigureSyncShare(
HandleCycleBegin(session);
VLOG(1) << "Configuring types " << ModelTypeSetToString(request_types);
DownloadAndApplyUpdates(
+ request_types,
session,
- base::Bind(&BuildDownloadUpdatesForConfigure,
+ base::Bind(&download::BuildDownloadUpdatesForConfigure,
session,
kCreateMobileBookmarksFolder,
source,
@@ -100,8 +100,9 @@ bool Syncer::PollSyncShare(ModelTypeSet request_types,
HandleCycleBegin(session);
VLOG(1) << "Polling types " << ModelTypeSetToString(request_types);
DownloadAndApplyUpdates(
+ request_types,
session,
- base::Bind(&BuildDownloadUpdatesForPoll,
+ base::Bind(&download::BuildDownloadUpdatesForPoll,
session,
kCreateMobileBookmarksFolder,
request_types));
@@ -111,10 +112,13 @@ bool Syncer::PollSyncShare(ModelTypeSet request_types,
void Syncer::ApplyUpdates(SyncSession* session) {
TRACE_EVENT0("sync", "ApplyUpdates");
- ApplyControlDataUpdates(session);
+ ApplyControlDataUpdates(session->context()->directory());
- ApplyUpdatesAndResolveConflictsCommand apply_updates;
- apply_updates.Execute(session);
+ UpdateHandlerMap* handler_map = session->context()->update_handler_map();
+ for (UpdateHandlerMap::iterator it = handler_map->begin();
+ it != handler_map->end(); ++it) {
+ it->second->ApplyUpdates(session->mutable_status_controller());
+ }
session->context()->set_hierarchy_conflict_detected(
session->status_controller().num_hierarchy_conflicts() > 0);
@@ -123,27 +127,63 @@ void Syncer::ApplyUpdates(SyncSession* session) {
}
bool Syncer::DownloadAndApplyUpdates(
+ ModelTypeSet request_types,
SyncSession* session,
base::Callback<void(sync_pb::ClientToServerMessage*)> build_fn) {
- while (!session->status_controller().ServerSaysNothingMoreToDownload()) {
+ SyncerError download_result = UNSET;
+ do {
TRACE_EVENT0("sync", "DownloadUpdates");
sync_pb::ClientToServerMessage msg;
build_fn.Run(&msg);
- SyncerError download_result = ExecuteDownloadUpdates(session, &msg);
+ download_result =
+ download::ExecuteDownloadUpdates(request_types, session, &msg);
session->mutable_status_controller()->set_last_download_updates_result(
download_result);
- if (download_result != SYNCER_OK) {
- return false;
- }
- }
+ } while (download_result == SERVER_MORE_TO_DOWNLOAD);
+
+ // Exit without applying if we're shutting down or an error was detected.
+ if (download_result != SYNCER_OK)
+ return false;
if (ExitRequested())
return false;
+
ApplyUpdates(session);
if (ExitRequested())
return false;
return true;
}
+SyncerError Syncer::BuildAndPostCommits(ModelTypeSet requested_types,
+ sessions::SyncSession* session) {
+ // The ExitRequested() check is unnecessary, since we should start getting
+ // errors from the ServerConnectionManager if an exit has been requested.
+ // However, it doesn't hurt to check it anyway.
+ while (!ExitRequested()) {
+ scoped_ptr<Commit> commit(
+ Commit::Init(
+ requested_types,
+ session->context()->max_commit_batch_size(),
+ session->context()->account_name(),
+ session->context()->directory()->cache_guid(),
+ session->context()->commit_contributor_map(),
+ session->context()->extensions_activity()));
+ if (!commit) {
+ break;
+ }
+
+ SyncerError error = commit->PostAndProcessResponse(
+ session,
+ session->mutable_status_controller(),
+ session->context()->extensions_activity());
+ commit->CleanUp();
+ if (error != SYNCER_OK) {
+ return error;
+ }
+ }
+
+ return SYNCER_OK;
+}
+
void Syncer::HandleCycleBegin(SyncSession* session) {
session->mutable_status_controller()->UpdateStartTime();
session->SendEventNotification(SyncEngineEvent::SYNC_CYCLE_BEGIN);
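With the SyncerCommand classes gone, the body of a normal sync cycle reduces to the two loops added above. A simplified restatement (error handling and the exact Commit::Init() arguments are elided; see the full functions above):

  // 1. Download until the server reports nothing more, then apply updates.
  SyncerError download_result = UNSET;
  do {
    sync_pb::ClientToServerMessage msg;
    build_fn.Run(&msg);
    download_result =
        download::ExecuteDownloadUpdates(request_types, session, &msg);
  } while (download_result == SERVER_MORE_TO_DOWNLOAD);
  if (download_result == SYNCER_OK)
    ApplyUpdates(session);

  // 2. Commit unsynced items in batches until none remain or an error occurs.
  while (!ExitRequested()) {
    scoped_ptr<Commit> commit(Commit::Init(/* see BuildAndPostCommits above */));
    if (!commit)
      break;  // Nothing left to commit.
    SyncerError error = commit->PostAndProcessResponse(
        session, session->mutable_status_controller(),
        session->context()->extensions_activity());
    commit->CleanUp();
    if (error != SYNCER_OK)
      return error;  // Propagated as the commit result.
  }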
diff --git a/chromium/sync/engine/syncer.h b/chromium/sync/engine/syncer.h
index 132f6ef7b0c..6154f910447 100644
--- a/chromium/sync/engine/syncer.h
+++ b/chromium/sync/engine/syncer.h
@@ -23,16 +23,15 @@ namespace syncer {
class CancelationSignal;
-// A Syncer provides a control interface for driving the individual steps
-// of the sync cycle. Each cycle (hopefully) moves the client into closer
-// synchronization with the server. The individual steps are modeled
-// as SyncerCommands, and the ordering of the steps is expressed using
-// the SyncerStep enum.
+// A Syncer provides a control interface for driving the sync cycle. These
+// cycles consist of downloading updates, parsing the response (a.k.a. process
+// updates), applying updates while resolving conflicts, and committing local
+// changes. Some of these steps may be skipped if they're deemed to be
+// unnecessary.
//
-// A Syncer instance expects to run on a dedicated thread. Calls
-// to SyncShare() may take an unbounded amount of time, as SyncerCommands
-// may block on network i/o, on lock contention, or on tasks posted to
-// other threads.
+// A Syncer instance expects to run on a dedicated thread. Calls to SyncShare()
+// may take an unbounded amount of time because it may block on network I/O, on
+// lock contention, or on tasks posted to other threads.
class SYNC_EXPORT_PRIVATE Syncer {
public:
typedef std::vector<int64> UnsyncedMetaHandles;
@@ -70,9 +69,18 @@ class SYNC_EXPORT_PRIVATE Syncer {
private:
void ApplyUpdates(sessions::SyncSession* session);
bool DownloadAndApplyUpdates(
+ ModelTypeSet request_types,
sessions::SyncSession* session,
base::Callback<void(sync_pb::ClientToServerMessage*)> build_fn);
+ // This function will commit batches of unsynced items to the server until the
+ // number of unsynced and ready to commit items reaches zero or an error is
+ // encountered. A request to exit early will be treated as an error and will
+ // abort any blocking operations.
+ SyncerError BuildAndPostCommits(
+ ModelTypeSet request_types,
+ sessions::SyncSession* session);
+
void HandleCycleBegin(sessions::SyncSession* session);
bool HandleCycleEnd(
sessions::SyncSession* session,
diff --git a/chromium/sync/engine/syncer_command.cc b/chromium/sync/engine/syncer_command.cc
deleted file mode 100644
index 5dfd8215c32..00000000000
--- a/chromium/sync/engine/syncer_command.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/syncer_command.h"
-
-namespace syncer {
-
-SyncerCommand::SyncerCommand() {}
-SyncerCommand::~SyncerCommand() {}
-
-SyncerError SyncerCommand::Execute(sessions::SyncSession* session) {
- SyncerError result = ExecuteImpl(session);
- return result;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer_command.h b/chromium/sync/engine/syncer_command.h
deleted file mode 100644
index 303aad53907..00000000000
--- a/chromium/sync/engine/syncer_command.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNCER_COMMAND_H_
-#define SYNC_ENGINE_SYNCER_COMMAND_H_
-
-#include "base/basictypes.h"
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-
-namespace syncer {
-
-namespace sessions {
-class SyncSession;
-}
-
-// Implementation of a simple command pattern intended to be driven by the
-// Syncer. SyncerCommand is abstract and all subclasses must implement
-// ExecuteImpl(). This is done so that chunks of syncer operation can be unit
-// tested.
-//
-// Example Usage:
-//
-// SyncSession session = ...;
-// SyncerCommand *cmd = SomeCommandFactory.createCommand(...);
-// cmd->Execute(session);
-// delete cmd;
-
-class SYNC_EXPORT_PRIVATE SyncerCommand {
- public:
- SyncerCommand();
- virtual ~SyncerCommand();
-
- // Execute dispatches to a derived class's ExecuteImpl.
- SyncerError Execute(sessions::SyncSession* session);
-
- // ExecuteImpl is where derived classes actually do work.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) = 0;
- private:
- DISALLOW_COPY_AND_ASSIGN(SyncerCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNCER_COMMAND_H_
diff --git a/chromium/sync/engine/syncer_proto_util.cc b/chromium/sync/engine/syncer_proto_util.cc
index 863a8c44386..bfd8151bef0 100644
--- a/chromium/sync/engine/syncer_proto_util.cc
+++ b/chromium/sync/engine/syncer_proto_util.cc
@@ -276,13 +276,6 @@ bool SyncerProtoUtil::PostAndProcessHeaders(ServerConnectionManager* scm,
return false;
}
- std::string new_token = params.response.update_client_auth_header;
- if (!new_token.empty()) {
- SyncEngineEvent event(SyncEngineEvent::UPDATED_TOKEN);
- event.updated_token = new_token;
- session->context()->NotifyListeners(event);
- }
-
if (response->ParseFromString(params.buffer_out)) {
// TODO(tim): This is an egregious layering violation (bug 35060).
switch (response->error_code()) {
diff --git a/chromium/sync/engine/syncer_proto_util_unittest.cc b/chromium/sync/engine/syncer_proto_util_unittest.cc
index ff37e56a51d..39f4fddbd47 100644
--- a/chromium/sync/engine/syncer_proto_util_unittest.cc
+++ b/chromium/sync/engine/syncer_proto_util_unittest.cc
@@ -45,7 +45,6 @@ class MockDelegate : public sessions::SyncSession::Delegate {
MOCK_METHOD1(OnReceivedSessionsCommitDelay, void(const base::TimeDelta&));
MOCK_METHOD1(OnReceivedClientInvalidationHintBufferSize, void(int));
MOCK_METHOD1(OnSyncProtocolError, void(const sessions::SyncSessionSnapshot&));
- MOCK_METHOD0(OnShouldStopSyncingPermanently, void());
MOCK_METHOD1(OnSilencedUntil, void(const base::TimeTicks&));
};
@@ -256,7 +255,7 @@ TEST_F(SyncerProtoUtilTest, AddRequestBirthday) {
class DummyConnectionManager : public ServerConnectionManager {
public:
DummyConnectionManager(CancelationSignal* signal)
- : ServerConnectionManager("unused", 0, false, false, signal),
+ : ServerConnectionManager("unused", 0, false, signal),
send_error_(false),
access_denied_(false) {}
diff --git a/chromium/sync/engine/syncer_types.h b/chromium/sync/engine/syncer_types.h
index ada4d82f96f..36f3dbcf8dd 100644
--- a/chromium/sync/engine/syncer_types.h
+++ b/chromium/sync/engine/syncer_types.h
@@ -45,26 +45,6 @@ enum UpdateAttemptResponse {
CONFLICT_SIMPLE
};
-enum ServerUpdateProcessingResult {
- // Success. Update applied and stored in SERVER_* fields or dropped if
- // irrelevant.
- SUCCESS_PROCESSED,
-
- // Success. Update details stored in SERVER_* fields, but wasn't applied.
- SUCCESS_STORED,
-
- // Update is illegally inconsistent with earlier updates. e.g. A bookmark
- // becoming a folder.
- FAILED_INCONSISTENT,
-
- // Update is illegal when considered alone. e.g. broken UTF-8 in the name.
- FAILED_CORRUPT,
-
- // Only used by VerifyUpdate. Indicates that an update is valid. As
- // VerifyUpdate cannot return SUCCESS_STORED, we reuse the value.
- SUCCESS_VALID = SUCCESS_STORED
-};
-
// Different results from the verify phase will yield different methods of
// processing in the ProcessUpdates phase. The SKIP result means the entry
// doesn't go to the ProcessUpdates phase.
diff --git a/chromium/sync/engine/syncer_unittest.cc b/chromium/sync/engine/syncer_unittest.cc
index b1a96463536..19aff7c3b11 100644
--- a/chromium/sync/engine/syncer_unittest.cc
+++ b/chromium/sync/engine/syncer_unittest.cc
@@ -25,7 +25,6 @@
#include "build/build_config.h"
#include "sync/engine/get_commit_ids.h"
#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/process_updates_command.h"
#include "sync/engine/sync_scheduler_impl.h"
#include "sync/engine/syncer.h"
#include "sync/engine/syncer_proto_util.h"
@@ -51,6 +50,7 @@
#include "sync/test/engine/test_syncable_utils.h"
#include "sync/test/fake_encryptor.h"
#include "sync/test/fake_sync_encryption_handler.h"
+#include "sync/test/sessions/mock_debug_info_getter.h"
#include "sync/util/cryptographer.h"
#include "sync/util/extensions_activity.h"
#include "sync/util/time.h"
@@ -105,6 +105,7 @@ using syncable::SPECIFICS;
using syncable::SYNCING;
using syncable::UNITTEST;
+using sessions::MockDebugInfoGetter;
using sessions::StatusController;
using sessions::SyncSessionContext;
using sessions::SyncSession;
@@ -149,8 +150,6 @@ class SyncerTest : public testing::Test,
int size) OVERRIDE {
last_client_invalidation_hint_buffer_size_ = size;
}
- virtual void OnShouldStopSyncingPermanently() OVERRIDE {
- }
virtual void OnSyncProtocolError(
const sessions::SyncSessionSnapshot& snapshot) OVERRIDE {
}
@@ -182,23 +181,27 @@ class SyncerTest : public testing::Test,
saw_syncer_event_ = true;
}
- void SyncShareNudge() {
+ void ResetSession() {
session_.reset(SyncSession::Build(context_.get(), this));
+ }
+
+ void SyncShareNudge() {
+ ResetSession();
// Pretend we've seen a local change, to make the nudge_tracker look normal.
nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
EXPECT_TRUE(
syncer_->NormalSyncShare(
- GetRoutingInfoTypes(context_->routing_info()),
+ context_->enabled_types(),
nudge_tracker_,
session_.get()));
}
void SyncShareConfigure() {
- session_.reset(SyncSession::Build(context_.get(), this));
+ ResetSession();
EXPECT_TRUE(syncer_->ConfigureSyncShare(
- GetRoutingInfoTypes(context_->routing_info()),
+ context_->enabled_types(),
sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
session_.get()));
}
@@ -207,6 +210,7 @@ class SyncerTest : public testing::Test,
dir_maker_.SetUp();
mock_server_.reset(new MockConnectionManager(directory(),
&cancelation_signal_));
+ debug_info_getter_.reset(new MockDebugInfoGetter);
EnableDatatype(BOOKMARKS);
EnableDatatype(NIGORI);
EnableDatatype(PREFERENCES);
@@ -225,7 +229,7 @@ class SyncerTest : public testing::Test,
new SyncSessionContext(
mock_server_.get(), directory(), workers,
extensions_activity_,
- listeners, NULL, &traffic_recorder_,
+ listeners, debug_info_getter_.get(), &traffic_recorder_,
true, // enable keystore encryption
false, // force enable pre-commit GU avoidance experiment
"fake_invalidator_client_id"));
@@ -395,35 +399,6 @@ class SyncerTest : public testing::Test,
}
}
- void DoTruncationTest(const vector<int64>& unsynced_handle_view,
- const vector<int64>& expected_handle_order) {
- for (size_t limit = expected_handle_order.size() + 2; limit > 0; --limit) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
-
- ModelSafeRoutingInfo routes;
- GetModelSafeRoutingInfo(&routes);
- ModelTypeSet types = GetRoutingInfoTypes(routes);
- sessions::OrderedCommitSet output_set(routes);
- GetCommitIds(&wtrans, types, limit, &output_set);
- size_t truncated_size = std::min(limit, expected_handle_order.size());
- ASSERT_EQ(truncated_size, output_set.Size());
- for (size_t i = 0; i < truncated_size; ++i) {
- ASSERT_EQ(expected_handle_order[i], output_set.GetCommitHandleAt(i))
- << "At index " << i << " with batch size limited to " << limit;
- }
- sessions::OrderedCommitSet::Projection proj;
- proj = output_set.GetCommitIdProjection(GROUP_PASSIVE);
- ASSERT_EQ(truncated_size, proj.size());
- for (size_t i = 0; i < truncated_size; ++i) {
- SCOPED_TRACE(::testing::Message("Projection mismatch with i = ") << i);
- int64 projected = output_set.GetCommitHandleAt(proj[i]);
- ASSERT_EQ(expected_handle_order[proj[i]], projected);
- // Since this projection is the identity, the following holds.
- ASSERT_EQ(expected_handle_order[i], projected);
- }
- }
- }
-
const StatusController& status() {
return session_->status_controller();
}
@@ -490,6 +465,18 @@ class SyncerTest : public testing::Test,
return directory()->GetCryptographer(trans);
}
+ // Configures SyncSessionContext and NudgeTracker so Syncer won't call
+ // GetUpdates prior to Commit. This method can be used to ensure a Commit is
+ // not preceded by GetUpdates.
+ void ConfigureNoGetUpdatesRequired() {
+ context_->set_server_enabled_pre_commit_update_avoidance(true);
+ nudge_tracker_.OnInvalidationsEnabled();
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+
+ ASSERT_FALSE(context_->ShouldFetchUpdatesBeforeCommit());
+ ASSERT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
+ }
+
base::MessageLoop message_loop_;
// Some ids to aid tests. Only the root one's value is specific. The rest
@@ -522,6 +509,7 @@ class SyncerTest : public testing::Test,
ModelTypeSet enabled_datatypes_;
TrafficRecorder traffic_recorder_;
sessions::NudgeTracker nudge_tracker_;
+ scoped_ptr<MockDebugInfoGetter> debug_info_getter_;
DISALLOW_COPY_AND_ASSIGN(SyncerTest);
};
@@ -540,64 +528,6 @@ TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
// regression for a very old bug.
}
-TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
- syncable::Id root = ids_.root();
- // Create two server entries.
- mock_server_->AddUpdateDirectory(ids_.MakeServer("x"), root, "X", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(ids_.MakeServer("w"), root, "W", 10, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
-
- // Create some new client entries.
- CreateUnsyncedDirectory("C", ids_.MakeLocal("c"));
- CreateUnsyncedDirectory("B", ids_.MakeLocal("b"));
- CreateUnsyncedDirectory("D", ids_.MakeLocal("d"));
- CreateUnsyncedDirectory("E", ids_.MakeLocal("e"));
- CreateUnsyncedDirectory("J", ids_.MakeLocal("j"));
-
- vector<int64> expected_order;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry_x(&wtrans, GET_BY_ID, ids_.MakeServer("x"));
- MutableEntry entry_b(&wtrans, GET_BY_ID, ids_.MakeLocal("b"));
- MutableEntry entry_c(&wtrans, GET_BY_ID, ids_.MakeLocal("c"));
- MutableEntry entry_d(&wtrans, GET_BY_ID, ids_.MakeLocal("d"));
- MutableEntry entry_e(&wtrans, GET_BY_ID, ids_.MakeLocal("e"));
- MutableEntry entry_w(&wtrans, GET_BY_ID, ids_.MakeServer("w"));
- MutableEntry entry_j(&wtrans, GET_BY_ID, ids_.MakeLocal("j"));
- entry_x.PutIsUnsynced(true);
- entry_b.PutParentId(entry_x.GetId());
- entry_d.PutParentId(entry_b.GetId());
- entry_c.PutParentId(entry_x.GetId());
- entry_c.PutPredecessor(entry_b.GetId());
- entry_e.PutParentId(entry_c.GetId());
- entry_w.PutPredecessor(entry_x.GetId());
- entry_w.PutIsUnsynced(true);
- entry_w.PutServerVersion(20);
- entry_w.PutIsUnappliedUpdate(true); // Fake a conflict.
- entry_j.PutPredecessor(entry_w.GetId());
-
- // The expected order is "x", "b", "c", "d", "e", "j", truncated
- // appropriately.
- expected_order.push_back(entry_x.GetMetahandle());
- expected_order.push_back(entry_b.GetMetahandle());
- expected_order.push_back(entry_c.GetMetahandle());
- expected_order.push_back(entry_d.GetMetahandle());
- expected_order.push_back(entry_e.GetMetahandle());
- expected_order.push_back(entry_j.GetMetahandle());
- }
-
- // The arrangement is now: x (b (d) c (e)) w j
- // Entry "w" is in conflict, so it is not eligible for commit.
- vector<int64> unsynced_handle_view;
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- GetUnsyncedEntries(&rtrans, &unsynced_handle_view);
- }
- DoTruncationTest(unsynced_handle_view, expected_order);
-}
-
TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
const ModelTypeSet throttled_types(BOOKMARKS);
sync_pb::EntitySpecifics bookmark_data;
@@ -617,9 +547,11 @@ TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
}
// Now sync without enabling bookmarks.
+ mock_server_->ExpectGetUpdatesRequestTypes(
+ Difference(context_->enabled_types(), ModelTypeSet(BOOKMARKS)));
+ ResetSession();
syncer_->NormalSyncShare(
- Difference(GetRoutingInfoTypes(context_->routing_info()),
- ModelTypeSet(BOOKMARKS)),
+ Difference(context_->enabled_types(), ModelTypeSet(BOOKMARKS)),
nudge_tracker_,
session_.get());
@@ -632,10 +564,7 @@ TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
}
// Sync again with bookmarks enabled.
- syncer_->NormalSyncShare(
- GetRoutingInfoTypes(context_->routing_info()),
- nudge_tracker_,
- session_.get());
+ mock_server_->ExpectGetUpdatesRequestTypes(context_->enabled_types());
SyncShareNudge();
{
// It should have been committed.
@@ -2605,6 +2534,131 @@ TEST_F(SyncerTest, CommitManyItemsInOneGo_CommitConflict) {
directory()->unsynced_entity_count());
}
+// Tests that sending debug info events works.
+TEST_F(SyncerTest, SendDebugInfoEventsOnGetUpdates_HappyCase) {
+ debug_info_getter_->AddDebugEvent();
+ debug_info_getter_->AddDebugEvent();
+
+ SyncShareNudge();
+
+ // Verify we received one GetUpdates request with two debug info events.
+ EXPECT_EQ(1U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_get_updates());
+ EXPECT_EQ(2, mock_server_->last_request().debug_info().events_size());
+
+ SyncShareNudge();
+
+ // See that we received another GetUpdates request, but that it contains no
+ // debug info events.
+ EXPECT_EQ(2U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_get_updates());
+ EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
+
+ debug_info_getter_->AddDebugEvent();
+
+ SyncShareNudge();
+
+ // See that we received another GetUpdates request and it contains one debug
+ // info event.
+ EXPECT_EQ(3U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_get_updates());
+ EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
+}
+
+// Tests that debug info events are not dropped on server error.
+TEST_F(SyncerTest, SendDebugInfoEventsOnGetUpdates_PostFailsDontDrop) {
+ debug_info_getter_->AddDebugEvent();
+ debug_info_getter_->AddDebugEvent();
+
+ mock_server_->FailNextPostBufferToPathCall();
+ SyncShareNudge();
+
+ // Verify we attempted to send one GetUpdates request with two debug info
+ // events.
+ EXPECT_EQ(1U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_get_updates());
+ EXPECT_EQ(2, mock_server_->last_request().debug_info().events_size());
+
+ SyncShareNudge();
+
+ // See that the client resent the two debug info events.
+ EXPECT_EQ(2U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_get_updates());
+ EXPECT_EQ(2, mock_server_->last_request().debug_info().events_size());
+
+ // The previous send was successful so this next one shouldn't generate any
+ // debug info events.
+ SyncShareNudge();
+ EXPECT_EQ(3U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_get_updates());
+ EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
+}
+
+// Tests that sending debug info events on Commit works.
+TEST_F(SyncerTest, SendDebugInfoEventsOnCommit_HappyCase) {
+ // Make sure GetUpdates isn't called, as it would "steal" debug info events
+ // before Commit has a chance to send them.
+ ConfigureNoGetUpdatesRequired();
+
+ // Generate a debug info event and trigger a commit.
+ debug_info_getter_->AddDebugEvent();
+ CreateUnsyncedDirectory("X", "id_X");
+ SyncShareNudge();
+
+ // Verify that the last request received is a Commit and that it contains a
+ // debug info event.
+ EXPECT_EQ(1U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_commit());
+ EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
+
+ // Generate another commit, but no debug info event.
+ CreateUnsyncedDirectory("Y", "id_Y");
+ SyncShareNudge();
+
+ // See that it was received and contains no debug info events.
+ EXPECT_EQ(2U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_commit());
+ EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
+}
+
+// Tests that debug info events are not dropped on server error.
+TEST_F(SyncerTest, SendDebugInfoEventsOnCommit_PostFailsDontDrop) {
+ // Make sure GetUpdates isn't called, as it would "steal" debug info events
+ // before Commit has a chance to send them.
+ ConfigureNoGetUpdatesRequired();
+
+ mock_server_->FailNextPostBufferToPathCall();
+
+ // Generate a debug info event and trigger a commit.
+ debug_info_getter_->AddDebugEvent();
+ CreateUnsyncedDirectory("X", "id_X");
+ SyncShareNudge();
+
+ // Verify that the last request sent is a Commit and that it contains a debug
+ // info event.
+ EXPECT_EQ(1U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_commit());
+ EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
+
+ // Try again.
+ SyncShareNudge();
+
+ // Verify that we've received another Commit and that it contains a debug info
+ // event (just like the previous one).
+ EXPECT_EQ(2U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_commit());
+ EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
+
+ // Generate another commit and try again.
+ CreateUnsyncedDirectory("Y", "id_Y");
+ SyncShareNudge();
+
+ // See that it was received and contains no debug info events.
+ EXPECT_EQ(3U, mock_server_->requests().size());
+ ASSERT_TRUE(mock_server_->last_request().has_commit());
+ EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
+}
+
TEST_F(SyncerTest, HugeConflict) {
int item_count = 300; // We should be able to do 300 or 3000 w/o issue.
@@ -3249,8 +3303,6 @@ TEST_F(SyncerTest, UpdateWhereParentIsNotAFolder) {
}
}
-const char kRootId[] = "0";
-
TEST_F(SyncerTest, DirectoryUpdateTest) {
Id in_root_id = ids_.NewServerId();
Id in_in_root_id = ids_.NewServerId();
diff --git a/chromium/sync/engine/syncer_util.cc b/chromium/sync/engine/syncer_util.cc
index 3960baaa6fc..2235734aedb 100644
--- a/chromium/sync/engine/syncer_util.cc
+++ b/chromium/sync/engine/syncer_util.cc
@@ -23,8 +23,10 @@
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_changes_version.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
@@ -305,7 +307,7 @@ namespace {
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
const std::string& url,
const std::string& favicon_bytes,
- MutableEntry* local_entry) {
+ syncable::ModelNeutralMutableEntry* local_entry) {
// In the new-style protocol, the server no longer sends bookmark info for
// the "google_chrome" folder. Mimic that here.
if (singleton_tag == "google_chrome")
@@ -319,8 +321,9 @@ void UpdateBookmarkSpecifics(const std::string& singleton_tag,
local_entry->PutServerSpecifics(pb);
}
-void UpdateBookmarkPositioning(const sync_pb::SyncEntity& update,
- MutableEntry* local_entry) {
+void UpdateBookmarkPositioning(
+ const sync_pb::SyncEntity& update,
+ syncable::ModelNeutralMutableEntry* local_entry) {
// Update our unique bookmark tag. In many cases this will be identical to
// the tag we already have. However, clients that have recently upgraded to
// versions that support unique positions will have incorrect tags. See the
@@ -348,7 +351,7 @@ void UpdateBookmarkPositioning(const sync_pb::SyncEntity& update,
} // namespace
void UpdateServerFieldsFromUpdate(
- MutableEntry* target,
+ syncable::ModelNeutralMutableEntry* target,
const sync_pb::SyncEntity& update,
const std::string& name) {
if (update.deleted()) {
@@ -418,12 +421,14 @@ void UpdateServerFieldsFromUpdate(
}
// Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::WriteTransaction *trans,
+void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
const syncable::Id& id) {
- syncable::MutableEntry entry(trans, GET_BY_ID, id);
+ syncable::Entry entry(trans, GET_BY_ID, id);
if (!entry.good()) {
- syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
- id);
+ syncable::ModelNeutralMutableEntry new_entry(
+ trans,
+ syncable::CREATE_NEW_UPDATE_ITEM,
+ id);
}
}
@@ -481,6 +486,7 @@ VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
+ syncable::BaseWriteTransaction* trans,
std::set<syncable::Id>* deleted_folders) {
// There are two options here.
// 1. Scan deleted unsynced entries looking up their pre-delete tree for any
@@ -492,27 +498,22 @@ void MarkDeletedChildrenSynced(
if (deleted_folders->empty())
return;
Directory::Metahandles handles;
- {
- syncable::ReadTransaction trans(FROM_HERE, dir);
- dir->GetUnsyncedMetaHandles(&trans, &handles);
- }
+ dir->GetUnsyncedMetaHandles(trans, &handles);
if (handles.empty())
return;
Directory::Metahandles::iterator it;
for (it = handles.begin() ; it != handles.end() ; ++it) {
- // Single transaction / entry we deal with.
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- MutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
if (!entry.GetIsUnsynced() || !entry.GetIsDel())
continue;
syncable::Id id = entry.GetParentId();
- while (id != trans.root_id()) {
+ while (id != trans->root_id()) {
if (deleted_folders->find(id) != deleted_folders->end()) {
// We've synced the deletion of this deleted entry's parent.
entry.PutIsUnsynced(false);
break;
}
- Entry parent(&trans, GET_BY_ID, id);
+ Entry parent(trans, GET_BY_ID, id);
if (!parent.good() || !parent.GetIsDel())
break;
id = parent.GetParentId();
@@ -539,12 +540,12 @@ VerifyResult VerifyNewEntry(
// Assumes we have an existing entry; check here for updates that break
// consistency rules.
VerifyResult VerifyUpdateConsistency(
- syncable::WriteTransaction* trans,
+ syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target,
const bool deleted,
const bool is_directory,
- ModelType model_type) {
+ ModelType model_type,
+ syncable::ModelNeutralMutableEntry* target) {
CHECK(target->good());
const syncable::Id& update_id = SyncableIdFromProto(update.id_string());
@@ -612,9 +613,9 @@ VerifyResult VerifyUpdateConsistency(
// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'
-VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target) {
+ syncable::ModelNeutralMutableEntry* target) {
// TODO(nick): We hit this path for deleted items that the server
// tells us to re-create; only deleted items with positive base versions
// will hit this path. However, it's not clear how such an undeletion
diff --git a/chromium/sync/engine/syncer_util.h b/chromium/sync/engine/syncer_util.h
index 45b3b46574d..575ab11d37e 100644
--- a/chromium/sync/engine/syncer_util.h
+++ b/chromium/sync/engine/syncer_util.h
@@ -27,6 +27,7 @@ namespace syncer {
namespace syncable {
class BaseTransaction;
+class ModelNeutralWriteTransaction;
} // namespace syncable
class Cryptographer;
@@ -66,12 +67,12 @@ std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update);
// Pass in name to avoid redundant UTF8 conversion.
void UpdateServerFieldsFromUpdate(
- syncable::MutableEntry* local_entry,
+ syncable::ModelNeutralMutableEntry* local_entry,
const sync_pb::SyncEntity& server_entry,
const std::string& name);
// Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::WriteTransaction *trans,
+void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
const syncable::Id& id);
// This function is called on an entry when we can update the user-facing data
@@ -87,21 +88,23 @@ VerifyResult VerifyNewEntry(const sync_pb::SyncEntity& update,
// Assumes we have an existing entry; check here for updates that break
// consistency rules.
-VerifyResult VerifyUpdateConsistency(syncable::WriteTransaction* trans,
- const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target,
- const bool deleted,
- const bool is_directory,
- ModelType model_type);
+VerifyResult VerifyUpdateConsistency(
+ syncable::ModelNeutralWriteTransaction* trans,
+ const sync_pb::SyncEntity& update,
+ const bool deleted,
+ const bool is_directory,
+ ModelType model_type,
+ syncable::ModelNeutralMutableEntry* target);
// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'
-VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target);
+ syncable::ModelNeutralMutableEntry* target);
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
+ syncable::BaseWriteTransaction* trans,
std::set<syncable::Id>* deleted_folders);
} // namespace syncer
diff --git a/chromium/sync/engine/update_applicator.cc b/chromium/sync/engine/update_applicator.cc
index 3edf1f84b34..e8731cecf4c 100644
--- a/chromium/sync/engine/update_applicator.cc
+++ b/chromium/sync/engine/update_applicator.cc
@@ -19,12 +19,8 @@ namespace syncer {
using syncable::ID;
-UpdateApplicator::UpdateApplicator(Cryptographer* cryptographer,
- const ModelSafeRoutingInfo& routes,
- ModelSafeGroup group_filter)
+UpdateApplicator::UpdateApplicator(Cryptographer* cryptographer)
: cryptographer_(cryptographer),
- group_filter_(group_filter),
- routing_info_(routes),
updates_applied_(0),
encryption_conflicts_(0),
hierarchy_conflicts_(0) {
@@ -58,11 +54,6 @@ void UpdateApplicator::AttemptApplications(
for (std::vector<int64>::iterator i = to_apply.begin();
i != to_apply.end(); ++i) {
- syncable::Entry read_entry(trans, syncable::GET_BY_HANDLE, *i);
- if (SkipUpdate(read_entry)) {
- continue;
- }
-
syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *i);
UpdateAttemptResponse result = AttemptToUpdateEntry(
trans, &entry, cryptographer_);
@@ -103,23 +94,4 @@ void UpdateApplicator::AttemptApplications(
}
}
-bool UpdateApplicator::SkipUpdate(const syncable::Entry& entry) {
- ModelType type = entry.GetServerModelType();
- ModelSafeGroup g = GetGroupForModelType(type, routing_info_);
- // The set of updates passed to the UpdateApplicator should already
- // be group-filtered.
- if (g != group_filter_) {
- NOTREACHED();
- return true;
- }
- if (g == GROUP_PASSIVE &&
- !routing_info_.count(type) &&
- type != UNSPECIFIED &&
- type != TOP_LEVEL_FOLDER) {
- DVLOG(1) << "Skipping update application, type not permitted.";
- return true;
- }
- return false;
-}
-
} // namespace syncer
diff --git a/chromium/sync/engine/update_applicator.h b/chromium/sync/engine/update_applicator.h
index b04325cc221..ff8fa157163 100644
--- a/chromium/sync/engine/update_applicator.h
+++ b/chromium/sync/engine/update_applicator.h
@@ -35,9 +35,7 @@ class Cryptographer;
class UpdateApplicator {
public:
- UpdateApplicator(Cryptographer* cryptographer,
- const ModelSafeRoutingInfo& routes,
- ModelSafeGroup group_filter);
+ UpdateApplicator(Cryptographer* cryptographer);
~UpdateApplicator();
// Attempt to apply the specified updates.
@@ -67,10 +65,6 @@ class UpdateApplicator {
// Used to decrypt sensitive sync nodes.
Cryptographer* cryptographer_;
- ModelSafeGroup group_filter_;
-
- const ModelSafeRoutingInfo routing_info_;
-
DISALLOW_COPY_AND_ASSIGN(UpdateApplicator);
int updates_applied_;
diff --git a/chromium/sync/internal_api/debug_info_event_listener.cc b/chromium/sync/internal_api/debug_info_event_listener.cc
index 51e6c0f4d3b..f46c4ee2ef9 100644
--- a/chromium/sync/internal_api/debug_info_event_listener.cc
+++ b/chromium/sync/internal_api/debug_info_event_listener.cc
@@ -89,11 +89,6 @@ void DebugInfoEventListener::OnStopSyncingPermanently() {
CreateAndAddEvent(sync_pb::DebugEventInfo::STOP_SYNCING_PERMANENTLY);
}
-void DebugInfoEventListener::OnUpdatedToken(const std::string& token) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::UPDATED_TOKEN);
-}
-
void DebugInfoEventListener::OnEncryptedTypesChanged(
ModelTypeSet encrypted_types,
bool encrypt_everything) {
@@ -135,40 +130,42 @@ void DebugInfoEventListener::OnNudgeFromDatatype(ModelType datatype) {
}
void DebugInfoEventListener::OnIncomingNotification(
- const ObjectIdInvalidationMap& invalidations) {
+ const ObjectIdInvalidationMap& invalidation_map) {
DCHECK(thread_checker_.CalledOnValidThread());
sync_pb::DebugEventInfo event_info;
- ModelTypeSet types = ObjectIdSetToModelTypeSet(ObjectIdInvalidationMapToSet(
- invalidations));
-
- for (ObjectIdInvalidationMap::const_iterator it = invalidations.begin();
- it != invalidations.end(); ++it) {
- ModelType type = UNSPECIFIED;
- if (ObjectIdToRealModelType(it->first, &type)) {
- event_info.add_datatypes_notified_from_server(
- GetSpecificsFieldNumberFromModelType(type));
- }
+ ModelTypeSet types =
+ ObjectIdSetToModelTypeSet(invalidation_map.GetObjectIds());
+
+ for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
+ event_info.add_datatypes_notified_from_server(
+ GetSpecificsFieldNumberFromModelType(it.Get()));
}
AddEventToQueue(event_info);
}
-void DebugInfoEventListener::GetAndClearDebugInfo(
- sync_pb::DebugInfo* debug_info) {
+void DebugInfoEventListener::GetDebugInfo(sync_pb::DebugInfo* debug_info) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_LE(events_.size(), kMaxEntries);
- while (!events_.empty()) {
+
+ for (DebugEventInfoQueue::const_iterator iter = events_.begin();
+ iter != events_.end();
+ ++iter) {
sync_pb::DebugEventInfo* event_info = debug_info->add_events();
- const sync_pb::DebugEventInfo& debug_event_info = events_.front();
- event_info->CopyFrom(debug_event_info);
- events_.pop();
+ event_info->CopyFrom(*iter);
}
debug_info->set_events_dropped(events_dropped_);
debug_info->set_cryptographer_ready(cryptographer_ready_);
debug_info->set_cryptographer_has_pending_keys(
cryptographer_has_pending_keys_);
+}
+
+void DebugInfoEventListener::ClearDebugInfo() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_LE(events_.size(), kMaxEntries);
+ events_.clear();
events_dropped_ = false;
}
@@ -264,10 +261,10 @@ void DebugInfoEventListener::AddEventToQueue(
DVLOG(1) << "DebugInfoEventListener::AddEventToQueue Dropping an old event "
<< "because of full queue";
- events_.pop();
+ events_.pop_front();
events_dropped_ = true;
}
- events_.push(event_info);
+ events_.push_back(event_info);
}
} // namespace syncer
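Splitting GetAndClearDebugInfo() into GetDebugInfo() and ClearDebugInfo() lets the syncer keep queued events until it knows the request carrying them actually reached the server. The calling pattern this implies might look roughly as follows (PostMessage() is only a placeholder for the real send path, which is not part of this diff):

  sync_pb::ClientToServerMessage msg;
  // Copy, but do not yet discard, any queued debug events.
  debug_info_getter->GetDebugInfo(msg.mutable_debug_info());
  SyncerError result = PostMessage(msg);  // Placeholder send step.
  if (result == SYNCER_OK) {
    // Only drop the events after a successful post; a failed post keeps them
    // queued so they are resent with the next request.
    debug_info_getter->ClearDebugInfo();
  }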
diff --git a/chromium/sync/internal_api/debug_info_event_listener.h b/chromium/sync/internal_api/debug_info_event_listener.h
index c3aa9d0929a..15cc0a66972 100644
--- a/chromium/sync/internal_api/debug_info_event_listener.h
+++ b/chromium/sync/internal_api/debug_info_event_listener.h
@@ -5,11 +5,12 @@
#ifndef SYNC_INTERNAL_API_DEBUG_INFO_EVENT_LISTENER_H_
#define SYNC_INTERNAL_API_DEBUG_INFO_EVENT_LISTENER_H_
-#include <queue>
+#include <deque>
#include <string>
#include "base/compiler_specific.h"
#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/data_type_debug_info_listener.h"
#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
#include "sync/internal_api/public/sync_encryption_handler.h"
@@ -22,8 +23,9 @@
namespace syncer {
// In order to track datatype association results, we need at least as many
-// entries as datatypes.
-const unsigned int kMaxEntries = 25;
+// entries as datatypes. Reserve additional space for other kinds of events that
+// are likely to happen during first sync or startup.
+const unsigned int kMaxEntries = MODEL_TYPE_COUNT + 10;
// Listens to events and records them in a queue, and passes the events to
// the syncer when requested.
@@ -47,7 +49,6 @@ class SYNC_EXPORT_PRIVATE DebugInfoEventListener
virtual void OnConnectionStatusChange(
ConnectionStatus connection_status) OVERRIDE;
virtual void OnStopSyncingPermanently() OVERRIDE;
- virtual void OnUpdatedToken(const std::string& token) OVERRIDE;
virtual void OnActionableError(
const SyncProtocolError& sync_error) OVERRIDE;
@@ -74,7 +75,10 @@ class SYNC_EXPORT_PRIVATE DebugInfoEventListener
void OnIncomingNotification(const ObjectIdInvalidationMap& invalidations);
// DebugInfoGetter implementation.
- virtual void GetAndClearDebugInfo(sync_pb::DebugInfo* debug_info) OVERRIDE;
+ virtual void GetDebugInfo(sync_pb::DebugInfo* debug_info) OVERRIDE;
+
+ // DebugInfoGetter implementation.
+ virtual void ClearDebugInfo() OVERRIDE;
// DataTypeDebugInfoListener implementation.
virtual void OnDataTypeConfigureComplete(
@@ -87,11 +91,14 @@ class SYNC_EXPORT_PRIVATE DebugInfoEventListener
private:
FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyEventsAdded);
FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyQueueSize);
- FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyGetAndClearEvents);
+ FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyGetEvents);
+ FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyClearEvents);
void AddEventToQueue(const sync_pb::DebugEventInfo& event_info);
void CreateAndAddEvent(sync_pb::DebugEventInfo::SingletonEventType type);
- std::queue<sync_pb::DebugEventInfo> events_;
+
+ typedef std::deque<sync_pb::DebugEventInfo> DebugEventInfoQueue;
+ DebugEventInfoQueue events_;
// True indicates we had to drop one or more events to keep our limit of
// |kMaxEntries|.
@@ -103,10 +110,10 @@ class SYNC_EXPORT_PRIVATE DebugInfoEventListener
// Cryptographer is initialized and does not have pending keys.
bool cryptographer_ready_;
- base::WeakPtrFactory<DebugInfoEventListener> weak_ptr_factory_;
-
base::ThreadChecker thread_checker_;
+ base::WeakPtrFactory<DebugInfoEventListener> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(DebugInfoEventListener);
};
diff --git a/chromium/sync/internal_api/debug_info_event_listener_unittest.cc b/chromium/sync/internal_api/debug_info_event_listener_unittest.cc
index 131728bff49..31f16f87b63 100644
--- a/chromium/sync/internal_api/debug_info_event_listener_unittest.cc
+++ b/chromium/sync/internal_api/debug_info_event_listener_unittest.cc
@@ -29,23 +29,33 @@ TEST_F(DebugInfoEventListenerTest, VerifyQueueSize) {
sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
}
sync_pb::DebugInfo debug_info;
- debug_info_event_listener.GetAndClearDebugInfo(&debug_info);
+ debug_info_event_listener.GetDebugInfo(&debug_info);
+ debug_info_event_listener.ClearDebugInfo();
ASSERT_TRUE(debug_info.events_dropped());
ASSERT_EQ(static_cast<int>(kMaxEntries), debug_info.events_size());
}
-TEST_F(DebugInfoEventListenerTest, VerifyGetAndClearEvents) {
+TEST_F(DebugInfoEventListenerTest, VerifyGetEvents) {
DebugInfoEventListener debug_info_event_listener;
debug_info_event_listener.CreateAndAddEvent(
sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
sync_pb::DebugInfo debug_info;
- debug_info_event_listener.GetAndClearDebugInfo(&debug_info);
- ASSERT_EQ(debug_info_event_listener.events_.size(), 0U);
+ debug_info_event_listener.GetDebugInfo(&debug_info);
+ ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
ASSERT_EQ(debug_info.events_size(), 1);
ASSERT_TRUE(debug_info.events(0).has_singleton_event());
ASSERT_EQ(debug_info.events(0).singleton_event(),
sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
}
+TEST_F(DebugInfoEventListenerTest, VerifyClearEvents) {
+ DebugInfoEventListener debug_info_event_listener;
+ debug_info_event_listener.CreateAndAddEvent(
+ sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
+ ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
+ debug_info_event_listener.ClearDebugInfo();
+ ASSERT_EQ(debug_info_event_listener.events_.size(), 0U);
+}
+
} // namespace syncer
diff --git a/chromium/sync/internal_api/http_bridge_network_resources.cc b/chromium/sync/internal_api/http_bridge_network_resources.cc
new file mode 100644
index 00000000000..72716bd2b34
--- /dev/null
+++ b/chromium/sync/internal_api/http_bridge_network_resources.cc
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/internal_api/public/http_bridge_network_resources.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "net/url_request/url_request_context_getter.h"
+#include "sync/internal_api/public/base/cancelation_signal.h"
+#include "sync/internal_api/public/http_bridge.h"
+#include "sync/internal_api/public/http_post_provider_factory.h"
+#include "sync/internal_api/public/network_time_update_callback.h"
+
+namespace syncer {
+
+HttpBridgeNetworkResources::~HttpBridgeNetworkResources() {}
+
+scoped_ptr<HttpPostProviderFactory>
+ HttpBridgeNetworkResources::GetHttpPostProviderFactory(
+ net::URLRequestContextGetter* baseline_context_getter,
+ const NetworkTimeUpdateCallback& network_time_update_callback,
+ CancelationSignal* cancelation_signal) {
+ return make_scoped_ptr<HttpPostProviderFactory>(
+ new HttpBridgeFactory(baseline_context_getter,
+ network_time_update_callback,
+ cancelation_signal));
+}
+
+} // namespace syncer
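HttpBridgeNetworkResources is a thin adapter that hands out HttpBridgeFactory instances behind a generic factory method, so an embedder can substitute a different HTTP implementation. A minimal usage sketch (the context getter, time-update callback, and cancelation signal are assumed to be provided by the caller):

  HttpBridgeNetworkResources network_resources;
  scoped_ptr<HttpPostProviderFactory> factory =
      network_resources.GetHttpPostProviderFactory(
          request_context_getter,        // net::URLRequestContextGetter*
          network_time_update_callback,  // NetworkTimeUpdateCallback
          cancelation_signal);           // CancelationSignal*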
diff --git a/chromium/sync/internal_api/js_mutation_event_observer.h b/chromium/sync/internal_api/js_mutation_event_observer.h
index 9d233515031..6c92646e391 100644
--- a/chromium/sync/internal_api/js_mutation_event_observer.h
+++ b/chromium/sync/internal_api/js_mutation_event_observer.h
@@ -55,13 +55,14 @@ class SYNC_EXPORT_PRIVATE JsMutationEventObserver
ModelTypeSet models_with_changes) OVERRIDE;
private:
- base::WeakPtrFactory<JsMutationEventObserver> weak_ptr_factory_;
WeakHandle<JsEventHandler> event_handler_;
void HandleJsEvent(
const tracked_objects::Location& from_here,
const std::string& name, const JsEventDetails& details);
+ base::WeakPtrFactory<JsMutationEventObserver> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(JsMutationEventObserver);
};
diff --git a/chromium/sync/internal_api/js_sync_manager_observer.cc b/chromium/sync/internal_api/js_sync_manager_observer.cc
index 01ddda834a3..9f8848d8499 100644
--- a/chromium/sync/internal_api/js_sync_manager_observer.cc
+++ b/chromium/sync/internal_api/js_sync_manager_observer.cc
@@ -49,15 +49,6 @@ void JsSyncManagerObserver::OnConnectionStatusChange(ConnectionStatus status) {
"onConnectionStatusChange", JsEventDetails(&details));
}
-void JsSyncManagerObserver::OnUpdatedToken(const std::string& token) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("token", "<redacted>");
- HandleJsEvent(FROM_HERE, "onUpdatedToken", JsEventDetails(&details));
-}
-
void JsSyncManagerObserver::OnActionableError(
const SyncProtocolError& sync_error) {
if (!event_handler_.IsInitialized()) {
diff --git a/chromium/sync/internal_api/js_sync_manager_observer.h b/chromium/sync/internal_api/js_sync_manager_observer.h
index bda6d8c3011..17a40e7bd70 100644
--- a/chromium/sync/internal_api/js_sync_manager_observer.h
+++ b/chromium/sync/internal_api/js_sync_manager_observer.h
@@ -35,7 +35,6 @@ class SYNC_EXPORT_PRIVATE JsSyncManagerObserver : public SyncManager::Observer {
virtual void OnSyncCycleCompleted(
const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
virtual void OnConnectionStatusChange(ConnectionStatus status) OVERRIDE;
- virtual void OnUpdatedToken(const std::string& token) OVERRIDE;
virtual void OnInitializationComplete(
const WeakHandle<JsBackend>& js_backend,
const WeakHandle<DataTypeDebugInfoListener>& debug_info_listener,
diff --git a/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc b/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc
index 65cb77e4695..e4b8c64966f 100644
--- a/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc
+++ b/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc
@@ -129,19 +129,5 @@ TEST_F(JsSyncManagerObserverTest, OnConnectionStatusChange) {
PumpLoop();
}
-TEST_F(JsSyncManagerObserverTest, SensitiveNotifiations) {
- base::DictionaryValue redacted_token_details;
- redacted_token_details.SetString("token", "<redacted>");
- base::DictionaryValue redacted_bootstrap_token_details;
- redacted_bootstrap_token_details.SetString("bootstrapToken", "<redacted>");
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onUpdatedToken",
- HasDetailsAsDictionary(redacted_token_details)));
-
- js_sync_manager_observer_.OnUpdatedToken("sensitive_token");
- PumpLoop();
-}
-
} // namespace
} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/DEPS b/chromium/sync/internal_api/public/base/DEPS
index 6eae52a42c9..9d46a8a6a63 100644
--- a/chromium/sync/internal_api/public/base/DEPS
+++ b/chromium/sync/internal_api/public/base/DEPS
@@ -1,7 +1,12 @@
include_rules = [
+ # Invalidations headers depend on this. We should move them to sync/notifier
+ # and then remove this rule.
+ "+google/cacheinvalidation",
+
"-sync",
"+sync/base",
"+sync/internal_api/public/base",
+ "+sync/internal_api/public/util",
+ "+sync/notifier",
"+sync/protocol",
- "+sync/notifier"
]
diff --git a/chromium/sync/internal_api/public/base/ack_handle.cc b/chromium/sync/internal_api/public/base/ack_handle.cc
new file mode 100644
index 00000000000..f5ddf121e3b
--- /dev/null
+++ b/chromium/sync/internal_api/public/base/ack_handle.cc
@@ -0,0 +1,67 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/internal_api/public/base/ack_handle.h"
+
+#include <cstddef>
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/values.h"
+
+namespace syncer {
+
+namespace {
+// Hopefully enough bytes for uniqueness.
+const size_t kBytesInHandle = 16;
+} // namespace
+
+AckHandle AckHandle::CreateUnique() {
+ // This isn't a valid UUID, so we don't attempt to format it like one.
+ uint8 random_bytes[kBytesInHandle];
+ base::RandBytes(random_bytes, sizeof(random_bytes));
+ return AckHandle(base::HexEncode(random_bytes, sizeof(random_bytes)),
+ base::Time::Now());
+}
+
+AckHandle AckHandle::InvalidAckHandle() {
+ return AckHandle(std::string(), base::Time());
+}
+
+bool AckHandle::Equals(const AckHandle& other) const {
+ return state_ == other.state_ && timestamp_ == other.timestamp_;
+}
+
+scoped_ptr<base::DictionaryValue> AckHandle::ToValue() const {
+ scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
+ value->SetString("state", state_);
+ value->SetString("timestamp",
+ base::Int64ToString(timestamp_.ToInternalValue()));
+ return value.Pass();
+}
+
+bool AckHandle::ResetFromValue(const base::DictionaryValue& value) {
+ if (!value.GetString("state", &state_))
+ return false;
+ std::string timestamp_as_string;
+ if (!value.GetString("timestamp", &timestamp_as_string))
+ return false;
+ int64 timestamp_value;
+ if (!base::StringToInt64(timestamp_as_string, &timestamp_value))
+ return false;
+ timestamp_ = base::Time::FromInternalValue(timestamp_value);
+ return true;
+}
+
+bool AckHandle::IsValid() const {
+ return !state_.empty();
+}
+
+AckHandle::AckHandle(const std::string& state, base::Time timestamp)
+ : state_(state), timestamp_(timestamp) {
+}
+
+AckHandle::~AckHandle() {
+}
+
+} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/ack_handle.h b/chromium/sync/internal_api/public/base/ack_handle.h
new file mode 100644
index 00000000000..99d03af9eb5
--- /dev/null
+++ b/chromium/sync/internal_api/public/base/ack_handle.h
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
+#define SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "sync/base/sync_export.h"
+
+namespace base {
+class DictionaryValue;
+}
+
+namespace syncer {
+
+// Opaque class that represents a local ack handle. We don't reuse the
+// invalidation ack handles to avoid unnecessary dependencies.
+class SYNC_EXPORT AckHandle {
+ public:
+ static AckHandle CreateUnique();
+ static AckHandle InvalidAckHandle();
+
+ bool Equals(const AckHandle& other) const;
+
+ scoped_ptr<base::DictionaryValue> ToValue() const;
+ bool ResetFromValue(const base::DictionaryValue& value);
+
+ bool IsValid() const;
+
+ ~AckHandle();
+
+ private:
+ // Explicitly copyable and assignable for STL containers.
+ AckHandle(const std::string& state, base::Time timestamp);
+
+ std::string state_;
+ base::Time timestamp_;
+};
+
+} // namespace syncer
+
+#endif // SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
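
For orientation, a minimal usage sketch of the AckHandle API added above; this is illustrative only, not part of the patch, and assumes the new header is on the include path:

  #include "base/logging.h"
  #include "base/values.h"
  #include "sync/internal_api/public/base/ack_handle.h"

  void AckHandleRoundTripExample() {
    // Create a fresh handle and serialize it, e.g. for persisting to prefs.
    syncer::AckHandle handle = syncer::AckHandle::CreateUnique();
    scoped_ptr<base::DictionaryValue> value = handle.ToValue();

    // Restore it from the serialized form and verify the round trip.
    syncer::AckHandle restored = syncer::AckHandle::InvalidAckHandle();
    if (restored.ResetFromValue(*value))
      DCHECK(restored.Equals(handle));
  }
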
diff --git a/chromium/sync/internal_api/public/base/invalidation.cc b/chromium/sync/internal_api/public/base/invalidation.cc
index b503d07c4af..ff7a5a78fd4 100644
--- a/chromium/sync/internal_api/public/base/invalidation.cc
+++ b/chromium/sync/internal_api/public/base/invalidation.cc
@@ -5,100 +5,178 @@
#include "sync/internal_api/public/base/invalidation.h"
#include <cstddef>
+
+#include "base/json/json_string_value_serializer.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/values.h"
+#include "sync/notifier/ack_handler.h"
+#include "sync/notifier/dropped_invalidation_tracker.h"
+#include "sync/notifier/invalidation_util.h"
namespace syncer {
namespace {
-// Hopefully enough bytes for uniqueness.
-const size_t kBytesInHandle = 16;
-} // namespace
+const char kObjectIdKey[] = "objectId";
+const char kIsUnknownVersionKey[] = "isUnknownVersion";
+const char kVersionKey[] = "version";
+const char kPayloadKey[] = "payload";
+const int64 kInvalidVersion = -1;
+}
-AckHandle AckHandle::CreateUnique() {
- // This isn't a valid UUID, so we don't attempt to format it like one.
- uint8 random_bytes[kBytesInHandle];
- base::RandBytes(random_bytes, sizeof(random_bytes));
- return AckHandle(base::HexEncode(random_bytes, sizeof(random_bytes)),
- base::Time::Now());
+Invalidation Invalidation::Init(
+ const invalidation::ObjectId& id,
+ int64 version,
+ const std::string& payload) {
+ return Invalidation(id, false, version, payload, AckHandle::CreateUnique());
}
-AckHandle AckHandle::InvalidAckHandle() {
- return AckHandle(std::string(), base::Time());
+Invalidation Invalidation::InitUnknownVersion(
+ const invalidation::ObjectId& id) {
+ return Invalidation(id, true, kInvalidVersion,
+ std::string(), AckHandle::CreateUnique());
}
-bool AckHandle::Equals(const AckHandle& other) const {
- return state_ == other.state_ && timestamp_ == other.timestamp_;
+Invalidation Invalidation::InitFromDroppedInvalidation(
+ const Invalidation& dropped) {
+ return Invalidation(dropped.id_, true, kInvalidVersion,
+ std::string(), dropped.ack_handle_);
}
-scoped_ptr<base::DictionaryValue> AckHandle::ToValue() const {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetString("state", state_);
- value->SetString("timestamp",
- base::Int64ToString(timestamp_.ToInternalValue()));
- return value.Pass();
+scoped_ptr<Invalidation> Invalidation::InitFromValue(
+ const base::DictionaryValue& value) {
+ invalidation::ObjectId id;
+
+ const base::DictionaryValue* object_id_dict;
+ if (!value.GetDictionary(kObjectIdKey, &object_id_dict)
+ || !ObjectIdFromValue(*object_id_dict, &id)) {
+ DLOG(WARNING) << "Failed to parse id";
+ return scoped_ptr<Invalidation>();
+ }
+ bool is_unknown_version;
+ if (!value.GetBoolean(kIsUnknownVersionKey, &is_unknown_version)) {
+ DLOG(WARNING) << "Failed to parse is_unknown_version flag";
+ return scoped_ptr<Invalidation>();
+ }
+ if (is_unknown_version) {
+ return scoped_ptr<Invalidation>(new Invalidation(
+ id,
+ true,
+ kInvalidVersion,
+ std::string(),
+ AckHandle::CreateUnique()));
+ } else {
+ int64 version;
+ std::string version_as_string;
+ if (!value.GetString(kVersionKey, &version_as_string)
+ || !base::StringToInt64(version_as_string, &version)) {
+ DLOG(WARNING) << "Failed to parse version";
+ return scoped_ptr<Invalidation>();
+ }
+ std::string payload;
+ if (!value.GetString(kPayloadKey, &payload)) {
+ DLOG(WARNING) << "Failed to parse payload";
+ return scoped_ptr<Invalidation>();
+ }
+ return scoped_ptr<Invalidation>(new Invalidation(
+ id,
+ false,
+ version,
+ payload,
+ AckHandle::CreateUnique()));
+ }
+}
+
+Invalidation::~Invalidation() {}
+
+invalidation::ObjectId Invalidation::object_id() const {
+ return id_;
+}
+
+bool Invalidation::is_unknown_version() const {
+ return is_unknown_version_;
}
-bool AckHandle::ResetFromValue(const base::DictionaryValue& value) {
- if (!value.GetString("state", &state_))
- return false;
- std::string timestamp_as_string;
- if (!value.GetString("timestamp", &timestamp_as_string))
- return false;
- int64 timestamp_value;
- if (!base::StringToInt64(timestamp_as_string, &timestamp_value))
- return false;
- timestamp_ = base::Time::FromInternalValue(timestamp_value);
- return true;
+int64 Invalidation::version() const {
+ DCHECK(!is_unknown_version_);
+ return version_;
}
-bool AckHandle::IsValid() const {
- return !state_.empty();
+const std::string& Invalidation::payload() const {
+ DCHECK(!is_unknown_version_);
+ return payload_;
}
-AckHandle::AckHandle(const std::string& state, base::Time timestamp)
- : state_(state), timestamp_(timestamp) {
+const AckHandle& Invalidation::ack_handle() const {
+ return ack_handle_;
}
-AckHandle::~AckHandle() {
+void Invalidation::set_ack_handler(syncer::WeakHandle<AckHandler> handler) {
+ ack_handler_ = handler;
}
-const int64 Invalidation::kUnknownVersion = -1;
+bool Invalidation::SupportsAcknowledgement() const {
+ return ack_handler_.IsInitialized();
+}
-Invalidation::Invalidation()
- : version(kUnknownVersion), ack_handle(AckHandle::InvalidAckHandle()) {
+void Invalidation::Acknowledge() const {
+ if (SupportsAcknowledgement()) {
+ ack_handler_.Call(FROM_HERE,
+ &AckHandler::Acknowledge,
+ id_,
+ ack_handle_);
+ }
}
-Invalidation::~Invalidation() {
+void Invalidation::Drop(DroppedInvalidationTracker* tracker) const {
+ DCHECK(tracker->object_id() == object_id());
+ tracker->RecordDropEvent(ack_handler_, ack_handle_);
+ if (SupportsAcknowledgement()) {
+ ack_handler_.Call(FROM_HERE,
+ &AckHandler::Drop,
+ id_,
+ ack_handle_);
+ }
}
bool Invalidation::Equals(const Invalidation& other) const {
- return (version == other.version) && (payload == other.payload) &&
- ack_handle.Equals(other.ack_handle);
+ return id_ == other.id_
+ && is_unknown_version_ == other.is_unknown_version_
+ && version_ == other.version_
+ && payload_ == other.payload_;
}
scoped_ptr<base::DictionaryValue> Invalidation::ToValue() const {
scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetString("version", base::Int64ToString(version));
- value->SetString("payload", payload);
- value->Set("ackHandle", ack_handle.ToValue().release());
+ value->Set(kObjectIdKey, ObjectIdToValue(id_).release());
+ if (is_unknown_version_) {
+ value->SetBoolean(kIsUnknownVersionKey, true);
+ } else {
+ value->SetBoolean(kIsUnknownVersionKey, false);
+ value->SetString(kVersionKey, base::Int64ToString(version_));
+ value->SetString(kPayloadKey, payload_);
+ }
return value.Pass();
}
-bool Invalidation::ResetFromValue(const base::DictionaryValue& value) {
- const base::DictionaryValue* ack_handle_value = NULL;
- std::string version_as_string;
- if (value.GetString("version", &version_as_string)) {
- if (!base::StringToInt64(version_as_string, &version))
- return false;
- } else {
- version = kUnknownVersion;
- }
- return
- value.GetString("payload", &payload) &&
- value.GetDictionary("ackHandle", &ack_handle_value) &&
- ack_handle.ResetFromValue(*ack_handle_value);
+std::string Invalidation::ToString() const {
+ std::string output;
+ JSONStringValueSerializer serializer(&output);
+ serializer.set_pretty_print(true);
+ serializer.Serialize(*ToValue().get());
+ return output;
}
+Invalidation::Invalidation(
+ const invalidation::ObjectId& id,
+ bool is_unknown_version,
+ int64 version,
+ const std::string& payload,
+ AckHandle ack_handle)
+ : id_(id),
+ is_unknown_version_(is_unknown_version),
+ version_(version),
+ payload_(payload),
+ ack_handle_(ack_handle) {}
+
} // namespace syncer
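
As a hedged, illustrative sketch (not part of the patch), the factory functions and value-based serialization introduced above compose roughly as follows; the ObjectId source and name used here are hypothetical:

  #include "base/logging.h"
  #include "base/values.h"
  #include "google/cacheinvalidation/include/types.h"
  #include "sync/internal_api/public/base/invalidation.h"

  void InvalidationRoundTripExample() {
    // Hypothetical object id; real ids come from RealModelTypeToObjectId().
    invalidation::ObjectId id(1004, "BOOKMARK");
    syncer::Invalidation inv = syncer::Invalidation::Init(id, 42, "payload");

    // Serialize to a DictionaryValue and parse it back.
    scoped_ptr<base::DictionaryValue> value = inv.ToValue();
    scoped_ptr<syncer::Invalidation> restored =
        syncer::Invalidation::InitFromValue(*value);

    // Equals() ignores ack-tracking state, so the round trip compares equal.
    DCHECK(restored);
    DCHECK(restored->Equals(inv));
  }
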
diff --git a/chromium/sync/internal_api/public/base/invalidation.h b/chromium/sync/internal_api/public/base/invalidation.h
index 851dbed7473..cf26112e224 100644
--- a/chromium/sync/internal_api/public/base/invalidation.h
+++ b/chromium/sync/internal_api/public/base/invalidation.h
@@ -9,59 +9,115 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
+#include "base/values.h"
+#include "google/cacheinvalidation/include/types.h"
#include "sync/base/sync_export.h"
-
-namespace base {
-class DictionaryValue;
-} // namespace
+#include "sync/internal_api/public/base/ack_handle.h"
+#include "sync/internal_api/public/util/weak_handle.h"
namespace syncer {
-// Opaque class that represents a local ack handle. We don't reuse the
-// invalidation ack handles to avoid unnecessary dependencies.
-class SYNC_EXPORT AckHandle {
+class DroppedInvalidationTracker;
+class AckHandler;
+
+// Represents a local invalidation, and is roughly analogous to
+// invalidation::Invalidation. Unlike invalidation::Invalidation, this class
+// supports "local" ack-tracking and simple serialization to pref values.
+class SYNC_EXPORT Invalidation {
public:
- static AckHandle CreateUnique();
- static AckHandle InvalidAckHandle();
+ // Factory functions.
+ static Invalidation Init(
+ const invalidation::ObjectId& id,
+ int64 version,
+ const std::string& payload);
+ static Invalidation InitUnknownVersion(const invalidation::ObjectId& id);
+ static Invalidation InitFromDroppedInvalidation(const Invalidation& dropped);
+ static scoped_ptr<Invalidation> InitFromValue(
+ const base::DictionaryValue& value);
- bool Equals(const AckHandle& other) const;
+ ~Invalidation();
- scoped_ptr<base::DictionaryValue> ToValue() const;
- bool ResetFromValue(const base::DictionaryValue& value);
+ // Compares two invalidations. The comparison ignores ack-tracking state.
+ bool Equals(const Invalidation& other) const;
- bool IsValid() const;
+ invalidation::ObjectId object_id() const;
+ bool is_unknown_version() const;
+
+ // Safe to call only if is_unknown_version() returns false.
+ int64 version() const;
+
+ // Safe to call only if is_unknown_version() returns false.
+ const std::string& payload() const;
+
+ const AckHandle& ack_handle() const;
+
+ // Sets the AckHandler to be used to track this Invalidation.
+ //
+ // This should be set by the class that generates the invalidation. Clients
+ // of the Invalidations API should not need to call this.
+ //
+ // Note that some sources of invalidations do not support ack tracking, and do
+ // not set the ack_handler. This will be hidden from users of this class.
+ void set_ack_handler(syncer::WeakHandle<AckHandler> ack_handler);
+
+ // Returns whether or not this instance supports ack tracking. This will
+  // depend on whether or not the source of invalidations supports
+  // ack tracking.
+ //
+ // Clients can safely ignore this flag. They can assume that all
+ // invalidations support ack tracking. If they're wrong, then invalidations
+ // will be less reliable, but their behavior will be no less correct.
+ bool SupportsAcknowledgement() const;
+
+ // Acknowledges the receipt of this invalidation.
+ //
+ // Clients should call this on a received invalidation when they have fully
+ // processed the invalidation and persisted the results to disk. Once this
+ // function is called, the invalidations system is under no obligation to
+ // re-deliver this invalidation in the event of a crash or restart.
+ void Acknowledge() const;
+
+ // Informs the ack tracker that this invalidation will not be serviced.
+ //
+ // If a client's buffer reaches its limit and it is forced to start dropping
+ // invalidations, it should call this function before dropping its
+ // invalidations in order to allow the ack tracker to drop the invalidation,
+ // too.
+ //
+ // The drop record will be tracked by the specified
+ // DroppedInvalidationTracker. The caller should hang on to this tracker. It
+ // will need to use it when it recovers from this drop event, or if it needs
+ // to record another drop event for the same ObjectID. Refer to the
+ // documentation of DroppedInvalidationTracker for more details.
+ void Drop(DroppedInvalidationTracker* tracker) const;
- ~AckHandle();
+ scoped_ptr<base::DictionaryValue> ToValue() const;
+ std::string ToString() const;
private:
- // Explicitly copyable and assignable for STL containers.
- AckHandle(const std::string& state, base::Time timestamp);
+ Invalidation(const invalidation::ObjectId& id,
+ bool is_unknown_version,
+ int64 version,
+ const std::string& payload,
+ AckHandle ack_handle);
- std::string state_;
- base::Time timestamp_;
-};
+ // The ObjectId to which this invalidation belongs.
+ invalidation::ObjectId id_;
-// Represents a local invalidation, and is roughly analogous to
-// invalidation::Invalidation. It contains a version (which may be
-// kUnknownVersion), a payload (which may be empty) and an
-// associated ack handle that an InvalidationHandler implementation can use to
-// acknowledge receipt of the invalidation. It does not embed the object ID,
-// since it is typically associated with it through ObjectIdInvalidationMap.
-struct SYNC_EXPORT Invalidation {
- static const int64 kUnknownVersion;
-
- Invalidation();
- ~Invalidation();
+ // This flag is set to true if this is an unknown version invalidation.
+ bool is_unknown_version_;
- bool Equals(const Invalidation& other) const;
+ // The version number of this invalidation. Should not be accessed if this is
+  // an unknown version invalidation.
+ int64 version_;
- scoped_ptr<base::DictionaryValue> ToValue() const;
- bool ResetFromValue(const base::DictionaryValue& value);
+  // The payload associated with this invalidation. Should not be accessed if
+ // this is an unknown version invalidation.
+ std::string payload_;
- int64 version;
- std::string payload;
- AckHandle ack_handle;
+ // A locally generated unique ID used to manage local acknowledgements.
+ AckHandle ack_handle_;
+ syncer::WeakHandle<AckHandler> ack_handler_;
};
} // namespace syncer
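
A minimal sketch of the ack-tracking flow described in the comments above; this is illustrative only, ProcessAndPersist() is a hypothetical client-side helper, and the tracker is assumed to track the same ObjectId as the invalidation:

  #include "sync/internal_api/public/base/invalidation.h"
  #include "sync/notifier/dropped_invalidation_tracker.h"

  // Hypothetical client-side processing; assumed to persist results to disk.
  void ProcessAndPersist(const syncer::Invalidation& invalidation);

  void HandleIncomingInvalidation(const syncer::Invalidation& invalidation,
                                  syncer::DroppedInvalidationTracker* tracker,
                                  bool buffer_full) {
    if (buffer_full) {
      // We will not service this invalidation; record the drop so the ack
      // tracker can recover from it later.
      invalidation.Drop(tracker);
      return;
    }

    ProcessAndPersist(invalidation);

    // Acknowledge only after the results are safely persisted; before this
    // point the invalidations system may still re-deliver after a crash.
    invalidation.Acknowledge();
  }
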
diff --git a/chromium/sync/internal_api/public/base/invalidation_test_util.cc b/chromium/sync/internal_api/public/base/invalidation_test_util.cc
index 3f3910be72f..3c610dadedc 100644
--- a/chromium/sync/internal_api/public/base/invalidation_test_util.cc
+++ b/chromium/sync/internal_api/public/base/invalidation_test_util.cc
@@ -75,7 +75,18 @@ InvalidationEqMatcher::InvalidationEqMatcher(
bool InvalidationEqMatcher::MatchAndExplain(
const Invalidation& actual, MatchResultListener* listener) const {
- return expected_.payload == actual.payload;
+ if (!(expected_.object_id() == actual.object_id())) {
+ return false;
+ }
+ if (expected_.is_unknown_version() && actual.is_unknown_version()) {
+ return true;
+ } else if (expected_.is_unknown_version() != actual.is_unknown_version()) {
+ return false;
+ } else {
+ // Neither is unknown version.
+ return expected_.payload() == actual.payload()
+ && expected_.version() == actual.version();
+ }
}
void InvalidationEqMatcher::DescribeTo(::std::ostream* os) const {
@@ -99,12 +110,8 @@ Matcher<const AckHandle&> Eq(const AckHandle& expected) {
return MakeMatcher(new AckHandleEqMatcher(expected));
}
-void PrintTo(const Invalidation& state, ::std::ostream* os) {
- std::string printable_payload;
- base::JsonDoubleQuote(state.payload,
- true /* put_in_quotes */,
- &printable_payload);
- *os << "{ payload: " << printable_payload << " }";
+void PrintTo(const Invalidation& inv, ::std::ostream* os) {
+ *os << "{ payload: " << inv.ToString() << " }";
}
Matcher<const Invalidation&> Eq(const Invalidation& expected) {
diff --git a/chromium/sync/internal_api/public/base/invalidation_test_util.h b/chromium/sync/internal_api/public/base/invalidation_test_util.h
index 9376a287b55..e7c08caae0d 100644
--- a/chromium/sync/internal_api/public/base/invalidation_test_util.h
+++ b/chromium/sync/internal_api/public/base/invalidation_test_util.h
@@ -12,7 +12,7 @@
namespace syncer {
class AckHandle;
-struct Invalidation;
+class Invalidation;
void PrintTo(const AckHandle& ack_handle, ::std::ostream* os);
::testing::Matcher<const AckHandle&> Eq(const AckHandle& expected);
diff --git a/chromium/sync/internal_api/public/base/model_type.h b/chromium/sync/internal_api/public/base/model_type.h
index 247d351375d..c618c45043d 100644
--- a/chromium/sync/internal_api/public/base/model_type.h
+++ b/chromium/sync/internal_api/public/base/model_type.h
@@ -99,6 +99,10 @@ enum ModelType {
// by this user and can have restrictions applied. MANAGED_USERS and
// MANAGED_USER_SETTINGS can not be encrypted.
MANAGED_USERS,
+ // Distilled articles.
+ ARTICLES,
+  // App List items.
+ APP_LIST,
// ---- Proxy types ----
// Proxy types are excluded from the sync protocol, but are still considered
diff --git a/chromium/sync/internal_api/public/base/model_type_test_util.cc b/chromium/sync/internal_api/public/base/model_type_test_util.cc
index 242b398bc0b..d9621bbbab9 100644
--- a/chromium/sync/internal_api/public/base/model_type_test_util.cc
+++ b/chromium/sync/internal_api/public/base/model_type_test_util.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "sync/internal_api/public/base/model_type_test_util.h"
+#include "sync/internal_api/public/base/ack_handle.h"
namespace syncer {
@@ -12,16 +13,9 @@ ObjectIdInvalidationMap BuildInvalidationMap(
const std::string& payload) {
ObjectIdInvalidationMap map;
invalidation::ObjectId id;
- Invalidation invalidation;
-
bool result = RealModelTypeToObjectId(type, &id);
- DCHECK(result)
- << "Conversion of model type to object id failed: "
- << ModelTypeToString(type);
- invalidation.version = version;
- invalidation.payload = payload;
-
- map.insert(std::make_pair(id, invalidation));
+ DCHECK(result);
+ map.Insert(Invalidation::Init(id, version, payload));
return map;
}
diff --git a/chromium/sync/notifier/object_id_invalidation_map_test_util.cc b/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc
index f2f82853e4e..777fc69f74b 100644
--- a/chromium/sync/notifier/object_id_invalidation_map_test_util.cc
+++ b/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
+#include "sync/internal_api/public/base/object_id_invalidation_map_test_util.h"
#include <algorithm>
@@ -24,7 +24,7 @@ class ObjectIdInvalidationMapEqMatcher
explicit ObjectIdInvalidationMapEqMatcher(
const ObjectIdInvalidationMap& expected);
- virtual bool MatchAndExplain(const ObjectIdInvalidationMap& actual,
+ virtual bool MatchAndExplain(const ObjectIdInvalidationMap& lhs,
MatchResultListener* listener) const;
virtual void DescribeTo(::std::ostream* os) const;
virtual void DescribeNegationTo(::std::ostream* os) const;
@@ -39,37 +39,57 @@ ObjectIdInvalidationMapEqMatcher::ObjectIdInvalidationMapEqMatcher(
const ObjectIdInvalidationMap& expected) : expected_(expected) {
}
+namespace {
+
+struct InvalidationEqPredicate {
+ InvalidationEqPredicate(const Invalidation& inv1)
+ : inv1_(inv1) { }
+
+ bool operator()(const Invalidation& inv2) {
+ return inv1_.Equals(inv2);
+ }
+
+ const Invalidation& inv1_;
+};
+
+}
+
bool ObjectIdInvalidationMapEqMatcher::MatchAndExplain(
const ObjectIdInvalidationMap& actual,
MatchResultListener* listener) const {
- ObjectIdInvalidationMap expected_only;
- ObjectIdInvalidationMap actual_only;
- typedef std::pair<invalidation::ObjectId,
- std::pair<Invalidation, Invalidation> >
- ValueDifference;
- std::vector<ValueDifference> value_differences;
-
- std::set_difference(expected_.begin(), expected_.end(),
- actual.begin(), actual.end(),
- std::inserter(expected_only, expected_only.begin()),
- expected_.value_comp());
- std::set_difference(actual.begin(), actual.end(),
- expected_.begin(), expected_.end(),
- std::inserter(actual_only, actual_only.begin()),
- actual.value_comp());
-
- for (ObjectIdInvalidationMap::const_iterator it = expected_.begin();
- it != expected_.end(); ++it) {
- ObjectIdInvalidationMap::const_iterator find_it =
- actual.find(it->first);
- if (find_it != actual.end() &&
- !Matches(Eq(it->second))(find_it->second)) {
- value_differences.push_back(std::make_pair(
- it->first, std::make_pair(it->second, find_it->second)));
+
+ std::vector<syncer::Invalidation> expected_invalidations;
+ std::vector<syncer::Invalidation> actual_invalidations;
+
+ expected_.GetAllInvalidations(&expected_invalidations);
+ actual.GetAllInvalidations(&actual_invalidations);
+
+ std::vector<syncer::Invalidation> expected_only;
+ std::vector<syncer::Invalidation> actual_only;
+
+ for (std::vector<syncer::Invalidation>::iterator it =
+ expected_invalidations.begin();
+ it != expected_invalidations.end(); ++it) {
+ if (std::find_if(actual_invalidations.begin(),
+ actual_invalidations.end(),
+ InvalidationEqPredicate(*it))
+ == actual_invalidations.end()) {
+ expected_only.push_back(*it);
}
}
- if (expected_only.empty() && actual_only.empty() && value_differences.empty())
+ for (std::vector<syncer::Invalidation>::iterator it =
+ actual_invalidations.begin();
+ it != actual_invalidations.end(); ++it) {
+ if (std::find_if(expected_invalidations.begin(),
+ expected_invalidations.end(),
+ InvalidationEqPredicate(*it))
+ == expected_invalidations.end()) {
+ actual_only.push_back(*it);
+ }
+ }
+
+ if (expected_only.empty() && actual_only.empty())
return true;
bool printed_header = false;
@@ -86,12 +106,6 @@ bool ObjectIdInvalidationMapEqMatcher::MatchAndExplain(
printed_header = true;
}
- if (!value_differences.empty()) {
- *listener << (printed_header ? ",\nand" : "which")
- << " differ in the following values: "
- << PrintToString(value_differences);
- }
-
return false;
}
@@ -99,8 +113,8 @@ void ObjectIdInvalidationMapEqMatcher::DescribeTo(::std::ostream* os) const {
*os << " is equal to " << PrintToString(expected_);
}
-void ObjectIdInvalidationMapEqMatcher::DescribeNegationTo
-(::std::ostream* os) const {
+void ObjectIdInvalidationMapEqMatcher::DescribeNegationTo(
+ ::std::ostream* os) const {
*os << " isn't equal to " << PrintToString(expected_);
}
diff --git a/chromium/sync/notifier/object_id_invalidation_map_test_util.h b/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h
index 0217da8d65c..5d71979d1fd 100644
--- a/chromium/sync/notifier/object_id_invalidation_map_test_util.h
+++ b/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h
@@ -1,13 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_TEST_UTILH_
-#define SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_TEST_UTILH_
+#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
+#define SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
// Convince googletest to use the correct overload for PrintTo().
#include "sync/internal_api/public/base/invalidation_test_util.h"
-#include "sync/internal_api/public/base/model_type.h"
#include "sync/notifier/object_id_invalidation_map.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -18,4 +17,4 @@ namespace syncer {
} // namespace syncer
-#endif // SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_TEST_UTILH_
+#endif // SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
diff --git a/chromium/sync/internal_api/public/base/ordinal.h b/chromium/sync/internal_api/public/base/ordinal.h
index 91897599e78..cb67b4518b1 100644
--- a/chromium/sync/internal_api/public/base/ordinal.h
+++ b/chromium/sync/internal_api/public/base/ordinal.h
@@ -260,8 +260,8 @@ bool Ordinal<Traits>::EqualsOrBothInvalid(const Ordinal& other) const {
template <typename Traits>
std::string Ordinal<Traits>::ToDebugString() const {
- std::string debug_string;
- base::JsonDoubleQuote(bytes_, false /* put_in_quotes */, &debug_string);
+ std::string debug_string =
+ base::EscapeBytesAsInvalidJSONString(bytes_, false /* put_in_quotes */);
if (!is_valid_) {
debug_string = "INVALID[" + debug_string + "]";
}
@@ -401,7 +401,7 @@ int Ordinal<Traits>::AddDigitValue(std::string* bytes,
DCHECK_GE(i, 0U);
DCHECK_LT(i, bytes->length());
- for (int j = i; j >= 0 && digit_value > 0; --j) {
+ for (int j = static_cast<int>(i); j >= 0 && digit_value > 0; --j) {
int byte_j_value = GetDigitValue(*bytes, j) + digit_value;
digit_value = byte_j_value / kRadix;
DCHECK_LE(digit_value, 1);
diff --git a/chromium/sync/internal_api/public/base/progress_marker_map.cc b/chromium/sync/internal_api/public/base/progress_marker_map.cc
index b2810134ebf..ea1f177b7d3 100644
--- a/chromium/sync/internal_api/public/base/progress_marker_map.cc
+++ b/chromium/sync/internal_api/public/base/progress_marker_map.cc
@@ -16,9 +16,8 @@ scoped_ptr<base::DictionaryValue> ProgressMarkerMapToValue(
for (ProgressMarkerMap::const_iterator it = marker_map.begin();
it != marker_map.end(); ++it) {
std::string printable_payload;
- base::JsonDoubleQuote(it->second,
- false /* put_in_quotes */,
- &printable_payload);
+ base::EscapeJSONString(
+ it->second, false /* put_in_quotes */, &printable_payload);
value->SetString(ModelTypeToString(it->first), printable_payload);
}
return value.Pass();
diff --git a/chromium/sync/internal_api/public/base/unique_position.cc b/chromium/sync/internal_api/public/base/unique_position.cc
index 80c7187e6b5..40bab6e175d 100644
--- a/chromium/sync/internal_api/public/base/unique_position.cc
+++ b/chromium/sync/internal_api/public/base/unique_position.cc
@@ -154,38 +154,10 @@ void UniquePosition::ToProto(sync_pb::UniquePosition* proto) const {
  // This is the current preferred format.
proto->set_custom_compressed_v1(compressed_);
- // Some older clients (M28) don't know how to read that format. We don't want
- // to break them until they're obsolete. We'll serialize to the old-style in
- // addition to the new so they won't be confused.
- std::string bytes = Uncompress(compressed_);
- if (bytes.size() < kCompressBytesThreshold) {
- // If it's small, then just write it. This is the common case.
- proto->set_value(bytes);
- } else {
- // We've got a large one. Compress it.
- proto->set_uncompressed_length(bytes.size());
- std::string* compressed = proto->mutable_compressed_value();
-
- uLongf compressed_len = compressBound(bytes.size());
- compressed->resize(compressed_len);
- int result = compress(reinterpret_cast<Bytef*>(string_as_array(compressed)),
- &compressed_len,
- reinterpret_cast<const Bytef*>(bytes.data()),
- bytes.size());
- if (result != Z_OK) {
- NOTREACHED() << "Failed to compress position: " << result;
- // Maybe we can write an uncompressed version?
- proto->Clear();
- proto->set_value(bytes);
- } else if (compressed_len >= bytes.size()) {
- // Oops, we made it bigger. Just write the uncompressed version instead.
- proto->Clear();
- proto->set_value(bytes);
- } else {
- // Success! Don't forget to adjust the string's length.
- compressed->resize(compressed_len);
- }
- }
+ // Older clients used to write other formats. We don't bother doing that
+ // anymore because that form of backwards compatibility is expensive. We no
+  // longer want to pay that price just to support clients that have been
+ // obsolete for a long time. See the proto definition for details.
}
void UniquePosition::SerializeToString(std::string* blob) const {
diff --git a/chromium/sync/internal_api/public/base/unique_position_unittest.cc b/chromium/sync/internal_api/public/base/unique_position_unittest.cc
index 5007a4b7cd0..4c0a66047f1 100644
--- a/chromium/sync/internal_api/public/base/unique_position_unittest.cc
+++ b/chromium/sync/internal_api/public/base/unique_position_unittest.cc
@@ -392,7 +392,7 @@ class SuffixGenerator {
// random anyway.
std::string input = cache_guid_ + base::Int64ToString(next_id_--);
std::string output;
- CHECK(base::Base64Encode(base::SHA1HashString(input), &output));
+ base::Base64Encode(base::SHA1HashString(input), &output);
return output;
}
@@ -655,25 +655,16 @@ TEST_F(CompressedPositionTest, SerializeAndDeserialize) {
UniquePosition deserialized = UniquePosition::FromProto(proto);
EXPECT_PRED_FORMAT2(Equals, *it, deserialized);
-
}
}
-// Test that redundant serialization for legacy clients is correct, too.
-TEST_F(CompressedPositionTest, SerializeAndLegacyDeserialize) {
- for (std::vector<UniquePosition>::const_iterator it = positions_.begin();
- it != positions_.end(); ++it) {
- SCOPED_TRACE("iteration: " + it->ToDebugString());
- sync_pb::UniquePosition proto;
-
- it->ToProto(&proto);
-
- // Clear default encoding to force it to use legacy as fallback.
- proto.clear_custom_compressed_v1();
- UniquePosition deserialized = UniquePosition::FromProto(proto);
-
- EXPECT_PRED_FORMAT2(Equals, *it, deserialized);
- }
+// Test that deserializing a protobuf in which we recognize none of the
+// fields is not catastrophic.  This may happen if all the fields currently
+// known to this client become deprecated in the future.
+TEST_F(CompressedPositionTest, DeserializeProtobufFromTheFuture) {
+ sync_pb::UniquePosition proto;
+ UniquePosition deserialized = UniquePosition::FromProto(proto);
+ EXPECT_FALSE(deserialized.IsValid());
}
// Make sure the comparison functions are working correctly.
diff --git a/chromium/sync/internal_api/public/http_bridge.h b/chromium/sync/internal_api/public/http_bridge.h
index be49aa7bcb2..74c7005fcea 100644
--- a/chromium/sync/internal_api/public/http_bridge.h
+++ b/chromium/sync/internal_api/public/http_bridge.h
@@ -8,7 +8,6 @@
#include <string>
#include "base/basictypes.h"
-#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
@@ -22,6 +21,7 @@
#include "sync/internal_api/public/base/cancelation_observer.h"
#include "sync/internal_api/public/http_post_provider_factory.h"
#include "sync/internal_api/public/http_post_provider_interface.h"
+#include "sync/internal_api/public/network_time_update_callback.h"
#include "url/gurl.h"
class HttpBridgeTest;
@@ -40,15 +40,6 @@ namespace syncer {
class CancelationSignal;
-// Callback for updating the network time.
-// Params:
-// const base::Time& network_time - the new network time.
-// const base::TimeDelta& resolution - how precise the reading is.
-// const base::TimeDelta& latency - the http request's latency.
-typedef base::Callback<void(const base::Time&,
- const base::TimeDelta&,
- const base::TimeDelta&)> NetworkTimeUpdateCallback;
-
// A bridge between the syncer and Chromium HTTP layers.
// Provides a way for the sync backend to use Chromium directly for HTTP
// requests rather than depending on a third party provider (e.g libcurl).
diff --git a/chromium/sync/internal_api/public/http_bridge_network_resources.h b/chromium/sync/internal_api/public/http_bridge_network_resources.h
new file mode 100644
index 00000000000..06d8284ba5c
--- /dev/null
+++ b/chromium/sync/internal_api/public/http_bridge_network_resources.h
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_NETWORK_RESOURCES_H_
+#define SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_NETWORK_RESOURCES_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/network_resources.h"
+#include "sync/internal_api/public/network_time_update_callback.h"
+
+namespace net {
+class URLRequestContextGetter;
+} // namespace net
+
+namespace syncer {
+
+class CancelationSignal;
+class HttpPostProviderFactory;
+
+class SYNC_EXPORT HttpBridgeNetworkResources : public NetworkResources {
+ public:
+ virtual ~HttpBridgeNetworkResources();
+
+ // NetworkResources
+ virtual scoped_ptr<HttpPostProviderFactory> GetHttpPostProviderFactory(
+ net::URLRequestContextGetter* baseline_context_getter,
+ const NetworkTimeUpdateCallback& network_time_update_callback,
+ CancelationSignal* cancelation_signal) OVERRIDE;
+};
+
+} // namespace syncer
+
+#endif // SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_NETWORK_RESOURCES_H_
diff --git a/chromium/sync/internal_api/public/network_resources.h b/chromium/sync/internal_api/public/network_resources.h
new file mode 100644
index 00000000000..448188c61a4
--- /dev/null
+++ b/chromium/sync/internal_api/public/network_resources.h
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_PUBLIC_NETWORK_RESOURCES_H_
+#define SYNC_INTERNAL_API_PUBLIC_NETWORK_RESOURCES_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/network_time_update_callback.h"
+
+namespace net {
+class URLRequestContextGetter;
+} // namespace net
+
+namespace syncer {
+
+class CancelationSignal;
+class HttpPostProviderFactory;
+
+class SYNC_EXPORT NetworkResources {
+ public:
+ virtual ~NetworkResources() {}
+
+ virtual scoped_ptr<HttpPostProviderFactory> GetHttpPostProviderFactory(
+ net::URLRequestContextGetter* baseline_context_getter,
+ const NetworkTimeUpdateCallback& network_time_update_callback,
+ CancelationSignal* cancelation_signal) = 0;
+};
+
+} // namespace syncer
+
+#endif // SYNC_INTERNAL_API_PUBLIC_NETWORK_RESOURCES_H_
diff --git a/chromium/sync/internal_api/public/network_time_update_callback.h b/chromium/sync/internal_api/public/network_time_update_callback.h
new file mode 100644
index 00000000000..1efa2414982
--- /dev/null
+++ b/chromium/sync/internal_api/public/network_time_update_callback.h
@@ -0,0 +1,28 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_PUBLIC_NETWORK_TIME_UPDATE_CALLBACK_H_
+#define SYNC_INTERNAL_API_PUBLIC_NETWORK_TIME_UPDATE_CALLBACK_H_
+
+#include "base/callback.h"
+#include "base/time/time.h"
+
+namespace syncer {
+
+// TODO(pvalenzuela): Avoid duplication of this typedef by defining it in a
+// common location. This is duplicated here because its original definition in
+// NetworkTimeTracker cannot be depended on.
+//
+// Callback for updating the network time.
+// Params:
+// const base::Time& network_time - the new network time.
+// const base::TimeDelta& resolution - how precise the reading is.
+// const base::TimeDelta& latency - the http request's latency.
+typedef base::Callback<void(const base::Time&,
+ const base::TimeDelta&,
+ const base::TimeDelta&)> NetworkTimeUpdateCallback;
+
+} // namespace syncer
+
+#endif // SYNC_INTERNAL_API_PUBLIC_NETWORK_TIME_UPDATE_CALLBACK_H_
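
A short sketch, outside the patch, of how the relocated NetworkTimeUpdateCallback and the NetworkResources interface above are intended to be wired together; the context getter and cancelation signal are assumed to be provided by the embedder:

  #include "base/bind.h"
  #include "sync/internal_api/public/http_bridge_network_resources.h"
  #include "sync/internal_api/public/http_post_provider_factory.h"
  #include "sync/internal_api/public/network_resources.h"
  #include "sync/internal_api/public/network_time_update_callback.h"

  void OnNetworkTimeUpdate(const base::Time& network_time,
                           const base::TimeDelta& resolution,
                           const base::TimeDelta& latency) {
    // Hypothetical consumer of network time updates.
  }

  scoped_ptr<syncer::HttpPostProviderFactory> CreatePostFactory(
      net::URLRequestContextGetter* baseline_context_getter,
      syncer::CancelationSignal* cancelation_signal) {
    syncer::HttpBridgeNetworkResources resources;
    scoped_ptr<syncer::HttpPostProviderFactory> factory =
        resources.GetHttpPostProviderFactory(
            baseline_context_getter,
            base::Bind(&OnNetworkTimeUpdate),
            cancelation_signal);
    return factory.Pass();
  }
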
diff --git a/chromium/sync/internal_api/public/sessions/model_neutral_state.cc b/chromium/sync/internal_api/public/sessions/model_neutral_state.cc
index b1a17069f2b..fa2b0192ff1 100644
--- a/chromium/sync/internal_api/public/sessions/model_neutral_state.cc
+++ b/chromium/sync/internal_api/public/sessions/model_neutral_state.cc
@@ -23,7 +23,6 @@ ModelNeutralState::ModelNeutralState()
last_download_updates_result(UNSET),
commit_result(UNSET),
items_committed(false),
- debug_info_sent(false),
num_server_changes_remaining(0) {
}
diff --git a/chromium/sync/internal_api/public/sessions/model_neutral_state.h b/chromium/sync/internal_api/public/sessions/model_neutral_state.h
index 4abf15816ee..4979d3ad101 100644
--- a/chromium/sync/internal_api/public/sessions/model_neutral_state.h
+++ b/chromium/sync/internal_api/public/sessions/model_neutral_state.h
@@ -22,15 +22,9 @@ struct SYNC_EXPORT ModelNeutralState {
ModelNeutralState();
~ModelNeutralState();
- // We GetUpdates for some combination of types at once.
- // requested_update_types stores the set of types which were requested.
- ModelTypeSet updates_request_types;
-
// The set of types for which commits were sent to the server.
ModelTypeSet commit_request_types;
- sync_pb::ClientToServerResponse updates_response;
-
int num_successful_commits;
// This is needed for monitoring extensions activity.
@@ -67,9 +61,6 @@ struct SYNC_EXPORT ModelNeutralState {
// Set to true by PostCommitMessageCommand if any commits were successful.
bool items_committed;
- // True indicates debug info has been sent once this session.
- bool debug_info_sent;
-
// Number of changes remaining, according to the server.
  // Take it as an estimate unless its value is zero, in which case there
// really is nothing more to download.
diff --git a/chromium/sync/internal_api/public/sync_manager.h b/chromium/sync/internal_api/public/sync_manager.h
index 735e5ed4387..a5e6926d695 100644
--- a/chromium/sync/internal_api/public/sync_manager.h
+++ b/chromium/sync/internal_api/public/sync_manager.h
@@ -53,6 +53,7 @@ class SyncSessionSnapshot;
// Used by SyncManager::OnConnectionStatusChange().
enum ConnectionStatus {
+ CONNECTION_NOT_ATTEMPTED,
CONNECTION_OK,
CONNECTION_AUTH_ERROR,
CONNECTION_SERVER_ERROR
@@ -176,9 +177,6 @@ class SYNC_EXPORT SyncManager : public syncer::InvalidationHandler {
// changed.
virtual void OnConnectionStatusChange(ConnectionStatus status) = 0;
- // Called when a new auth token is provided by the sync server.
- virtual void OnUpdatedToken(const std::string& token) = 0;
-
// Called when initialization is complete to the point that SyncManager can
// process changes. This does not necessarily mean authentication succeeded
// or that the SyncManager is online.
@@ -324,7 +322,6 @@ class SYNC_EXPORT SyncManager : public syncer::InvalidationHandler {
Encryptor* encryptor,
scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler,
ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
- bool use_oauth2_token,
CancelationSignal* cancelation_signal) = 0;
// Throw an unrecoverable error from a transaction (mostly used for
diff --git a/chromium/sync/internal_api/public/util/syncer_error.cc b/chromium/sync/internal_api/public/util/syncer_error.cc
index 9c2609cd40d..e7cb66fbf48 100644
--- a/chromium/sync/internal_api/public/util/syncer_error.cc
+++ b/chromium/sync/internal_api/public/util/syncer_error.cc
@@ -27,6 +27,7 @@ const char* GetSyncerErrorString(SyncerError value) {
ENUM_CASE(SERVER_RETURN_CONFLICT);
ENUM_CASE(SERVER_RESPONSE_VALIDATION_FAILED);
ENUM_CASE(SERVER_RETURN_DISABLED_BY_ADMIN);
+ ENUM_CASE(SERVER_MORE_TO_DOWNLOAD);
ENUM_CASE(SYNCER_OK);
}
NOTREACHED();
@@ -35,7 +36,9 @@ const char* GetSyncerErrorString(SyncerError value) {
#undef ENUM_CASE
bool SyncerErrorIsError(SyncerError error) {
- return error != UNSET && error != SYNCER_OK;
+ return error != UNSET
+ && error != SYNCER_OK
+ && error != SERVER_MORE_TO_DOWNLOAD;
}
} // namespace syncer
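
To make the semantics of the new SERVER_MORE_TO_DOWNLOAD value concrete, a hedged example of how a caller might branch on it; DownloadUpdates() is a hypothetical helper, not an API from this patch:

  #include "sync/internal_api/public/util/syncer_error.h"

  // Hypothetical helper that performs one GetUpdates round trip.
  syncer::SyncerError DownloadUpdates();

  bool NeedsAnotherGetUpdates() {
    syncer::SyncerError result = DownloadUpdates();
    if (result == syncer::SERVER_MORE_TO_DOWNLOAD) {
      // Not an error: SyncerErrorIsError() returns false here, but another
      // GetUpdates request is needed to fetch the remaining changes.
      return true;
    }
    // SYNCER_OK, or a genuine failure that SyncerErrorIsError() reports.
    return false;
  }
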
diff --git a/chromium/sync/internal_api/public/util/syncer_error.h b/chromium/sync/internal_api/public/util/syncer_error.h
index 471bc7e2b4b..02da22c1935 100644
--- a/chromium/sync/internal_api/public/util/syncer_error.h
+++ b/chromium/sync/internal_api/public/util/syncer_error.h
@@ -9,15 +9,7 @@
namespace syncer {
-// This enum describes all the ways a SyncerCommand can fail.
-//
-// SyncerCommands do many different things, but they share a common function
-// signature. This enum, the return value for all SyncerCommands, must be able
-// to describe any possible failure for all SyncerComand.
-//
-// For convenience, functions which are invoked only by SyncerCommands may also
-// return a SyncerError. It saves us having to write a conversion function, and
-// it makes refactoring easier.
+// This enum describes all the possible results of a sync cycle.
enum SYNC_EXPORT_PRIVATE SyncerError {
UNSET = 0, // Default value.
CANNOT_DO_WORK, // A model worker could not process a work item.
@@ -39,6 +31,8 @@ enum SYNC_EXPORT_PRIVATE SyncerError {
SERVER_RESPONSE_VALIDATION_FAILED,
SERVER_RETURN_DISABLED_BY_ADMIN,
+ SERVER_MORE_TO_DOWNLOAD,
+
SYNCER_OK
};
diff --git a/chromium/sync/internal_api/sync_encryption_handler_impl.cc b/chromium/sync/internal_api/sync_encryption_handler_impl.cc
index b5870238f8b..34bf0335c8f 100644
--- a/chromium/sync/internal_api/sync_encryption_handler_impl.cc
+++ b/chromium/sync/internal_api/sync_encryption_handler_impl.cc
@@ -210,12 +210,12 @@ SyncEncryptionHandlerImpl::SyncEncryptionHandlerImpl(
Encryptor* encryptor,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping)
- : weak_ptr_factory_(this),
- user_share_(user_share),
+ : user_share_(user_share),
vault_unsafe_(encryptor, SensitiveTypes()),
encrypt_everything_(false),
passphrase_type_(IMPLICIT_PASSPHRASE),
- nigori_overwrite_count_(0) {
+ nigori_overwrite_count_(0),
+ weak_ptr_factory_(this) {
// Restore the cryptographer's previous keys. Note that we don't add the
// keystore keys into the cryptographer here, in case a migration was pending.
vault_unsafe_.cryptographer.Bootstrap(restored_key_for_bootstrapping);
@@ -692,8 +692,7 @@ bool SyncEncryptionHandlerImpl::SetKeystoreKeys(
// Note: in order to Pack the keys, they must all be base64 encoded (else
// JSON serialization fails).
- if (!base::Base64Encode(raw_keystore_key, &keystore_key_))
- return false;
+ base::Base64Encode(raw_keystore_key, &keystore_key_);
// Go through and save the old keystore keys. We always persist all keystore
// keys the server sends us.
diff --git a/chromium/sync/internal_api/sync_encryption_handler_impl.h b/chromium/sync/internal_api/sync_encryption_handler_impl.h
index 4b4e99b9ae3..89621c74058 100644
--- a/chromium/sync/internal_api/sync_encryption_handler_impl.h
+++ b/chromium/sync/internal_api/sync_encryption_handler_impl.h
@@ -265,8 +265,6 @@ class SYNC_EXPORT_PRIVATE SyncEncryptionHandlerImpl
base::ThreadChecker thread_checker_;
- base::WeakPtrFactory<SyncEncryptionHandlerImpl> weak_ptr_factory_;
-
ObserverList<SyncEncryptionHandler::Observer> observers_;
// The current user share (for creating transactions).
@@ -307,6 +305,8 @@ class SYNC_EXPORT_PRIVATE SyncEncryptionHandlerImpl
// before support for this field was added.
base::Time custom_passphrase_time_;
+ base::WeakPtrFactory<SyncEncryptionHandlerImpl> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SyncEncryptionHandlerImpl);
};
diff --git a/chromium/sync/internal_api/sync_manager_impl.cc b/chromium/sync/internal_api/sync_manager_impl.cc
index f79213dfdc7..7e2d34bc2de 100644
--- a/chromium/sync/internal_api/sync_manager_impl.cc
+++ b/chromium/sync/internal_api/sync_manager_impl.cc
@@ -40,6 +40,7 @@
#include "sync/js/js_reply_handler.h"
#include "sync/notifier/invalidation_util.h"
#include "sync/notifier/invalidator.h"
+#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/protocol/proto_value_conversions.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
@@ -168,14 +169,14 @@ class NudgeStrategy {
SyncManagerImpl::SyncManagerImpl(const std::string& name)
: name_(name),
- weak_ptr_factory_(this),
change_delegate_(NULL),
initialized_(false),
observing_network_connectivity_changes_(false),
invalidator_state_(DEFAULT_INVALIDATION_ERROR),
traffic_recorder_(kMaxMessagesToRecord, kMaxMessageSizeToRecord),
encryptor_(NULL),
- report_unrecoverable_error_function_(NULL) {
+ report_unrecoverable_error_function_(NULL),
+ weak_ptr_factory_(this) {
// Pre-fill |notification_info_map_|.
for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
notification_info_map_.insert(
@@ -330,11 +331,11 @@ void SyncManagerImpl::ConfigureSyncer(
ConfigurationParams params(GetSourceFromReason(reason),
to_download,
new_routing_info,
- ready_task);
+ ready_task,
+ retry_task);
scheduler_->Start(SyncScheduler::CONFIGURATION_MODE);
- if (!scheduler_->ScheduleConfiguration(params))
- retry_task.Run();
+ scheduler_->ScheduleConfiguration(params);
}
void SyncManagerImpl::Init(
@@ -355,7 +356,6 @@ void SyncManagerImpl::Init(
Encryptor* encryptor,
scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler,
ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
- bool use_oauth2_token,
CancelationSignal* cancelation_signal) {
CHECK(!initialized_);
DCHECK(thread_checker_.CalledOnValidThread());
@@ -410,18 +410,13 @@ void SyncManagerImpl::Init(
DVLOG(1) << "Username: " << username;
if (!OpenDirectory(username)) {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnInitializationComplete(
- MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()),
- MakeWeakHandle(
- debug_info_event_listener_.GetWeakPtr()),
- false, ModelTypeSet()));
+ NotifyInitializationFailure();
LOG(ERROR) << "Sync manager initialization failed!";
return;
}
connection_manager_.reset(new SyncAPIServerConnectionManager(
- sync_server_and_path, port, use_ssl, use_oauth2_token,
+ sync_server_and_path, port, use_ssl,
post_factory.release(), cancelation_signal));
connection_manager_->set_client_id(directory()->cache_guid());
connection_manager_->AddListener(this);
@@ -462,6 +457,10 @@ void SyncManagerImpl::Init(
UpdateCredentials(credentials);
+ NotifyInitializationSuccess();
+}
+
+void SyncManagerImpl::NotifyInitializationSuccess() {
FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
OnInitializationComplete(
MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()),
@@ -469,6 +468,14 @@ void SyncManagerImpl::Init(
true, InitialSyncEndedTypes()));
}
+void SyncManagerImpl::NotifyInitializationFailure() {
+ FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
+ OnInitializationComplete(
+ MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()),
+ MakeWeakHandle(debug_info_event_listener_.GetWeakPtr()),
+ false, ModelTypeSet()));
+}
+
void SyncManagerImpl::OnPassphraseRequired(
PassphraseRequiredReason reason,
const sync_pb::EncryptedData& pending_keys) {
@@ -927,8 +934,8 @@ void SyncManagerImpl::OnSyncEngineEvent(const SyncEngineEvent& event) {
// whether we should sync again.
if (event.what_happened == SyncEngineEvent::SYNC_CYCLE_ENDED) {
if (!initialized_) {
- LOG(INFO) << "OnSyncCycleCompleted not sent because sync api is not "
- << "initialized";
+ DVLOG(1) << "OnSyncCycleCompleted not sent because sync api is not "
+ << "initialized";
return;
}
@@ -943,12 +950,6 @@ void SyncManagerImpl::OnSyncEngineEvent(const SyncEngineEvent& event) {
return;
}
- if (event.what_happened == SyncEngineEvent::UPDATED_TOKEN) {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnUpdatedToken(event.updated_token));
- return;
- }
-
if (event.what_happened == SyncEngineEvent::ACTIONABLE_ERROR) {
FOR_EACH_OBSERVER(
SyncManager::Observer, observers_,
@@ -1005,7 +1006,7 @@ base::DictionaryValue* SyncManagerImpl::NotificationInfoToValue(
for (NotificationInfoMap::const_iterator it = notification_info.begin();
it != notification_info.end(); ++it) {
- const std::string& model_type_str = ModelTypeToString(it->first);
+ const std::string model_type_str = ModelTypeToString(it->first);
value->Set(model_type_str, it->second.ToValue());
}
@@ -1148,13 +1149,22 @@ JsArgList SyncManagerImpl::GetChildNodeIds(const JsArgList& args) {
void SyncManagerImpl::UpdateNotificationInfo(
const ObjectIdInvalidationMap& invalidation_map) {
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
ModelType type = UNSPECIFIED;
- if (ObjectIdToRealModelType(it->first, &type)) {
+ if (!ObjectIdToRealModelType(*it, &type)) {
+ continue;
+ }
+ const SingleObjectInvalidationSet& type_invalidations =
+ invalidation_map.ForObject(*it);
+ for (SingleObjectInvalidationSet::const_iterator inv_it =
+ type_invalidations.begin(); inv_it != type_invalidations.end();
+ ++inv_it) {
NotificationInfo* info = &notification_info_map_[type];
info->total_count++;
- info->payload = it->second.payload;
+ std::string payload =
+ inv_it->is_unknown_version() ? "UNKNOWN" : inv_it->payload();
+ info->payload = payload;
}
}
}
@@ -1185,7 +1195,7 @@ void SyncManagerImpl::OnIncomingInvalidation(
DCHECK(thread_checker_.CalledOnValidThread());
// We should never receive IDs from non-sync objects.
- ObjectIdSet ids = ObjectIdInvalidationMapToSet(invalidation_map);
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
ModelType type;
if (!ObjectIdToRealModelType(*it, &type)) {
@@ -1193,7 +1203,7 @@ void SyncManagerImpl::OnIncomingInvalidation(
}
}
- if (invalidation_map.empty()) {
+ if (invalidation_map.Empty()) {
LOG(WARNING) << "Sync received invalidation without any type information.";
} else {
allstatus_.IncrementNudgeCounter(NUDGE_SOURCE_NOTIFICATION);
@@ -1209,7 +1219,8 @@ void SyncManagerImpl::OnIncomingInvalidation(
base::DictionaryValue details;
base::ListValue* changed_types = new base::ListValue();
details.Set("changedTypes", changed_types);
- ObjectIdSet id_set = ObjectIdInvalidationMapToSet(invalidation_map);
+
+ ObjectIdSet id_set = invalidation_map.GetObjectIds();
ModelTypeSet nudged_types = ObjectIdSetToModelTypeSet(id_set);
DCHECK(!nudged_types.Empty());
for (ModelTypeSet::Iterator it = nudged_types.First();
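
For reference, a minimal sketch (not part of the patch) of the iteration pattern that the UpdateNotificationInfo() rewrite above relies on: walk the object ids first, then each object's list of invalidations:

  #include "base/logging.h"
  #include "sync/notifier/invalidation_util.h"
  #include "sync/notifier/object_id_invalidation_map.h"

  void LogInvalidations(const syncer::ObjectIdInvalidationMap& invalidation_map) {
    syncer::ObjectIdSet ids = invalidation_map.GetObjectIds();
    for (syncer::ObjectIdSet::const_iterator it = ids.begin();
         it != ids.end(); ++it) {
      const syncer::SingleObjectInvalidationSet& list =
          invalidation_map.ForObject(*it);
      for (syncer::SingleObjectInvalidationSet::const_iterator inv_it =
               list.begin(); inv_it != list.end(); ++inv_it) {
        DVLOG(1) << (inv_it->is_unknown_version() ? std::string("UNKNOWN")
                                                  : inv_it->payload());
      }
    }
  }
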
diff --git a/chromium/sync/internal_api/sync_manager_impl.h b/chromium/sync/internal_api/sync_manager_impl.h
index 0283065f8a6..5d5cfaab552 100644
--- a/chromium/sync/internal_api/sync_manager_impl.h
+++ b/chromium/sync/internal_api/sync_manager_impl.h
@@ -81,7 +81,6 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler,
ReportUnrecoverableErrorFunction
report_unrecoverable_error_function,
- bool use_oauth2_token,
CancelationSignal* cancelation_signal) OVERRIDE;
virtual void ThrowUnrecoverableError() OVERRIDE;
virtual ModelTypeSet InitialSyncEndedTypes() OVERRIDE;
@@ -181,6 +180,11 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
bool GetHasInvalidAuthTokenForTest() const;
+ protected:
+ // Helper functions. Virtual for testing.
+ virtual void NotifyInitializationSuccess();
+ virtual void NotifyInitializationFailure();
+
private:
friend class SyncManagerTest;
FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, NudgeDelayTest);
@@ -285,8 +289,6 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
base::ThreadChecker thread_checker_;
- base::WeakPtrFactory<SyncManagerImpl> weak_ptr_factory_;
-
// Thread-safe handle used by
// HandleCalculateChangesChangeEventFromSyncApi(), which can be
// called from any thread. Valid only between between calls to
@@ -367,6 +369,8 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
// with the cryptographer.
scoped_ptr<SyncEncryptionHandlerImpl> sync_encryption_handler_;
+ base::WeakPtrFactory<SyncManagerImpl> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SyncManagerImpl);
};
diff --git a/chromium/sync/internal_api/sync_manager_impl_unittest.cc b/chromium/sync/internal_api/sync_manager_impl_unittest.cc
index dc47365437d..91be3b80dfd 100644
--- a/chromium/sync/internal_api/sync_manager_impl_unittest.cc
+++ b/chromium/sync/internal_api/sync_manager_impl_unittest.cc
@@ -66,6 +66,7 @@
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/test/callback_counter.h"
+#include "sync/test/engine/fake_model_worker.h"
#include "sync/test/engine/fake_sync_scheduler.h"
#include "sync/test/engine/test_id_factory.h"
#include "sync/test/fake_encryptor.h"
@@ -96,8 +97,6 @@ using syncable::kEncryptedString;
namespace {
-const char kTestChromeVersion[] = "test chrome version";
-
void ExpectInt64Value(int64 expected_value,
const base::DictionaryValue& value,
const std::string& key) {
@@ -809,7 +808,8 @@ class SyncManagerTest : public testing::Test,
sync_manager_.AddObserver(&manager_observer_);
EXPECT_CALL(manager_observer_, OnInitializationComplete(_, _, _, _)).
- WillOnce(SaveArg<0>(&js_backend_));
+ WillOnce(DoAll(SaveArg<0>(&js_backend_),
+ SaveArg<2>(&initialization_succeeded_)));
EXPECT_FALSE(js_backend_.IsInitialized());
@@ -817,6 +817,12 @@ class SyncManagerTest : public testing::Test,
ModelSafeRoutingInfo routing_info;
GetModelSafeRoutingInfo(&routing_info);
+ // This works only because all routing info types are GROUP_PASSIVE.
+ // If we had types in other groups, we would need additional workers
+ // to support them.
+ scoped_refptr<ModelSafeWorker> worker = new FakeModelWorker(GROUP_PASSIVE);
+ workers.push_back(worker.get());
+
// Takes ownership of |fake_invalidator_|.
sync_manager_.Init(
temp_dir_.path(),
@@ -837,17 +843,18 @@ class SyncManagerTest : public testing::Test,
scoped_ptr<UnrecoverableErrorHandler>(
new TestUnrecoverableErrorHandler).Pass(),
NULL,
- false,
&cancelation_signal_);
sync_manager_.GetEncryptionHandler()->AddObserver(&encryption_observer_);
EXPECT_TRUE(js_backend_.IsInitialized());
- for (ModelSafeRoutingInfo::iterator i = routing_info.begin();
- i != routing_info.end(); ++i) {
- type_roots_[i->first] = MakeServerNodeForType(
- sync_manager_.GetUserShare(), i->first);
+ if (initialization_succeeded_) {
+ for (ModelSafeRoutingInfo::iterator i = routing_info.begin();
+ i != routing_info.end(); ++i) {
+ type_roots_[i->first] = MakeServerNodeForType(
+ sync_manager_.GetUserShare(), i->first);
+ }
}
PumpLoop();
}
@@ -987,9 +994,7 @@ class SyncManagerTest : public testing::Test,
DCHECK(sync_manager_.thread_checker_.CalledOnValidThread());
ObjectIdSet id_set = ModelTypeSetToObjectIdSet(model_types);
ObjectIdInvalidationMap invalidation_map =
- ObjectIdSetToInvalidationMap(id_set,
- Invalidation::kUnknownVersion,
- std::string());
+ ObjectIdInvalidationMap::InvalidateAll(id_set);
sync_manager_.OnIncomingInvalidation(invalidation_map);
}
@@ -1023,6 +1028,7 @@ class SyncManagerTest : public testing::Test,
SyncManagerImpl sync_manager_;
CancelationSignal cancelation_signal_;
WeakHandle<JsBackend> js_backend_;
+ bool initialization_succeeded_;
StrictMock<SyncManagerObserverMock> manager_observer_;
StrictMock<SyncEncryptionHandlerObserverMock> encryption_observer_;
InternalComponentsFactory::Switches switches_;
@@ -2785,7 +2791,7 @@ class MockSyncScheduler : public FakeSyncScheduler {
virtual ~MockSyncScheduler() {}
MOCK_METHOD1(Start, void(SyncScheduler::Mode));
- MOCK_METHOD1(ScheduleConfiguration, bool(const ConfigurationParams&));
+ MOCK_METHOD1(ScheduleConfiguration, void(const ConfigurationParams&));
};
class ComponentsFactory : public TestInternalComponentsFactory {
@@ -2843,7 +2849,7 @@ TEST_F(SyncManagerTestWithMockScheduler, BasicConfiguration) {
ConfigurationParams params;
EXPECT_CALL(*scheduler(), Start(SyncScheduler::CONFIGURATION_MODE));
EXPECT_CALL(*scheduler(), ScheduleConfiguration(_)).
- WillOnce(DoAll(SaveArg<0>(&params), Return(true)));
+ WillOnce(SaveArg<0>(&params));
// Set data for all types.
ModelTypeSet protocol_types = ProtocolTypes();
@@ -2895,7 +2901,7 @@ TEST_F(SyncManagerTestWithMockScheduler, ReConfiguration) {
ConfigurationParams params;
EXPECT_CALL(*scheduler(), Start(SyncScheduler::CONFIGURATION_MODE));
EXPECT_CALL(*scheduler(), ScheduleConfiguration(_)).
- WillOnce(DoAll(SaveArg<0>(&params), Return(true)));
+ WillOnce(SaveArg<0>(&params));
// Set data for all types except those recently disabled (so we can verify
// only those recently disabled are purged) .
@@ -2936,38 +2942,6 @@ TEST_F(SyncManagerTestWithMockScheduler, ReConfiguration) {
ProtocolTypes()).Equals(disabled_types));
}
-// Test that the retry callback is invoked on configuration failure.
-TEST_F(SyncManagerTestWithMockScheduler, ConfigurationRetry) {
- ConfigureReason reason = CONFIGURE_REASON_RECONFIGURATION;
- ModelTypeSet types_to_download(BOOKMARKS, PREFERENCES);
- ModelSafeRoutingInfo new_routing_info;
- GetModelSafeRoutingInfo(&new_routing_info);
-
- ConfigurationParams params;
- EXPECT_CALL(*scheduler(), Start(SyncScheduler::CONFIGURATION_MODE));
- EXPECT_CALL(*scheduler(), ScheduleConfiguration(_)).
- WillOnce(DoAll(SaveArg<0>(&params), Return(false)));
-
- CallbackCounter ready_task_counter, retry_task_counter;
- sync_manager_.ConfigureSyncer(
- reason,
- types_to_download,
- ModelTypeSet(),
- ModelTypeSet(),
- ModelTypeSet(),
- new_routing_info,
- base::Bind(&CallbackCounter::Callback,
- base::Unretained(&ready_task_counter)),
- base::Bind(&CallbackCounter::Callback,
- base::Unretained(&retry_task_counter)));
- EXPECT_EQ(0, ready_task_counter.times_called());
- EXPECT_EQ(1, retry_task_counter.times_called());
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- params.source);
- EXPECT_TRUE(types_to_download.Equals(params.types_to_download));
- EXPECT_EQ(new_routing_info, params.routing_info);
-}
-
// Test that PurgePartiallySyncedTypes purges only those types that have not
// fully completed their initial download and apply.
TEST_F(SyncManagerTest, PurgePartiallySyncedTypes) {
@@ -3519,4 +3493,28 @@ TEST_F(SyncManagerChangeProcessingTest, DeletionsAndChanges) {
EXPECT_LT(folder_b_pos, folder_a_pos);
}
+// During initialization SyncManagerImpl loads the sqlite database. If it fails
+// to do so, it should fail initialization. This test verifies that behavior.
+// The test reuses SyncManagerImpl initialization from SyncManagerTest but
+// overrides InternalComponentsFactory to return a DirectoryBackingStore that
+// always fails to load.
+class SyncManagerInitInvalidStorageTest : public SyncManagerTest {
+ public:
+ SyncManagerInitInvalidStorageTest() {
+ }
+
+ virtual InternalComponentsFactory* GetFactory() OVERRIDE {
+ return new TestInternalComponentsFactory(GetSwitches(), STORAGE_INVALID);
+ }
+};
+
+// SyncManagerInitInvalidStorageTest::GetFactory returns a DirectoryBackingStore
+// that ensures SyncManagerImpl::OpenDirectory fails. SyncManagerImpl
+// initialization is done in SyncManagerTest::SetUp, so this test only needs to
+// check that SyncManagerImpl reported the initialization failure through the
+// OnInitializationComplete callback.
+TEST_F(SyncManagerInitInvalidStorageTest, FailToOpenDatabase) {
+ EXPECT_FALSE(initialization_succeeded_);
+}
+
} // namespace
diff --git a/chromium/sync/internal_api/syncapi_server_connection_manager.cc b/chromium/sync/internal_api/syncapi_server_connection_manager.cc
index ccfa6e6b458..35ca1e2a5ec 100644
--- a/chromium/sync/internal_api/syncapi_server_connection_manager.cc
+++ b/chromium/sync/internal_api/syncapi_server_connection_manager.cc
@@ -31,8 +31,7 @@ bool SyncAPIBridgedConnection::Init(const char* path,
std::string sync_server;
int sync_server_port = 0;
bool use_ssl = false;
- bool use_oauth2_token = false;
- GetServerParams(&sync_server, &sync_server_port, &use_ssl, &use_oauth2_token);
+ GetServerParams(&sync_server, &sync_server_port, &use_ssl);
std::string connection_url = MakeConnectionURL(sync_server, path, use_ssl);
HttpPostProviderInterface* http = post_provider_;
@@ -40,10 +39,7 @@ bool SyncAPIBridgedConnection::Init(const char* path,
if (!auth_token.empty()) {
std::string headers;
- if (use_oauth2_token)
- headers = "Authorization: Bearer " + auth_token;
- else
- headers = "Authorization: GoogleLogin auth=" + auth_token;
+ headers = "Authorization: Bearer " + auth_token;
http->SetExtraRequestHeaders(headers.c_str());
}
@@ -74,9 +70,6 @@ bool SyncAPIBridgedConnection::Init(const char* path,
else
response->server_status = HttpResponse::SYNC_SERVER_ERROR;
- response->update_client_auth_header =
- http->GetResponseHeaderValue("Update-Client-Auth");
-
// Write the content into our buffer.
buffer_.assign(http->GetResponseContent(), http->GetResponseContentLength());
return true;
@@ -91,13 +84,11 @@ SyncAPIServerConnectionManager::SyncAPIServerConnectionManager(
const std::string& server,
int port,
bool use_ssl,
- bool use_oauth2_token,
HttpPostProviderFactory* factory,
CancelationSignal* cancelation_signal)
: ServerConnectionManager(server,
port,
use_ssl,
- use_oauth2_token,
cancelation_signal),
post_provider_factory_(factory) {
DCHECK(post_provider_factory_.get());
diff --git a/chromium/sync/internal_api/syncapi_server_connection_manager.h b/chromium/sync/internal_api/syncapi_server_connection_manager.h
index 48050450829..118d31458ca 100644
--- a/chromium/sync/internal_api/syncapi_server_connection_manager.h
+++ b/chromium/sync/internal_api/syncapi_server_connection_manager.h
@@ -54,7 +54,6 @@ class SYNC_EXPORT_PRIVATE SyncAPIServerConnectionManager
SyncAPIServerConnectionManager(const std::string& server,
int port,
bool use_ssl,
- bool use_oauth2_token,
HttpPostProviderFactory* factory,
CancelationSignal* cancelation_signal);
virtual ~SyncAPIServerConnectionManager();
diff --git a/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc b/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc
index 543455e95bc..2cb3dff3c78 100644
--- a/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc
+++ b/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc
@@ -74,7 +74,7 @@ TEST(SyncAPIServerConnectionManagerTest, VeryEarlyAbortPost) {
CancelationSignal signal;
signal.Signal();
SyncAPIServerConnectionManager server(
- "server", 0, true, false, new BlockingHttpPostFactory(), &signal);
+ "server", 0, true, new BlockingHttpPostFactory(), &signal);
ServerConnectionManager::PostBufferParams params;
ScopedServerStatusWatcher watcher(&server, &params.response);
@@ -91,7 +91,7 @@ TEST(SyncAPIServerConnectionManagerTest, VeryEarlyAbortPost) {
TEST(SyncAPIServerConnectionManagerTest, EarlyAbortPost) {
CancelationSignal signal;
SyncAPIServerConnectionManager server(
- "server", 0, true, false, new BlockingHttpPostFactory(), &signal);
+ "server", 0, true, new BlockingHttpPostFactory(), &signal);
ServerConnectionManager::PostBufferParams params;
ScopedServerStatusWatcher watcher(&server, &params.response);
@@ -109,7 +109,7 @@ TEST(SyncAPIServerConnectionManagerTest, EarlyAbortPost) {
TEST(SyncAPIServerConnectionManagerTest, AbortPost) {
CancelationSignal signal;
SyncAPIServerConnectionManager server(
- "server", 0, true, false, new BlockingHttpPostFactory(), &signal);
+ "server", 0, true, new BlockingHttpPostFactory(), &signal);
ServerConnectionManager::PostBufferParams params;
ScopedServerStatusWatcher watcher(&server, &params.response);
diff --git a/chromium/sync/internal_api/write_node.cc b/chromium/sync/internal_api/write_node.cc
index 079987af3a4..55f56b78d3f 100644
--- a/chromium/sync/internal_api/write_node.cc
+++ b/chromium/sync/internal_api/write_node.cc
@@ -58,7 +58,7 @@ void WriteNode::SetTitle(const std::wstring& title) {
new_legal_title = kEncryptedString;
} else {
SyncAPINameToServerName(WideToUTF8(title), &new_legal_title);
- TruncateUTF8ToByteSize(new_legal_title, 255, &new_legal_title);
+ base::TruncateUTF8ToByteSize(new_legal_title, 255, &new_legal_title);
}
std::string current_legal_title;
diff --git a/chromium/sync/js/sync_js_controller_unittest.cc b/chromium/sync/js/sync_js_controller_unittest.cc
index ac46935b300..eca617c2d45 100644
--- a/chromium/sync/js/sync_js_controller_unittest.cc
+++ b/chromium/sync/js/sync_js_controller_unittest.cc
@@ -30,10 +30,15 @@ class SyncJsControllerTest : public testing::Test {
base::MessageLoop message_loop_;
};
+ACTION_P(ReplyToMessage, reply_name) {
+ arg2.Call(FROM_HERE, &JsReplyHandler::HandleJsReply, reply_name, JsArgList());
+}
+
TEST_F(SyncJsControllerTest, Messages) {
InSequence dummy;
// |mock_backend| needs to outlive |sync_js_controller|.
StrictMock<MockJsBackend> mock_backend;
+ StrictMock<MockJsReplyHandler> mock_reply_handler;
SyncJsController sync_js_controller;
base::ListValue arg_list1, arg_list2;
@@ -41,17 +46,23 @@ TEST_F(SyncJsControllerTest, Messages) {
arg_list2.Append(new base::FundamentalValue(5));
JsArgList args1(&arg_list1), args2(&arg_list2);
- // TODO(akalin): Write matchers for WeakHandle and use them here
- // instead of _.
EXPECT_CALL(mock_backend, SetJsEventHandler(_));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _))
+ .WillOnce(ReplyToMessage("test1_reply"));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _))
+ .WillOnce(ReplyToMessage("test2_reply"));
sync_js_controller.AttachJsBackend(mock_backend.AsWeakHandle());
- sync_js_controller.ProcessJsMessage("test1", args2,
- WeakHandle<JsReplyHandler>());
- sync_js_controller.ProcessJsMessage("test2", args1,
- WeakHandle<JsReplyHandler>());
+ sync_js_controller.ProcessJsMessage("test1",
+ args2,
+ mock_reply_handler.AsWeakHandle());
+ sync_js_controller.ProcessJsMessage("test2",
+ args1,
+ mock_reply_handler.AsWeakHandle());
+
+ // The replies should be waiting on our message loop.
+ EXPECT_CALL(mock_reply_handler, HandleJsReply("test1_reply", _));
+ EXPECT_CALL(mock_reply_handler, HandleJsReply("test2_reply", _));
PumpLoop();
// Let destructor of |sync_js_controller| call RemoveBackend().
@@ -60,6 +71,7 @@ TEST_F(SyncJsControllerTest, Messages) {
TEST_F(SyncJsControllerTest, QueuedMessages) {
// |mock_backend| needs to outlive |sync_js_controller|.
StrictMock<MockJsBackend> mock_backend;
+ StrictMock<MockJsReplyHandler> mock_reply_handler;
SyncJsController sync_js_controller;
base::ListValue arg_list1, arg_list2;
@@ -68,20 +80,29 @@ TEST_F(SyncJsControllerTest, QueuedMessages) {
JsArgList args1(&arg_list1), args2(&arg_list2);
// Should queue messages.
- sync_js_controller.ProcessJsMessage("test1", args2,
- WeakHandle<JsReplyHandler>());
- sync_js_controller.ProcessJsMessage("test2", args1,
- WeakHandle<JsReplyHandler>());
+ sync_js_controller.ProcessJsMessage(
+ "test1",
+ args2,
+ mock_reply_handler.AsWeakHandle());
+ sync_js_controller.ProcessJsMessage(
+ "test2",
+ args1,
+ mock_reply_handler.AsWeakHandle());
+ // Should do nothing.
+ PumpLoop();
Mock::VerifyAndClearExpectations(&mock_backend);
- // TODO(akalin): Write matchers for WeakHandle and use them here
- // instead of _.
- EXPECT_CALL(mock_backend, SetJsEventHandler(_));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _));
// Should call the queued messages.
+ EXPECT_CALL(mock_backend, SetJsEventHandler(_));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _))
+ .WillOnce(ReplyToMessage("test1_reply"));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _))
+ .WillOnce(ReplyToMessage("test2_reply"));
+ EXPECT_CALL(mock_reply_handler, HandleJsReply("test1_reply", _));
+ EXPECT_CALL(mock_reply_handler, HandleJsReply("test2_reply", _));
+
sync_js_controller.AttachJsBackend(mock_backend.AsWeakHandle());
PumpLoop();
diff --git a/chromium/sync/notifier/ack_handler.cc b/chromium/sync/notifier/ack_handler.cc
new file mode 100644
index 00000000000..3b31b2b7b2e
--- /dev/null
+++ b/chromium/sync/notifier/ack_handler.cc
@@ -0,0 +1,15 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/ack_handler.h"
+
+#include "sync/internal_api/public/base/invalidation.h"
+
+namespace syncer {
+
+AckHandler::AckHandler() {}
+
+AckHandler::~AckHandler() {}
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/ack_handler.h b/chromium/sync/notifier/ack_handler.h
new file mode 100644
index 00000000000..f1fc16fa702
--- /dev/null
+++ b/chromium/sync/notifier/ack_handler.h
@@ -0,0 +1,42 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_ACK_HANDLER_H_
+#define SYNC_NOTIFIER_ACK_HANDLER_H_
+
+#include <vector>
+
+#include "sync/base/sync_export.h"
+
+namespace invalidation {
+class ObjectId;
+} // namespace invalidation
+
+namespace syncer {
+
+class AckHandle;
+
+// An interface for classes that keep track of invalidation acknowledgements.
+//
+// We don't expect to support more than one "real" implementation of AckHandler,
+// but this interface is very useful for testing and implementation hiding.
+class SYNC_EXPORT AckHandler {
+ public:
+ AckHandler();
+ virtual ~AckHandler() = 0;
+
+ // Record the local acknowledgement of an invalidation identified by |handle|.
+ virtual void Acknowledge(
+ const invalidation::ObjectId& id,
+ const AckHandle& handle) = 0;
+
+ // Record the drop of an invalidation identified by |handle|.
+ virtual void Drop(
+ const invalidation::ObjectId& id,
+ const AckHandle& handle) = 0;
+};
+
+} // namespace syncer
+
+#endif // SYNC_NOTIFIER_ACK_HANDLER_H_
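
The AckHandler interface above has only two hooks, so downstream code can stub
it out cheaply. A minimal sketch of such a stub, assuming only the interface
shown in this patch (the CountingAckHandler name is illustrative and not part
of the change):

    #include "google/cacheinvalidation/include/types.h"
    #include "sync/internal_api/public/base/ack_handle.h"
    #include "sync/notifier/ack_handler.h"

    namespace syncer {

    // Counts acknowledgements and drops so a test can assert on them.
    class CountingAckHandler : public AckHandler {
     public:
      CountingAckHandler() : ack_count_(0), drop_count_(0) {}
      virtual ~CountingAckHandler() {}

      virtual void Acknowledge(const invalidation::ObjectId& id,
                               const AckHandle& handle) OVERRIDE {
        ++ack_count_;  // A local acknowledgement was recorded.
      }

      virtual void Drop(const invalidation::ObjectId& id,
                        const AckHandle& handle) OVERRIDE {
        ++drop_count_;  // An invalidation was dropped.
      }

      int ack_count() const { return ack_count_; }
      int drop_count() const { return drop_count_; }

     private:
      int ack_count_;
      int drop_count_;
    };

    }  // namespace syncer
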
diff --git a/chromium/sync/notifier/ack_tracker.cc b/chromium/sync/notifier/ack_tracker.cc
deleted file mode 100644
index 6461571749c..00000000000
--- a/chromium/sync/notifier/ack_tracker.cc
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/ack_tracker.h"
-
-#include <algorithm>
-#include <iterator>
-#include <utility>
-
-#include "base/callback.h"
-#include "base/stl_util.h"
-#include "base/time/tick_clock.h"
-#include "google/cacheinvalidation/include/types.h"
-
-namespace syncer {
-
-namespace {
-
-// All times are in milliseconds.
-const net::BackoffEntry::Policy kDefaultBackoffPolicy = {
- // Number of initial errors (in sequence) to ignore before applying
- // exponential back-off rules.
- // Note this value is set to 1 to work in conjunction with a hack in
- // AckTracker::Track.
- 1,
-
- // Initial delay. The interpretation of this value depends on
- // always_use_initial_delay. It's either how long we wait between
- // requests before backoff starts, or how much we delay the first request
- // after backoff starts.
- 60 * 1000,
-
- // Factor by which the waiting time will be multiplied.
- 2,
-
- // Fuzzing percentage. ex: 10% will spread requests randomly
- // between 90%-100% of the calculated time.
- 0,
-
- // Maximum amount of time we are willing to delay our request, -1
- // for no maximum.
- 60 * 10 * 1000,
-
- // Time to keep an entry from being discarded even when it
- // has no significant state, -1 to never discard.
- -1,
-
- // If true, we always use a delay of initial_delay_ms, even before
- // we've seen num_errors_to_ignore errors. Otherwise, initial_delay_ms
- // is the first delay once we start exponential backoff.
- //
- // So if we're ignoring 1 error, we'll see (N, N, Nm, Nm^2, ...) if true,
- // and (0, 0, N, Nm, ...) when false, where N is initial_backoff_ms and
- // m is multiply_factor, assuming we've already seen one success.
- true,
-};
-
-scoped_ptr<net::BackoffEntry> CreateDefaultBackoffEntry(
- const net::BackoffEntry::Policy* const policy) {
- return scoped_ptr<net::BackoffEntry>(new net::BackoffEntry(policy));
-}
-
-} // namespace
-
-AckTracker::Delegate::~Delegate() {
-}
-
-AckTracker::Entry::Entry(scoped_ptr<net::BackoffEntry> backoff,
- const ObjectIdSet& ids)
- : backoff(backoff.Pass()), ids(ids) {
-}
-
-AckTracker::Entry::~Entry() {
-}
-
-AckTracker::AckTracker(base::TickClock* tick_clock, Delegate* delegate)
- : create_backoff_entry_callback_(base::Bind(&CreateDefaultBackoffEntry)),
- tick_clock_(tick_clock),
- delegate_(delegate) {
- DCHECK(tick_clock_);
- DCHECK(delegate_);
-}
-
-AckTracker::~AckTracker() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- Clear();
-}
-
-void AckTracker::Clear() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- timer_.Stop();
- STLDeleteValues(&queue_);
-}
-
-void AckTracker::Track(const ObjectIdSet& ids) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!ids.empty());
-
- scoped_ptr<Entry> entry(new Entry(
- create_backoff_entry_callback_.Run(&kDefaultBackoffPolicy), ids));
- // This is a small hack. When net::BackoffRequest is first created,
- // GetReleaseTime() always returns the default base::TimeTicks value: 0.
- // In order to work around that, we mark it as failed right away.
- entry->backoff->InformOfRequest(false /* succeeded */);
- const base::TimeTicks release_time = entry->backoff->GetReleaseTime();
- queue_.insert(std::make_pair(release_time, entry.release()));
- NudgeTimer();
-}
-
-void AckTracker::Ack(const ObjectIdSet& ids) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // We could be clever and maintain a mapping of object IDs to their position
- // in the multimap, but that makes things a lot more complicated.
- for (std::multimap<base::TimeTicks, Entry*>::iterator it = queue_.begin();
- it != queue_.end(); ) {
- ObjectIdSet remaining_ids;
- std::set_difference(it->second->ids.begin(), it->second->ids.end(),
- ids.begin(), ids.end(),
- std::inserter(remaining_ids, remaining_ids.begin()),
- ids.value_comp());
- it->second->ids.swap(remaining_ids);
- if (it->second->ids.empty()) {
- std::multimap<base::TimeTicks, Entry*>::iterator erase_it = it;
- ++it;
- delete erase_it->second;
- queue_.erase(erase_it);
- } else {
- ++it;
- }
- }
- NudgeTimer();
-}
-
-void AckTracker::NudgeTimer() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- if (queue_.empty()) {
- return;
- }
-
- const base::TimeTicks now = tick_clock_->NowTicks();
- // There are two cases when the timer needs to be started:
- // 1. |desired_run_time_| is in the past. By definition, the timer has already
- // fired at this point. Since the queue is non-empty, we need to set the
- // timer to fire again.
- // 2. The timer is already running but we need it to fire sooner if the first
- // entry's timeout occurs before |desired_run_time_|.
- if (desired_run_time_ <= now || queue_.begin()->first < desired_run_time_) {
- base::TimeDelta delay = queue_.begin()->first - now;
- if (delay < base::TimeDelta()) {
- delay = base::TimeDelta();
- }
- timer_.Start(FROM_HERE, delay, this, &AckTracker::OnTimeout);
- desired_run_time_ = queue_.begin()->first;
- }
-}
-
-void AckTracker::OnTimeout() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- OnTimeoutAt(tick_clock_->NowTicks());
-}
-
-void AckTracker::OnTimeoutAt(base::TimeTicks now) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- if (queue_.empty())
- return;
-
- ObjectIdSet expired_ids;
- std::multimap<base::TimeTicks, Entry*>::iterator end =
- queue_.upper_bound(now);
- std::vector<Entry*> expired_entries;
- for (std::multimap<base::TimeTicks, Entry*>::iterator it = queue_.begin();
- it != end; ++it) {
- expired_ids.insert(it->second->ids.begin(), it->second->ids.end());
- it->second->backoff->InformOfRequest(false /* succeeded */);
- expired_entries.push_back(it->second);
- }
- queue_.erase(queue_.begin(), end);
- for (std::vector<Entry*>::const_iterator it = expired_entries.begin();
- it != expired_entries.end(); ++it) {
- queue_.insert(std::make_pair((*it)->backoff->GetReleaseTime(), *it));
- }
- delegate_->OnTimeout(expired_ids);
- NudgeTimer();
-}
-
-// Testing helpers.
-void AckTracker::SetCreateBackoffEntryCallbackForTest(
- const CreateBackoffEntryCallback& create_backoff_entry_callback) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- create_backoff_entry_callback_ = create_backoff_entry_callback;
-}
-
-bool AckTracker::TriggerTimeoutAtForTest(base::TimeTicks now) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- bool no_timeouts_before_now = (queue_.lower_bound(now) == queue_.begin());
- OnTimeoutAt(now);
- return no_timeouts_before_now;
-}
-
-bool AckTracker::IsQueueEmptyForTest() const {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- return queue_.empty();
-}
-
-const base::Timer& AckTracker::GetTimerForTest() const {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- return timer_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/ack_tracker.h b/chromium/sync/notifier/ack_tracker.h
deleted file mode 100644
index 9f2ca1daf8e..00000000000
--- a/chromium/sync/notifier/ack_tracker.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_ACK_TRACKER_H_
-#define SYNC_NOTIFIER_ACK_TRACKER_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/callback_forward.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/thread_checker.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-#include "net/base/backoff_entry.h"
-#include "sync/base/sync_export.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace base {
-class TickClock;
-} // namespace base
-
-namespace syncer {
-
-// A simple class that tracks sets of object IDs that have not yet been
-// acknowledged. Internally, it manages timeouts for the tracked object IDs and
-// periodically triggers a callback for each timeout period. The timeout is a
-// simple exponentially increasing time that starts at 60 seconds and is capped
-// at 600 seconds.
-class SYNC_EXPORT_PRIVATE AckTracker {
- public:
- class SYNC_EXPORT_PRIVATE Delegate {
- public:
- virtual ~Delegate();
-
- // |ids| contains all object IDs that have timed out in this time interval.
- virtual void OnTimeout(const ObjectIdSet& ids) = 0;
- };
-
- typedef base::Callback<scoped_ptr<net::BackoffEntry>(
- const net::BackoffEntry::Policy* const)> CreateBackoffEntryCallback;
-
- AckTracker(base::TickClock* tick_clock, Delegate* delegate);
- ~AckTracker();
-
- // Equivalent to calling Ack() on all currently registered object IDs.
- void Clear();
-
- // Starts tracking timeouts for |ids|. Timeouts will be triggered for each
- // object ID until it is acknowledged. Note that no de-duplication is
- // performed; calling Track() twice on the same set of ids will result in two
- // different timeouts being triggered for those ids.
- void Track(const ObjectIdSet& ids);
- // Marks a set of |ids| as acknowledged.
- void Ack(const ObjectIdSet& ids);
-
- // Testing methods.
- void SetCreateBackoffEntryCallbackForTest(
- const CreateBackoffEntryCallback& create_backoff_entry_callback);
- // Returns true iff there are no timeouts scheduled to occur before |now|.
- // Used in testing to make sure we don't have timeouts set to expire before
- // when they should.
- bool TriggerTimeoutAtForTest(base::TimeTicks now);
- bool IsQueueEmptyForTest() const;
- const base::Timer& GetTimerForTest() const;
-
- private:
- struct Entry {
- Entry(scoped_ptr<net::BackoffEntry> backoff, const ObjectIdSet& ids);
- ~Entry();
-
- scoped_ptr<net::BackoffEntry> backoff;
- ObjectIdSet ids;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Entry);
- };
-
- void NudgeTimer();
- void OnTimeout();
- void OnTimeoutAt(base::TimeTicks now);
-
- static scoped_ptr<net::BackoffEntry> DefaultCreateBackoffEntryStrategy(
- const net::BackoffEntry::Policy* const policy);
-
- // Used for testing purposes.
- CreateBackoffEntryCallback create_backoff_entry_callback_;
-
- base::TickClock* const tick_clock_;
-
- Delegate* const delegate_;
-
- base::OneShotTimer<AckTracker> timer_;
- // The time that the timer should fire at. We use this to determine if we need
- // to start or update |timer_| in NudgeTimer(). We can't simply use
- // timer_.desired_run_time() for this purpose because it always uses
- // base::TimeTicks::Now() as a reference point when Timer::Start() is called,
- // while NudgeTimer() needs a fixed reference point to avoid unnecessarily
- // updating the timer.
- base::TimeTicks desired_run_time_;
- std::multimap<base::TimeTicks, Entry*> queue_;
-
- base::ThreadChecker thread_checker_;
-
- DISALLOW_COPY_AND_ASSIGN(AckTracker);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_ACK_TRACKER_H_
diff --git a/chromium/sync/notifier/ack_tracker_unittest.cc b/chromium/sync/notifier/ack_tracker_unittest.cc
deleted file mode 100644
index c1a677351e2..00000000000
--- a/chromium/sync/notifier/ack_tracker_unittest.cc
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/ack_tracker.h"
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/time/tick_clock.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "google/cacheinvalidation/types.pb.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class FakeTickClock : public base::TickClock {
- public:
- FakeTickClock() {}
-
- virtual ~FakeTickClock() {}
-
- void LeapForward(int seconds) {
- ASSERT_GT(seconds, 0);
- fake_now_ticks_ += base::TimeDelta::FromSeconds(seconds);
- }
-
- // After the next call to Now(), immediately leap forward by |seconds|.
- void DelayedLeapForward(int seconds) {
- ASSERT_GT(seconds, 0);
- delayed_leap_ = base::TimeDelta::FromSeconds(seconds);
- }
-
- virtual base::TimeTicks NowTicks() OVERRIDE {
- base::TimeTicks fake_now_ticks = fake_now_ticks_;
- if (delayed_leap_ > base::TimeDelta()) {
- fake_now_ticks_ += delayed_leap_;
- delayed_leap_ = base::TimeDelta();
- }
- return fake_now_ticks;
- }
-
- private:
- base::TimeTicks fake_now_ticks_;
- base::TimeDelta delayed_leap_;
-};
-
-class FakeBackoffEntry : public net::BackoffEntry {
- public:
- FakeBackoffEntry(const Policy* const policy, base::TickClock* tick_clock)
- : BackoffEntry(policy),
- tick_clock_(tick_clock) {
- }
-
- protected:
- virtual base::TimeTicks ImplGetTimeNow() const OVERRIDE {
- return tick_clock_->NowTicks();
- }
-
- private:
- base::TickClock* const tick_clock_;
-};
-
-class MockDelegate : public AckTracker::Delegate {
- public:
- MOCK_METHOD1(OnTimeout, void(const ObjectIdSet&));
-};
-
-scoped_ptr<net::BackoffEntry> CreateMockEntry(
- base::TickClock* tick_clock,
- const net::BackoffEntry::Policy* const policy) {
- return scoped_ptr<net::BackoffEntry>(new FakeBackoffEntry(
- policy, tick_clock));
-}
-
-} // namespace
-
-class AckTrackerTest : public testing::Test {
- public:
- AckTrackerTest()
- : ack_tracker_(&fake_tick_clock_, &delegate_),
- kIdOne(ipc::invalidation::ObjectSource::TEST, "one"),
- kIdTwo(ipc::invalidation::ObjectSource::TEST, "two") {
- ack_tracker_.SetCreateBackoffEntryCallbackForTest(
- base::Bind(&CreateMockEntry, &fake_tick_clock_));
- }
-
- protected:
- bool TriggerTimeoutNow() {
- return ack_tracker_.TriggerTimeoutAtForTest(fake_tick_clock_.NowTicks());
- }
-
- base::TimeDelta GetTimerDelay() const {
- const base::Timer& timer = ack_tracker_.GetTimerForTest();
- if (!timer.IsRunning())
- ADD_FAILURE() << "Timer is not running!";
- return timer.GetCurrentDelay();
- }
-
- FakeTickClock fake_tick_clock_;
- ::testing::StrictMock<MockDelegate> delegate_;
- AckTracker ack_tracker_;
-
- const invalidation::ObjectId kIdOne;
- const invalidation::ObjectId kIdTwo;
-
- // AckTracker uses base::Timer internally, which depends on the existence of a
- // MessageLoop.
- base::MessageLoop message_loop_;
-};
-
-// Tests that various combinations of Track()/Ack() behave as
-// expected.
-TEST_F(AckTrackerTest, TrackAndAck) {
- ObjectIdSet ids_one;
- ids_one.insert(kIdOne);
- ObjectIdSet ids_two;
- ids_two.insert(kIdTwo);
- ObjectIdSet ids_all;
- ids_all.insert(kIdOne);
- ids_all.insert(kIdTwo);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids_one);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids_two);
- ack_tracker_.Ack(ids_one);
- ack_tracker_.Ack(ids_two);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-
- ack_tracker_.Track(ids_all);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Ack(ids_one);
- ack_tracker_.Ack(ids_two);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-
- ack_tracker_.Track(ids_one);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids_two);
- ack_tracker_.Ack(ids_all);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-
- ack_tracker_.Track(ids_all);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Ack(ids_all);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-TEST_F(AckTrackerTest, DoubleTrack) {
- ObjectIdSet ids;
- ids.insert(kIdOne);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids);
- ack_tracker_.Ack(ids);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-TEST_F(AckTrackerTest, UntrackedAck) {
- ObjectIdSet ids;
- ids.insert(kIdOne);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Ack(ids);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-TEST_F(AckTrackerTest, Clear) {
- ObjectIdSet ids;
- ids.insert(kIdOne);
- ids.insert(kIdOne);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Clear();
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-// Test that timeout behavior for one object ID. The timeout should increase
-// exponentially until it hits the cap.
-TEST_F(AckTrackerTest, SimpleTimeout) {
- ObjectIdSet ids;
- ids.insert(kIdOne);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
- fake_tick_clock_.LeapForward(60);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(120), GetTimerDelay());
- fake_tick_clock_.LeapForward(120);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(240), GetTimerDelay());
- fake_tick_clock_.LeapForward(240);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(480), GetTimerDelay());
- fake_tick_clock_.LeapForward(480);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(600), GetTimerDelay());
- fake_tick_clock_.LeapForward(600);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(600), GetTimerDelay());
- fake_tick_clock_.LeapForward(600);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Ack(ids);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-
- // The backoff time should be reset after an Ack/Track cycle.
- ack_tracker_.Track(ids);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
- fake_tick_clock_.LeapForward(60);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Ack(ids);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-// Tests that a sequence of Track() calls that results in interleaving
-// timeouts occurs as expected.
-TEST_F(AckTrackerTest, InterleavedTimeout) {
- ObjectIdSet ids_one;
- ids_one.insert(kIdOne);
- ObjectIdSet ids_two;
- ids_two.insert(kIdTwo);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids_one);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- fake_tick_clock_.LeapForward(30);
- ack_tracker_.Track(ids_two);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
- fake_tick_clock_.LeapForward(30);
- EXPECT_CALL(delegate_, OnTimeout(ids_one));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(30), GetTimerDelay());
- fake_tick_clock_.LeapForward(30);
- EXPECT_CALL(delegate_, OnTimeout(ids_two));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(90), GetTimerDelay());
- fake_tick_clock_.LeapForward(90);
- EXPECT_CALL(delegate_, OnTimeout(ids_one));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(30), GetTimerDelay());
- fake_tick_clock_.LeapForward(30);
- EXPECT_CALL(delegate_, OnTimeout(ids_two));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- ack_tracker_.Ack(ids_one);
- ack_tracker_.Ack(ids_two);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-// Tests that registering a new object ID properly shortens the timeout when
-// needed.
-TEST_F(AckTrackerTest, ShortenTimeout) {
- ObjectIdSet ids_one;
- ids_one.insert(kIdOne);
- ObjectIdSet ids_two;
- ids_two.insert(kIdTwo);
-
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids_one);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
- fake_tick_clock_.LeapForward(60);
- EXPECT_CALL(delegate_, OnTimeout(ids_one));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- // Without this next register, the next timeout should occur in 120 seconds
- // from the last timeout event.
- EXPECT_EQ(base::TimeDelta::FromSeconds(120), GetTimerDelay());
- fake_tick_clock_.LeapForward(30);
- ack_tracker_.Track(ids_two);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- // Now that we've registered another entry though, we should receive a timeout
- // in 60 seconds.
- EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
- fake_tick_clock_.LeapForward(60);
- EXPECT_CALL(delegate_, OnTimeout(ids_two));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- // Verify that the original timeout for kIdOne still occurs as expected.
- EXPECT_EQ(base::TimeDelta::FromSeconds(30), GetTimerDelay());
- fake_tick_clock_.LeapForward(30);
- EXPECT_CALL(delegate_, OnTimeout(ids_one));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- ack_tracker_.Ack(ids_one);
- ack_tracker_.Ack(ids_two);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-// Tests that a delay between inserting a new object ID registration and start
-// the timer that is greater than the initial timeout period (60 seconds) does
-// not break things. This could happen on a heavily loaded system, for instance.
-TEST_F(AckTrackerTest, ImmediateTimeout) {
- ObjectIdSet ids;
- ids.insert(kIdOne);
-
- fake_tick_clock_.DelayedLeapForward(90);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
- ack_tracker_.Track(ids);
- EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
-
- EXPECT_EQ(base::TimeDelta::FromSeconds(0), GetTimerDelay());
- EXPECT_CALL(delegate_, OnTimeout(ids));
- message_loop_.RunUntilIdle();
-
- // The next timeout should still be scheduled normally.
- EXPECT_EQ(base::TimeDelta::FromSeconds(120), GetTimerDelay());
- fake_tick_clock_.LeapForward(120);
- EXPECT_CALL(delegate_, OnTimeout(ids));
- EXPECT_TRUE(TriggerTimeoutNow());
-
- ack_tracker_.Ack(ids);
- EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/dropped_invalidation_tracker.cc b/chromium/sync/notifier/dropped_invalidation_tracker.cc
new file mode 100644
index 00000000000..8599cc258c0
--- /dev/null
+++ b/chromium/sync/notifier/dropped_invalidation_tracker.cc
@@ -0,0 +1,42 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/dropped_invalidation_tracker.h"
+
+#include "sync/internal_api/public/base/invalidation.h"
+
+namespace syncer {
+
+DroppedInvalidationTracker::DroppedInvalidationTracker(
+ const invalidation::ObjectId& id)
+ : id_(id),
+ drop_ack_handle_(AckHandle::InvalidAckHandle()) {}
+
+DroppedInvalidationTracker::~DroppedInvalidationTracker() {}
+
+const invalidation::ObjectId& DroppedInvalidationTracker::object_id() const {
+ return id_;
+}
+
+void DroppedInvalidationTracker::RecordDropEvent(
+ WeakHandle<AckHandler> handler, AckHandle handle) {
+ drop_ack_handler_ = handler;
+ drop_ack_handle_ = handle;
+}
+
+void DroppedInvalidationTracker::RecordRecoveryFromDropEvent() {
+ if (drop_ack_handler_.IsInitialized()) {
+ drop_ack_handler_.Call(FROM_HERE,
+ &AckHandler::Acknowledge,
+ id_,
+ drop_ack_handle_);
+ }
+ drop_ack_handler_ = syncer::WeakHandle<AckHandler>();
+}
+
+bool DroppedInvalidationTracker::IsRecoveringFromDropEvent() const {
+ return drop_ack_handler_.IsInitialized();
+}
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/dropped_invalidation_tracker.h b/chromium/sync/notifier/dropped_invalidation_tracker.h
new file mode 100644
index 00000000000..877187ed0bc
--- /dev/null
+++ b/chromium/sync/notifier/dropped_invalidation_tracker.h
@@ -0,0 +1,67 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_DROPPED_INVALIDATION_TRACKER_H_
+#define SYNC_NOTIFIER_DROPPED_INVALIDATION_TRACKER_H_
+
+#include "google/cacheinvalidation/include/types.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/ack_handle.h"
+#include "sync/internal_api/public/util/weak_handle.h"
+#include "sync/notifier/ack_handler.h"
+
+namespace syncer {
+
+class Invalidation;
+
+// Helps InvalidationHandlers keep track of dropped invalidations for a given
+// ObjectId.
+//
+// The intent of this class is to hide some of the implementation details around
+// how the invalidations system manages dropping and drop recovery. Any
+// invalidation handler that intends to buffer and occasionally drop
+// invalidations should keep one instance of it per registered ObjectId.
+//
+// When an invalidation handler wishes to drop an invalidation, it must provide
+// an instance of this class to that Invalidation's Drop() method. In order to
+// indicate recovery from a drop, the handler can call this class'
+// RecordRecoveryFromDropEvent().
+class SYNC_EXPORT DroppedInvalidationTracker {
+ public:
+ explicit DroppedInvalidationTracker(const invalidation::ObjectId& id);
+ ~DroppedInvalidationTracker();
+
+ const invalidation::ObjectId& object_id() const;
+
+ // Called by Invalidation::Drop() to keep track of a drop event.
+ //
+ // Takes ownership of the internals belonging to a soon-to-be-discarded
+ // dropped invalidation. See also the comment for this class'
+ // |drop_ack_handler_| member.
+ void RecordDropEvent(WeakHandle<AckHandler> handler, AckHandle handle);
+
+ // Returns true if we're still recovering from a drop event.
+ bool IsRecoveringFromDropEvent() const;
+
+ // Called by the InvalidationHandler when it recovers from the drop event.
+ void RecordRecoveryFromDropEvent();
+
+ private:
+ invalidation::ObjectId id_;
+ AckHandle drop_ack_handle_;
+
+ // A WeakHandle to the entity responsible for persisting invalidation
+ // acknowledgement state on disk. We can get away with using a WeakHandle
+ // because we don't care if our drop recovery message doesn't get delivered
+ // in some shutdown cases. If that happens, we'll have to process the
+ // invalidation state again on the next restart. It would be a waste of time
+ // and resources, but otherwise not particularly harmful.
+ WeakHandle<AckHandler> drop_ack_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(DroppedInvalidationTracker);
+};
+
+} // namespace syncer
+
+#endif // SYNC_NOTIFIER_DROPPED_INVALIDATION_TRACKER_H_
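
A handler-side sketch of the protocol described in the class comment above. It
assumes Invalidation::Drop() accepts a pointer to the tracker, as that comment
states; the BufferingHandler class and its buffer check are illustrative only
and are not part of this change:

    #include "sync/internal_api/public/base/invalidation.h"
    #include "sync/notifier/dropped_invalidation_tracker.h"

    namespace syncer {

    // One tracker per registered ObjectId, owned by the handler.
    class BufferingHandler {
     public:
      explicit BufferingHandler(const invalidation::ObjectId& id)
          : tracker_(id), buffer_full_(false) {}

      void Handle(Invalidation* incoming) {
        if (buffer_full_) {
          // Record the drop instead of processing; assumes Drop() takes the
          // tracker, per the class comment above.
          incoming->Drop(&tracker_);
          return;
        }
        // ... process the invalidation here ...
        if (tracker_.IsRecoveringFromDropEvent())
          tracker_.RecordRecoveryFromDropEvent();  // Caught up again.
      }

     private:
      DroppedInvalidationTracker tracker_;
      bool buffer_full_;
    };

    }  // namespace syncer
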
diff --git a/chromium/sync/notifier/fake_invalidation_handler.h b/chromium/sync/notifier/fake_invalidation_handler.h
index 7ab89f4580e..81b877a5bcd 100644
--- a/chromium/sync/notifier/fake_invalidation_handler.h
+++ b/chromium/sync/notifier/fake_invalidation_handler.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "sync/notifier/invalidation_handler.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
diff --git a/chromium/sync/notifier/fake_invalidation_state_tracker.cc b/chromium/sync/notifier/fake_invalidation_state_tracker.cc
index 6e147fe4085..47e2f0f0dc7 100644
--- a/chromium/sync/notifier/fake_invalidation_state_tracker.cc
+++ b/chromium/sync/notifier/fake_invalidation_state_tracker.cc
@@ -18,35 +18,6 @@ FakeInvalidationStateTracker::FakeInvalidationStateTracker() {}
FakeInvalidationStateTracker::~FakeInvalidationStateTracker() {}
-int64 FakeInvalidationStateTracker::GetMaxVersion(
- const invalidation::ObjectId& id) const {
- InvalidationStateMap::const_iterator it = state_map_.find(id);
- return (it == state_map_.end()) ? kMinVersion : it->second.version;
-}
-
-InvalidationStateMap
-FakeInvalidationStateTracker::GetAllInvalidationStates() const {
- return state_map_;
-}
-
-void FakeInvalidationStateTracker::SetMaxVersionAndPayload(
- const invalidation::ObjectId& id,
- int64 max_version,
- const std::string& payload) {
- InvalidationStateMap::const_iterator it = state_map_.find(id);
- if ((it != state_map_.end()) && (max_version <= it->second.version)) {
- ADD_FAILURE();
- return;
- }
- state_map_[id].version = max_version;
-}
-
-void FakeInvalidationStateTracker::Forget(const ObjectIdSet& ids) {
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- state_map_.erase(*it);
- }
-}
-
void FakeInvalidationStateTracker::SetInvalidatorClientId(
const std::string& client_id) {
Clear();
@@ -66,31 +37,19 @@ std::string FakeInvalidationStateTracker::GetBootstrapData() const {
return bootstrap_data_;
}
-void FakeInvalidationStateTracker::Clear() {
- invalidator_client_id_ = "";
- state_map_ = InvalidationStateMap();
- bootstrap_data_ = "";
+void FakeInvalidationStateTracker::SetSavedInvalidations(
+ const UnackedInvalidationsMap& states) {
+ unacked_invalidations_map_ = states;
}
-void FakeInvalidationStateTracker::GenerateAckHandles(
- const ObjectIdSet& ids,
- const scoped_refptr<base::TaskRunner>& task_runner,
- base::Callback<void(const AckHandleMap&)> callback) {
- AckHandleMap ack_handles;
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- state_map_[*it].expected = AckHandle::CreateUnique();
- ack_handles.insert(std::make_pair(*it, state_map_[*it].expected));
- }
- if (!task_runner->PostTask(FROM_HERE, base::Bind(callback, ack_handles)))
- ADD_FAILURE();
+UnackedInvalidationsMap
+FakeInvalidationStateTracker::GetSavedInvalidations() const {
+ return unacked_invalidations_map_;
}
-void FakeInvalidationStateTracker::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- InvalidationStateMap::iterator it = state_map_.find(id);
- if (it == state_map_.end())
- ADD_FAILURE();
- it->second.current = ack_handle;
+void FakeInvalidationStateTracker::Clear() {
+ invalidator_client_id_.clear();
+ bootstrap_data_.clear();
}
} // namespace syncer
diff --git a/chromium/sync/notifier/fake_invalidation_state_tracker.h b/chromium/sync/notifier/fake_invalidation_state_tracker.h
index b43699bbd28..d1daaba121f 100644
--- a/chromium/sync/notifier/fake_invalidation_state_tracker.h
+++ b/chromium/sync/notifier/fake_invalidation_state_tracker.h
@@ -19,32 +19,22 @@ class FakeInvalidationStateTracker
FakeInvalidationStateTracker();
virtual ~FakeInvalidationStateTracker();
- int64 GetMaxVersion(const invalidation::ObjectId& id) const;
-
// InvalidationStateTracker implementation.
- virtual InvalidationStateMap GetAllInvalidationStates() const OVERRIDE;
- virtual void SetMaxVersionAndPayload(const invalidation::ObjectId& id,
- int64 max_version,
- const std::string& payload) OVERRIDE;
- virtual void Forget(const ObjectIdSet& ids) OVERRIDE;
virtual void SetInvalidatorClientId(const std::string& client_id) OVERRIDE;
virtual std::string GetInvalidatorClientId() const OVERRIDE;
virtual void SetBootstrapData(const std::string& data) OVERRIDE;
virtual std::string GetBootstrapData() const OVERRIDE;
+ virtual void SetSavedInvalidations(
+ const UnackedInvalidationsMap& states) OVERRIDE;
+ virtual UnackedInvalidationsMap GetSavedInvalidations() const OVERRIDE;
virtual void Clear() OVERRIDE;
- virtual void GenerateAckHandles(
- const ObjectIdSet& ids,
- const scoped_refptr<base::TaskRunner>& task_runner,
- base::Callback<void(const AckHandleMap&)> callback) OVERRIDE;
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE;
static const int64 kMinVersion;
private:
- InvalidationStateMap state_map_;
std::string invalidator_client_id_;
std::string bootstrap_data_;
+ UnackedInvalidationsMap unacked_invalidations_map_;
};
} // namespace syncer
diff --git a/chromium/sync/notifier/fake_invalidator.cc b/chromium/sync/notifier/fake_invalidator.cc
index 0b217f79e1e..3e1ce32250b 100644
--- a/chromium/sync/notifier/fake_invalidator.cc
+++ b/chromium/sync/notifier/fake_invalidator.cc
@@ -4,6 +4,8 @@
#include "sync/notifier/fake_invalidator.h"
+#include "sync/notifier/object_id_invalidation_map.h"
+
namespace syncer {
FakeInvalidator::FakeInvalidator() {}
@@ -49,11 +51,6 @@ void FakeInvalidator::UnregisterHandler(InvalidationHandler* handler) {
registrar_.UnregisterHandler(handler);
}
-void FakeInvalidator::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- // Do nothing.
-}
-
InvalidatorState FakeInvalidator::GetInvalidatorState() const {
return registrar_.GetInvalidatorState();
}
diff --git a/chromium/sync/notifier/fake_invalidator.h b/chromium/sync/notifier/fake_invalidator.h
index 7913694254e..b2173eef4ac 100644
--- a/chromium/sync/notifier/fake_invalidator.h
+++ b/chromium/sync/notifier/fake_invalidator.h
@@ -24,7 +24,6 @@ class FakeInvalidator : public Invalidator {
const std::string& GetUniqueId() const;
const std::string& GetCredentialsEmail() const;
const std::string& GetCredentialsToken() const;
- const ObjectIdInvalidationMap& GetLastSentInvalidationMap() const;
void EmitOnInvalidatorStateChange(InvalidatorState state);
void EmitOnIncomingInvalidation(
@@ -34,8 +33,6 @@ class FakeInvalidator : public Invalidator {
virtual void UpdateRegisteredIds(InvalidationHandler* handler,
const ObjectIdSet& ids) OVERRIDE;
virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
diff --git a/chromium/sync/notifier/invalidation_handler.h b/chromium/sync/notifier/invalidation_handler.h
index be85116fc57..2f5149f262f 100644
--- a/chromium/sync/notifier/invalidation_handler.h
+++ b/chromium/sync/notifier/invalidation_handler.h
@@ -7,10 +7,11 @@
#include "sync/base/sync_export.h"
#include "sync/notifier/invalidator_state.h"
-#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
+class ObjectIdInvalidationMap;
+
class SYNC_EXPORT InvalidationHandler {
public:
// Called when the invalidator state changes.
diff --git a/chromium/sync/notifier/invalidation_notifier.cc b/chromium/sync/notifier/invalidation_notifier.cc
index e3c79a44117..a509409c76d 100644
--- a/chromium/sync/notifier/invalidation_notifier.cc
+++ b/chromium/sync/notifier/invalidation_notifier.cc
@@ -20,17 +20,17 @@ namespace syncer {
InvalidationNotifier::InvalidationNotifier(
scoped_ptr<notifier::PushClient> push_client,
const std::string& invalidator_client_id,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& saved_invalidations,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
const std::string& client_info)
: state_(STOPPED),
- initial_invalidation_state_map_(initial_invalidation_state_map),
+ saved_invalidations_(saved_invalidations),
invalidation_state_tracker_(invalidation_state_tracker),
client_info_(client_info),
invalidator_client_id_(invalidator_client_id),
invalidation_bootstrap_data_(invalidation_bootstrap_data),
- invalidation_listener_(&tick_clock_, push_client.Pass()) {
+ invalidation_listener_(push_client.Pass()) {
}
InvalidationNotifier::~InvalidationNotifier() {
@@ -54,12 +54,6 @@ void InvalidationNotifier::UnregisterHandler(InvalidationHandler* handler) {
registrar_.UnregisterHandler(handler);
}
-void InvalidationNotifier::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- DCHECK(CalledOnValidThread());
- invalidation_listener_.Acknowledge(id, ack_handle);
-}
-
InvalidatorState InvalidationNotifier::GetInvalidatorState() const {
DCHECK(CalledOnValidThread());
return registrar_.GetInvalidatorState();
@@ -71,7 +65,7 @@ void InvalidationNotifier::UpdateCredentials(
invalidation_listener_.Start(
base::Bind(&invalidation::CreateInvalidationClient),
invalidator_client_id_, client_info_, invalidation_bootstrap_data_,
- initial_invalidation_state_map_,
+ saved_invalidations_,
invalidation_state_tracker_,
this);
state_ = STARTED;
diff --git a/chromium/sync/notifier/invalidation_notifier.h b/chromium/sync/notifier/invalidation_notifier.h
index b7a98f88810..a11608c1611 100644
--- a/chromium/sync/notifier/invalidation_notifier.h
+++ b/chromium/sync/notifier/invalidation_notifier.h
@@ -18,7 +18,6 @@
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/util/weak_handle.h"
@@ -34,7 +33,6 @@ class PushClient;
namespace syncer {
// This class must live on the IO thread.
-// TODO(dcheng): Think of a name better than InvalidationInvalidator.
class SYNC_EXPORT_PRIVATE InvalidationNotifier
: public Invalidator,
public SyncInvalidationListener::Delegate,
@@ -44,7 +42,7 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
InvalidationNotifier(
scoped_ptr<notifier::PushClient> push_client,
const std::string& invalidator_client_id,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& saved_invalidations,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>&
invalidation_state_tracker,
@@ -57,8 +55,6 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
virtual void UpdateRegisteredIds(InvalidationHandler* handler,
const ObjectIdSet& ids) OVERRIDE;
virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
@@ -83,7 +79,7 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
InvalidatorRegistrar registrar_;
// Passed to |invalidation_listener_|.
- const InvalidationStateMap initial_invalidation_state_map_;
+ const UnackedInvalidationsMap saved_invalidations_;
// Passed to |invalidation_listener_|.
const WeakHandle<InvalidationStateTracker>
@@ -98,10 +94,6 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
// The initial bootstrap data to pass to |invalidation_listener_|.
const std::string invalidation_bootstrap_data_;
- // TODO(akalin): Clean up this reference to DefaultTickClock. Ideally, we
- // should simply be using TaskRunner's tick clock. See http://crbug.com/179211
- base::DefaultTickClock tick_clock_;
-
// The invalidation listener.
SyncInvalidationListener invalidation_listener_;
diff --git a/chromium/sync/notifier/invalidation_notifier_unittest.cc b/chromium/sync/notifier/invalidation_notifier_unittest.cc
index 623bfa991a0..bc92e23f9fd 100644
--- a/chromium/sync/notifier/invalidation_notifier_unittest.cc
+++ b/chromium/sync/notifier/invalidation_notifier_unittest.cc
@@ -16,7 +16,6 @@
#include "sync/notifier/fake_invalidation_state_tracker.h"
#include "sync/notifier/invalidation_state_tracker.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -41,7 +40,7 @@ class InvalidationNotifierTestDelegate {
new InvalidationNotifier(
scoped_ptr<notifier::PushClient>(new notifier::FakePushClient()),
invalidator_client_id,
- InvalidationStateMap(),
+ UnackedInvalidationsMap(),
initial_state,
MakeWeakHandle(invalidation_state_tracker),
"fake_client_info"));
diff --git a/chromium/sync/notifier/invalidation_state_tracker.cc b/chromium/sync/notifier/invalidation_state_tracker.cc
deleted file mode 100644
index 335f3b2a61d..00000000000
--- a/chromium/sync/notifier/invalidation_state_tracker.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidation_state_tracker.h"
-
-namespace syncer {
-
-InvalidationState::InvalidationState()
- : version(kint64min),
- expected(AckHandle::InvalidAckHandle()),
- current(AckHandle::InvalidAckHandle()) {
-}
-
-InvalidationState::~InvalidationState() {
-}
-
-bool operator==(const InvalidationState& lhs, const InvalidationState& rhs) {
- return lhs.version == rhs.version &&
- lhs.expected.Equals(rhs.expected) &&
- lhs.current.Equals(rhs.current);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidation_state_tracker.h b/chromium/sync/notifier/invalidation_state_tracker.h
index e3e5bd21a94..81a07eae5c5 100644
--- a/chromium/sync/notifier/invalidation_state_tracker.h
+++ b/chromium/sync/notifier/invalidation_state_tracker.h
@@ -22,6 +22,7 @@
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/invalidation.h"
#include "sync/notifier/invalidation_util.h"
+#include "sync/notifier/unacked_invalidation_set.h"
namespace base {
class TaskRunner;
@@ -29,39 +30,10 @@ class TaskRunner;
namespace syncer {
-struct SYNC_EXPORT InvalidationState {
- InvalidationState();
- ~InvalidationState();
-
- int64 version;
- std::string payload;
- AckHandle expected;
- AckHandle current;
-};
-
-// TODO(dcheng): Remove this in favor of adding an Equals() method.
-SYNC_EXPORT_PRIVATE bool operator==(const InvalidationState& lhs,
- const InvalidationState& rhs);
-
-typedef std::map<invalidation::ObjectId, InvalidationState, ObjectIdLessThan>
- InvalidationStateMap;
-typedef std::map<invalidation::ObjectId, AckHandle, ObjectIdLessThan>
- AckHandleMap;
-
class InvalidationStateTracker {
public:
InvalidationStateTracker() {}
- virtual InvalidationStateMap GetAllInvalidationStates() const = 0;
-
- // |max_version| should be strictly greater than any existing max
- // version for |model_type|.
- virtual void SetMaxVersionAndPayload(const invalidation::ObjectId& id,
- int64 max_version,
- const std::string& payload) = 0;
- // Removes all state tracked for |ids|.
- virtual void Forget(const ObjectIdSet& ids) = 0;
-
// The per-client unique ID used to register the invalidation client with the
// server. This is used to squelch invalidation notifications that originate
// from changes made by this client.
@@ -75,24 +47,15 @@ class InvalidationStateTracker {
virtual void SetBootstrapData(const std::string& data) = 0;
virtual std::string GetBootstrapData() const = 0;
+ // Used to store invalidations that have been acked to the server, but not yet
+ // handled by our clients. We store these invalidations on disk so we won't
+ // lose them if we need to restart.
+ virtual void SetSavedInvalidations(const UnackedInvalidationsMap& states) = 0;
+ virtual UnackedInvalidationsMap GetSavedInvalidations() const = 0;
+
// Erases invalidation versions, client ID, and state stored on disk.
virtual void Clear() = 0;
- // Used for generating our own local ack handles. Generates a new ack handle
- // for each object id in |ids|. The result is returned via |callback| posted
- // to |task_runner|.
- virtual void GenerateAckHandles(
- const ObjectIdSet& ids,
- const scoped_refptr<base::TaskRunner>& task_runner,
- base::Callback<void(const AckHandleMap&)> callback) = 0;
-
- // Records an acknowledgement for |id|. Note that no attempt at ordering is
- // made. Acknowledge() only records the last ack_handle it received, even if
- // the last ack_handle it received was generated before the value currently
- // recorded.
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) = 0;
-
protected:
virtual ~InvalidationStateTracker() {}
};
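The two methods added above replace the old per-object version bookkeeping with whole-map persistence. A brief usage sketch (not part of this patch) of the round trip through the interface, using the existing FakeInvalidationStateTracker from this directory; the actual contents of the map are elided:

  syncer::FakeInvalidationStateTracker tracker;
  syncer::UnackedInvalidationsMap to_save;
  // ... populate |to_save| with the invalidator's acked-but-unhandled state ...
  tracker.SetSavedInvalidations(to_save);
  // After a restart, the invalidator can re-seed itself from the same tracker.
  syncer::UnackedInvalidationsMap restored = tracker.GetSavedInvalidations();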
diff --git a/chromium/sync/notifier/invalidation_util.cc b/chromium/sync/notifier/invalidation_util.cc
index 7cc80d299ba..27acd38c8d4 100644
--- a/chromium/sync/notifier/invalidation_util.cc
+++ b/chromium/sync/notifier/invalidation_util.cc
@@ -12,6 +12,7 @@
#include "base/values.h"
#include "google/cacheinvalidation/include/types.h"
#include "google/cacheinvalidation/types.pb.h"
+#include "sync/internal_api/public/base/invalidation.h"
namespace invalidation {
void PrintTo(const invalidation::ObjectId& id, std::ostream* os) {
@@ -27,6 +28,25 @@ bool ObjectIdLessThan::operator()(const invalidation::ObjectId& lhs,
(lhs.source() == rhs.source() && lhs.name() < rhs.name());
}
+bool InvalidationVersionLessThan::operator()(
+ const Invalidation& a,
+ const Invalidation& b) const {
+ DCHECK(a.object_id() == b.object_id())
+ << "a: " << ObjectIdToString(a.object_id()) << ", "
+      << "b: " << ObjectIdToString(b.object_id());
+
+ if (a.is_unknown_version() && !b.is_unknown_version())
+ return true;
+
+ if (!a.is_unknown_version() && b.is_unknown_version())
+ return false;
+
+ if (a.is_unknown_version() && b.is_unknown_version())
+ return false;
+
+ return a.version() < b.version();
+}
+
bool RealModelTypeToObjectId(ModelType model_type,
invalidation::ObjectId* object_id) {
std::string notification_type;
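The comparator added in this file orders the invalidations held for a single object. A self-contained sketch of that same ordering, using a simplified stand-in for syncer::Invalidation (the names below are illustrative, not Chromium types): unknown-version invalidations sort before every known version, and known versions sort numerically.

  #include <cassert>
  #include <set>

  struct FakeInvalidation {
    FakeInvalidation(bool unknown, int v)
        : is_unknown_version(unknown), version(v) {}
    bool is_unknown_version;
    int version;  // Ignored when is_unknown_version is true.
  };

  struct VersionLessThan {
    bool operator()(const FakeInvalidation& a, const FakeInvalidation& b) const {
      if (a.is_unknown_version && !b.is_unknown_version)
        return true;
      if (!a.is_unknown_version && b.is_unknown_version)
        return false;
      if (a.is_unknown_version && b.is_unknown_version)
        return false;  // All unknown-version entries compare equivalent.
      return a.version < b.version;
    }
  };

  int main() {
    std::set<FakeInvalidation, VersionLessThan> ordered;
    ordered.insert(FakeInvalidation(false, 10));
    ordered.insert(FakeInvalidation(true, 0));
    ordered.insert(FakeInvalidation(false, 5));
    assert(ordered.begin()->is_unknown_version);  // Unknown version sorts first.
    assert(ordered.rbegin()->version == 10);      // Highest known version sorts last.
    return 0;
  }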
diff --git a/chromium/sync/notifier/invalidation_util.h b/chromium/sync/notifier/invalidation_util.h
index 670f61209ea..699550e7a69 100644
--- a/chromium/sync/notifier/invalidation_util.h
+++ b/chromium/sync/notifier/invalidation_util.h
@@ -32,11 +32,18 @@ SYNC_EXPORT_PRIVATE void PrintTo(const invalidation::ObjectId& id,
namespace syncer {
+class Invalidation;
+
struct SYNC_EXPORT ObjectIdLessThan {
bool operator()(const invalidation::ObjectId& lhs,
const invalidation::ObjectId& rhs) const;
};
+struct InvalidationVersionLessThan {
+ bool operator()(const syncer::Invalidation& a,
+ const syncer::Invalidation& b) const;
+};
+
typedef std::set<invalidation::ObjectId, ObjectIdLessThan> ObjectIdSet;
SYNC_EXPORT bool RealModelTypeToObjectId(ModelType model_type,
diff --git a/chromium/sync/notifier/invalidator.h b/chromium/sync/notifier/invalidator.h
index ccb69222083..b6a7467ac9f 100644
--- a/chromium/sync/notifier/invalidator.h
+++ b/chromium/sync/notifier/invalidator.h
@@ -15,7 +15,6 @@
#include "sync/internal_api/public/base/model_type.h"
#include "sync/notifier/invalidation_util.h"
#include "sync/notifier/invalidator_state.h"
-#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
class InvalidationHandler;
@@ -69,10 +68,6 @@ class SYNC_EXPORT Invalidator {
// associated with |handler|.
virtual void UnregisterHandler(InvalidationHandler* handler) = 0;
- // Acknowledge that an invalidation for |id| was handled.
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) = 0;
-
// Returns the current invalidator state. When called from within
// InvalidationHandler::OnInvalidatorStateChange(), this must return
// the updated state.
diff --git a/chromium/sync/notifier/invalidator_registrar.cc b/chromium/sync/notifier/invalidator_registrar.cc
index c2a18f9e8fd..1c9c50cbbf2 100644
--- a/chromium/sync/notifier/invalidator_registrar.cc
+++ b/chromium/sync/notifier/invalidator_registrar.cc
@@ -5,9 +5,11 @@
#include "sync/notifier/invalidator_registrar.h"
#include <cstddef>
+#include <iterator>
#include <utility>
#include "base/logging.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
@@ -17,7 +19,7 @@ InvalidatorRegistrar::InvalidatorRegistrar()
InvalidatorRegistrar::~InvalidatorRegistrar() {
DCHECK(thread_checker_.CalledOnValidThread());
CHECK(!handlers_.might_have_observers());
- // |id_to_handler_map_| may be non-empty but that's okay.
+ CHECK(handler_to_ids_map_.empty());
}
void InvalidatorRegistrar::RegisterHandler(InvalidationHandler* handler) {
@@ -33,29 +35,30 @@ void InvalidatorRegistrar::UpdateRegisteredIds(
DCHECK(thread_checker_.CalledOnValidThread());
CHECK(handler);
CHECK(handlers_.HasObserver(handler));
- // Remove all existing entries for |handler|.
- for (IdHandlerMap::iterator it = id_to_handler_map_.begin();
- it != id_to_handler_map_.end(); ) {
- if (it->second == handler) {
- IdHandlerMap::iterator erase_it = it;
- ++it;
- id_to_handler_map_.erase(erase_it);
- } else {
- ++it;
+
+ for (HandlerIdsMap::const_iterator it = handler_to_ids_map_.begin();
+ it != handler_to_ids_map_.end(); ++it) {
+ if (it->first == handler) {
+ continue;
}
- }
- // Now add the entries for |handler|. We keep track of the last insertion
- // point so we only traverse the map once to insert all the new entries.
- IdHandlerMap::iterator insert_it = id_to_handler_map_.begin();
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- insert_it =
- id_to_handler_map_.insert(insert_it, std::make_pair(*it, handler));
- CHECK_EQ(handler, insert_it->second)
+ std::vector<invalidation::ObjectId> intersection;
+ std::set_intersection(
+ it->second.begin(), it->second.end(),
+ ids.begin(), ids.end(),
+ std::inserter(intersection, intersection.end()),
+ ObjectIdLessThan());
+ CHECK(intersection.empty())
<< "Duplicate registration: trying to register "
- << ObjectIdToString(insert_it->first) << " for "
+ << ObjectIdToString(*intersection.begin()) << " for "
<< handler << " when it's already registered for "
- << insert_it->second;
+ << it->first;
+ }
+
+ if (ids.empty()) {
+ handler_to_ids_map_.erase(handler);
+ } else {
+ handler_to_ids_map_[handler] = ids;
}
}
@@ -64,27 +67,26 @@ void InvalidatorRegistrar::UnregisterHandler(InvalidationHandler* handler) {
CHECK(handler);
CHECK(handlers_.HasObserver(handler));
handlers_.RemoveObserver(handler);
+ handler_to_ids_map_.erase(handler);
}
ObjectIdSet InvalidatorRegistrar::GetRegisteredIds(
InvalidationHandler* handler) const {
DCHECK(thread_checker_.CalledOnValidThread());
- ObjectIdSet registered_ids;
- for (IdHandlerMap::const_iterator it = id_to_handler_map_.begin();
- it != id_to_handler_map_.end(); ++it) {
- if (it->second == handler) {
- registered_ids.insert(it->first);
- }
+ HandlerIdsMap::const_iterator lookup = handler_to_ids_map_.find(handler);
+ if (lookup != handler_to_ids_map_.end()) {
+ return lookup->second;
+ } else {
+ return ObjectIdSet();
}
- return registered_ids;
}
ObjectIdSet InvalidatorRegistrar::GetAllRegisteredIds() const {
DCHECK(thread_checker_.CalledOnValidThread());
ObjectIdSet registered_ids;
- for (IdHandlerMap::const_iterator it = id_to_handler_map_.begin();
- it != id_to_handler_map_.end(); ++it) {
- registered_ids.insert(it->first);
+ for (HandlerIdsMap::const_iterator it = handler_to_ids_map_.begin();
+ it != handler_to_ids_map_.end(); ++it) {
+ registered_ids.insert(it->second.begin(), it->second.end());
}
return registered_ids;
}
@@ -97,23 +99,13 @@ void InvalidatorRegistrar::DispatchInvalidationsToHandlers(
return;
}
- typedef std::map<InvalidationHandler*, ObjectIdInvalidationMap> DispatchMap;
- DispatchMap dispatch_map;
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- InvalidationHandler* const handler = ObjectIdToHandler(it->first);
- // Filter out invalidations for IDs with no handler.
- if (handler)
- dispatch_map[handler].insert(*it);
- }
-
- // Emit invalidations only for handlers in |handlers_|.
- ObserverListBase<InvalidationHandler>::Iterator it(handlers_);
- InvalidationHandler* handler = NULL;
- while ((handler = it.GetNext()) != NULL) {
- DispatchMap::const_iterator dispatch_it = dispatch_map.find(handler);
- if (dispatch_it != dispatch_map.end())
- handler->OnIncomingInvalidation(dispatch_it->second);
+ for (HandlerIdsMap::iterator it = handler_to_ids_map_.begin();
+ it != handler_to_ids_map_.end(); ++it) {
+ ObjectIdInvalidationMap to_emit =
+ invalidation_map.GetSubsetWithObjectIds(it->second);
+ if (!to_emit.Empty()) {
+ it->first->OnIncomingInvalidation(to_emit);
+ }
}
}
@@ -142,11 +134,4 @@ void InvalidatorRegistrar::DetachFromThreadForTest() {
thread_checker_.DetachFromThread();
}
-InvalidationHandler* InvalidatorRegistrar::ObjectIdToHandler(
- const invalidation::ObjectId& id) {
- DCHECK(thread_checker_.CalledOnValidThread());
- IdHandlerMap::const_iterator it = id_to_handler_map_.find(id);
- return (it == id_to_handler_map_.end()) ? NULL : it->second;
-}
-
} // namespace syncer
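The registrar now keeps a handler-to-IDs map and hands each handler only the subset of an incoming batch it registered for. A self-contained sketch of that dispatch pattern with simplified stand-in types (std::string object IDs, a trivial Handler); this is not the Chromium code, just the shape of the logic above:

  #include <iostream>
  #include <map>
  #include <set>
  #include <string>

  typedef std::string ObjectId;
  typedef std::set<ObjectId> ObjectIdSet;
  typedef std::map<ObjectId, int> InvalidationMap;  // object id -> version

  struct Handler {
    std::string name;
    void OnIncomingInvalidation(const InvalidationMap& invalidations) {
      std::cout << name << " got " << invalidations.size() << " invalidation(s)\n";
    }
  };

  // Mirrors ObjectIdInvalidationMap::GetSubsetWithObjectIds().
  InvalidationMap GetSubsetWithObjectIds(const InvalidationMap& all,
                                         const ObjectIdSet& ids) {
    InvalidationMap subset;
    for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
      InvalidationMap::const_iterator lookup = all.find(*it);
      if (lookup != all.end())
        subset.insert(*lookup);
    }
    return subset;
  }

  int main() {
    std::map<Handler*, ObjectIdSet> handler_to_ids;
    Handler bookmarks = {"bookmarks"};
    Handler themes = {"themes"};
    handler_to_ids[&bookmarks].insert("BOOKMARK");
    handler_to_ids[&themes].insert("THEME");

    InvalidationMap incoming;
    incoming["BOOKMARK"] = 5;  // Only the bookmarks handler should be notified.

    // Mirrors DispatchInvalidationsToHandlers(): emit per-handler subsets and
    // skip handlers whose subset is empty.
    for (std::map<Handler*, ObjectIdSet>::iterator it = handler_to_ids.begin();
         it != handler_to_ids.end(); ++it) {
      InvalidationMap to_emit = GetSubsetWithObjectIds(incoming, it->second);
      if (!to_emit.empty())
        it->first->OnIncomingInvalidation(to_emit);
    }
    return 0;
  }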
diff --git a/chromium/sync/notifier/invalidator_registrar.h b/chromium/sync/notifier/invalidator_registrar.h
index f2a3c638bda..fb6b3881c3c 100644
--- a/chromium/sync/notifier/invalidator_registrar.h
+++ b/chromium/sync/notifier/invalidator_registrar.h
@@ -13,7 +13,6 @@
#include "sync/base/sync_export.h"
#include "sync/notifier/invalidation_handler.h"
#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
namespace invalidation {
class ObjectId;
@@ -21,6 +20,8 @@ class ObjectId;
namespace syncer {
+class ObjectIdInvalidationMap;
+
// A helper class for implementations of the Invalidator interface. It helps
// keep track of registered handlers and which object ID registrations are
// associated with which handlers, so implementors can just reuse the logic
@@ -76,15 +77,11 @@ class SYNC_EXPORT InvalidatorRegistrar {
void DetachFromThreadForTest();
private:
- typedef std::map<invalidation::ObjectId, InvalidationHandler*,
- ObjectIdLessThan>
- IdHandlerMap;
-
- InvalidationHandler* ObjectIdToHandler(const invalidation::ObjectId& id);
+ typedef std::map<InvalidationHandler*, ObjectIdSet> HandlerIdsMap;
base::ThreadChecker thread_checker_;
ObserverList<InvalidationHandler> handlers_;
- IdHandlerMap id_to_handler_map_;
+ HandlerIdsMap handler_to_ids_map_;
InvalidatorState state_;
DISALLOW_COPY_AND_ASSIGN(InvalidatorRegistrar);
diff --git a/chromium/sync/notifier/invalidator_registrar_unittest.cc b/chromium/sync/notifier/invalidator_registrar_unittest.cc
index ad222477dfb..c527bc5e023 100644
--- a/chromium/sync/notifier/invalidator_registrar_unittest.cc
+++ b/chromium/sync/notifier/invalidator_registrar_unittest.cc
@@ -9,7 +9,6 @@
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/invalidator_registrar.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -43,11 +42,6 @@ class RegistrarInvalidator : public Invalidator {
registrar_.UnregisterHandler(handler);
}
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE {
- // Do nothing.
- }
-
virtual InvalidatorState GetInvalidatorState() const OVERRIDE {
return registrar_.GetInvalidatorState();
}
diff --git a/chromium/sync/notifier/invalidator_test_template.h b/chromium/sync/notifier/invalidator_test_template.h
index 0353000422b..67cd0536eb7 100644
--- a/chromium/sync/notifier/invalidator_test_template.h
+++ b/chromium/sync/notifier/invalidator_test_template.h
@@ -81,11 +81,10 @@
#include "base/compiler_specific.h"
#include "google/cacheinvalidation/include/types.h"
#include "google/cacheinvalidation/types.pb.h"
+#include "sync/internal_api/public/base/object_id_invalidation_map_test_util.h"
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/fake_invalidation_state_tracker.h"
#include "sync/notifier/invalidator.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -135,13 +134,13 @@ TYPED_TEST_P(InvalidatorTest, Basic) {
invalidator->RegisterHandler(&handler);
- ObjectIdInvalidationMap states;
- states[this->id1].payload = "1";
- states[this->id2].payload = "2";
- states[this->id3].payload = "3";
+ ObjectIdInvalidationMap invalidation_map;
+ invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
+ invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
+ invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
// Should be ignored since no IDs are registered to |handler|.
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(0, handler.GetInvalidationCount());
ObjectIdSet ids;
@@ -152,25 +151,26 @@ TYPED_TEST_P(InvalidatorTest, Basic) {
this->delegate_.TriggerOnInvalidatorStateChange(INVALIDATIONS_ENABLED);
EXPECT_EQ(INVALIDATIONS_ENABLED, handler.GetInvalidatorState());
- ObjectIdInvalidationMap expected_states;
- expected_states[this->id1].payload = "1";
- expected_states[this->id2].payload = "2";
+ ObjectIdInvalidationMap expected_invalidations;
+ expected_invalidations.Insert(Invalidation::Init(this->id1, 1, "1"));
+ expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(1, handler.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler.GetLastInvalidationMap()));
ids.erase(this->id1);
ids.insert(this->id3);
invalidator->UpdateRegisteredIds(&handler, ids);
- expected_states.erase(this->id1);
- expected_states[this->id3].payload = "3";
+ expected_invalidations = ObjectIdInvalidationMap();
+ expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
+ expected_invalidations.Insert(Invalidation::Init(this->id3, 3, "3"));
// Removed object IDs should not be notified, newly-added ones should.
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(2, handler.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler.GetLastInvalidationMap()));
this->delegate_.TriggerOnInvalidatorStateChange(TRANSIENT_INVALIDATION_ERROR);
EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR,
@@ -184,7 +184,7 @@ TYPED_TEST_P(InvalidatorTest, Basic) {
invalidator->UnregisterHandler(&handler);
// Should be ignored since |handler| isn't registered anymore.
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(2, handler.GetInvalidationCount());
}
@@ -236,25 +236,26 @@ TYPED_TEST_P(InvalidatorTest, MultipleHandlers) {
EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler4.GetInvalidatorState());
{
- ObjectIdInvalidationMap states;
- states[this->id1].payload = "1";
- states[this->id2].payload = "2";
- states[this->id3].payload = "3";
- states[this->id4].payload = "4";
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ ObjectIdInvalidationMap invalidation_map;
+ invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
+ invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
+ invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
+ invalidation_map.Insert(Invalidation::Init(this->id4, 4, "4"));
- ObjectIdInvalidationMap expected_states;
- expected_states[this->id1].payload = "1";
- expected_states[this->id2].payload = "2";
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
+
+ ObjectIdInvalidationMap expected_invalidations;
+ expected_invalidations.Insert(Invalidation::Init(this->id1, 1, "1"));
+ expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
EXPECT_EQ(1, handler1.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler1.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler1.GetLastInvalidationMap()));
- expected_states.clear();
- expected_states[this->id3].payload = "3";
+ expected_invalidations = ObjectIdInvalidationMap();
+ expected_invalidations.Insert(Invalidation::Init(this->id3, 3, "3"));
EXPECT_EQ(1, handler2.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler2.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler2.GetLastInvalidationMap()));
EXPECT_EQ(0, handler3.GetInvalidationCount());
EXPECT_EQ(0, handler4.GetInvalidationCount());
@@ -306,11 +307,11 @@ TYPED_TEST_P(InvalidatorTest, EmptySetUnregisters) {
EXPECT_EQ(INVALIDATIONS_ENABLED, handler2.GetInvalidatorState());
{
- ObjectIdInvalidationMap states;
- states[this->id1].payload = "1";
- states[this->id2].payload = "2";
- states[this->id3].payload = "3";
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ ObjectIdInvalidationMap invalidation_map;
+ invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
+ invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
+ invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(0, handler1.GetInvalidationCount());
EXPECT_EQ(1, handler2.GetInvalidationCount());
}
diff --git a/chromium/sync/notifier/mock_ack_handler.cc b/chromium/sync/notifier/mock_ack_handler.cc
new file mode 100644
index 00000000000..6a4c834e27f
--- /dev/null
+++ b/chromium/sync/notifier/mock_ack_handler.cc
@@ -0,0 +1,85 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/mock_ack_handler.h"
+
+#include "sync/internal_api/public/base/ack_handle.h"
+#include "sync/internal_api/public/base/invalidation.h"
+
+namespace syncer {
+
+namespace {
+
+struct AckHandleMatcher {
+ AckHandleMatcher(const AckHandle& handle);
+ bool operator()(const syncer::Invalidation& invalidation) const;
+
+ syncer::AckHandle handle_;
+};
+
+AckHandleMatcher::AckHandleMatcher(const AckHandle& handle)
+ : handle_(handle) {}
+
+bool AckHandleMatcher::operator()(
+ const syncer::Invalidation& invalidation) const {
+ return handle_.Equals(invalidation.ack_handle());
+}
+
+} // namespace
+
+MockAckHandler::MockAckHandler() {}
+
+MockAckHandler::~MockAckHandler() {}
+
+void MockAckHandler::RegisterInvalidation(Invalidation* invalidation) {
+ unacked_invalidations_.push_back(*invalidation);
+ invalidation->set_ack_handler(WeakHandleThis());
+}
+
+void MockAckHandler::RegisterUnsentInvalidation(Invalidation* invalidation) {
+ unsent_invalidations_.push_back(*invalidation);
+}
+
+bool MockAckHandler::IsUnacked(const Invalidation& invalidation) const {
+ AckHandleMatcher matcher(invalidation.ack_handle());
+ InvalidationVector::const_iterator it = std::find_if(
+ unacked_invalidations_.begin(),
+ unacked_invalidations_.end(),
+ matcher);
+ return it != unacked_invalidations_.end();
+}
+
+bool MockAckHandler::IsUnsent(const Invalidation& invalidation) const {
+ AckHandleMatcher matcher(invalidation.ack_handle());
+ InvalidationVector::const_iterator it1 = std::find_if(
+ unsent_invalidations_.begin(),
+ unsent_invalidations_.end(),
+ matcher);
+ return it1 != unsent_invalidations_.end();
+}
+
+void MockAckHandler::Acknowledge(
+ const invalidation::ObjectId& id,
+ const AckHandle& handle) {
+ AckHandleMatcher matcher(handle);
+ InvalidationVector::iterator it = std::find_if(
+ unacked_invalidations_.begin(),
+ unacked_invalidations_.end(),
+ matcher);
+ if (it != unacked_invalidations_.end()) {
+ acked_invalidations_.push_back(*it);
+ unacked_invalidations_.erase(it);
+ }
+}
+
+void MockAckHandler::Drop(
+ const invalidation::ObjectId& id,
+ const AckHandle& handle) {
+}
+
+WeakHandle<AckHandler> MockAckHandler::WeakHandleThis() {
+ return WeakHandle<AckHandler>(AsWeakPtr());
+}
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/mock_ack_handler.h b/chromium/sync/notifier/mock_ack_handler.h
new file mode 100644
index 00000000000..bf6ecc939a1
--- /dev/null
+++ b/chromium/sync/notifier/mock_ack_handler.h
@@ -0,0 +1,64 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_MOCK_ACK_HANDLER_H_
+#define SYNC_NOTIFIER_MOCK_ACK_HANDLER_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/util/weak_handle.h"
+#include "sync/notifier/ack_handler.h"
+
+namespace syncer {
+
+class Invalidation;
+
+// This AckHandler implementation collaborates with the FakeInvalidationService
+// to enable unit tests to assert that invalidations are being acked properly.
+class SYNC_EXPORT MockAckHandler
+ : public AckHandler,
+ public base::SupportsWeakPtr<MockAckHandler> {
+ public:
+ MockAckHandler();
+ virtual ~MockAckHandler();
+
+ // Sets up some internal state to track this invalidation, and modifies it so
+ // that its Acknowledge() and Drop() methods will route back to us.
+ void RegisterInvalidation(Invalidation* invalidation);
+
+ // No one was listening for this invalidation, so no one will receive it or
+ // ack it. We keep track of it anyway to let tests make assertions about it.
+ void RegisterUnsentInvalidation(Invalidation* invalidation);
+
+  // Returns true if the specified invalidation has been delivered, but has not
+ // been acknowledged yet.
+ bool IsUnacked(const Invalidation& invalidation) const;
+
+ // Returns true if the specified invalidation was never delivered.
+ bool IsUnsent(const Invalidation& invalidation) const;
+
+ // Implementation of AckHandler.
+ virtual void Acknowledge(
+ const invalidation::ObjectId& id,
+ const AckHandle& handle) OVERRIDE;
+ virtual void Drop(
+ const invalidation::ObjectId& id,
+ const AckHandle& handle) OVERRIDE;
+
+ private:
+ typedef std::vector<syncer::Invalidation> InvalidationVector;
+
+ WeakHandle<AckHandler> WeakHandleThis();
+
+ InvalidationVector unsent_invalidations_;
+ InvalidationVector unacked_invalidations_;
+ InvalidationVector acked_invalidations_;
+};
+
+} // namespace syncer
+
+#endif // SYNC_NOTIFIER_MOCK_ACK_HANDLER_H_
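A test-body sketch (not part of this patch) of how this mock is intended to be used; the ObjectId and versions are arbitrary, and because acks travel through a WeakHandle a real test may need to pump the message loop before asserting on acked state.

  invalidation::ObjectId id(ipc::invalidation::ObjectSource::TEST, "one");
  syncer::MockAckHandler ack_handler;

  syncer::Invalidation delivered = syncer::Invalidation::Init(id, 10, "payload");
  syncer::Invalidation undelivered = syncer::Invalidation::Init(id, 11, "later");

  ack_handler.RegisterInvalidation(&delivered);          // Acks route back to the mock.
  ack_handler.RegisterUnsentInvalidation(&undelivered);  // Tracked but never delivered.

  EXPECT_TRUE(ack_handler.IsUnacked(delivered));
  EXPECT_TRUE(ack_handler.IsUnsent(undelivered));

  delivered.Acknowledge();  // Eventually reaches MockAckHandler::Acknowledge().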
diff --git a/chromium/sync/notifier/non_blocking_invalidator.cc b/chromium/sync/notifier/non_blocking_invalidator.cc
index d4c602b4e45..bd0596722b0 100644
--- a/chromium/sync/notifier/non_blocking_invalidator.cc
+++ b/chromium/sync/notifier/non_blocking_invalidator.cc
@@ -14,6 +14,7 @@
#include "base/threading/thread.h"
#include "jingle/notifier/listener/push_client.h"
#include "sync/notifier/invalidation_notifier.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
@@ -31,14 +32,12 @@ class NonBlockingInvalidator::Core
void Initialize(
const notifier::NotifierOptions& notifier_options,
const std::string& invalidator_client_id,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& saved_invalidations,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
const std::string& client_info);
void Teardown();
void UpdateRegisteredIds(const ObjectIdSet& ids);
- void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle);
void UpdateCredentials(const std::string& email, const std::string& token);
// InvalidationHandler implementation (all called on I/O thread by
@@ -73,7 +72,7 @@ NonBlockingInvalidator::Core::~Core() {
void NonBlockingInvalidator::Core::Initialize(
const notifier::NotifierOptions& notifier_options,
const std::string& invalidator_client_id,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& saved_invalidations,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
const std::string& client_info) {
@@ -87,7 +86,7 @@ void NonBlockingInvalidator::Core::Initialize(
new InvalidationNotifier(
notifier::PushClient::CreateDefaultOnIOThread(notifier_options),
invalidator_client_id,
- initial_invalidation_state_map,
+ saved_invalidations,
invalidation_bootstrap_data,
invalidation_state_tracker,
client_info));
@@ -106,12 +105,6 @@ void NonBlockingInvalidator::Core::UpdateRegisteredIds(const ObjectIdSet& ids) {
invalidation_notifier_->UpdateRegisteredIds(this, ids);
}
-void NonBlockingInvalidator::Core::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- invalidation_notifier_->Acknowledge(id, ack_handle);
-}
-
void NonBlockingInvalidator::Core::UpdateCredentials(const std::string& email,
const std::string& token) {
DCHECK(network_task_runner_->BelongsToCurrentThread());
@@ -136,18 +129,17 @@ void NonBlockingInvalidator::Core::OnIncomingInvalidation(
NonBlockingInvalidator::NonBlockingInvalidator(
const notifier::NotifierOptions& notifier_options,
const std::string& invalidator_client_id,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& saved_invalidations,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>&
invalidation_state_tracker,
const std::string& client_info)
- : weak_ptr_factory_(this),
- core_(
- new Core(MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()))),
- parent_task_runner_(
- base::ThreadTaskRunnerHandle::Get()),
- network_task_runner_(notifier_options.request_context_getter->
- GetNetworkTaskRunner()) {
+ : parent_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ network_task_runner_(
+ notifier_options.request_context_getter->GetNetworkTaskRunner()),
+ weak_ptr_factory_(this) {
+ core_ = new Core(MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()));
+
if (!network_task_runner_->PostTask(
FROM_HERE,
base::Bind(
@@ -155,7 +147,7 @@ NonBlockingInvalidator::NonBlockingInvalidator(
core_.get(),
notifier_options,
invalidator_client_id,
- initial_invalidation_state_map,
+ saved_invalidations,
invalidation_bootstrap_data,
invalidation_state_tracker,
client_info))) {
@@ -197,20 +189,6 @@ void NonBlockingInvalidator::UnregisterHandler(InvalidationHandler* handler) {
registrar_.UnregisterHandler(handler);
}
-void NonBlockingInvalidator::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(
- &NonBlockingInvalidator::Core::Acknowledge,
- core_.get(),
- id,
- ack_handle))) {
- NOTREACHED();
- }
-}
-
InvalidatorState NonBlockingInvalidator::GetInvalidatorState() const {
DCHECK(parent_task_runner_->BelongsToCurrentThread());
return registrar_.GetInvalidatorState();
diff --git a/chromium/sync/notifier/non_blocking_invalidator.h b/chromium/sync/notifier/non_blocking_invalidator.h
index f2685c702d2..d40166adfc6 100644
--- a/chromium/sync/notifier/non_blocking_invalidator.h
+++ b/chromium/sync/notifier/non_blocking_invalidator.h
@@ -28,8 +28,6 @@ class SingleThreadTaskRunner;
namespace syncer {
-// TODO(akalin): Generalize the interface so it can use any Invalidator.
-// (http://crbug.com/140409).
class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
: public Invalidator,
// InvalidationHandler to "observe" our Core via WeakHandle.
@@ -39,7 +37,7 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
NonBlockingInvalidator(
const notifier::NotifierOptions& notifier_options,
const std::string& invalidator_client_id,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& saved_invalidations,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>&
invalidation_state_tracker,
@@ -52,8 +50,6 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
virtual void UpdateRegisteredIds(InvalidationHandler* handler,
const ObjectIdSet& ids) OVERRIDE;
virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
@@ -66,8 +62,6 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
private:
class Core;
- base::WeakPtrFactory<NonBlockingInvalidator> weak_ptr_factory_;
-
InvalidatorRegistrar registrar_;
// The real guts of NonBlockingInvalidator, which allows this class to live
@@ -76,6 +70,8 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
+ base::WeakPtrFactory<NonBlockingInvalidator> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(NonBlockingInvalidator);
};
diff --git a/chromium/sync/notifier/non_blocking_invalidator_unittest.cc b/chromium/sync/notifier/non_blocking_invalidator_unittest.cc
index f463077c088..0c439c502c6 100644
--- a/chromium/sync/notifier/non_blocking_invalidator_unittest.cc
+++ b/chromium/sync/notifier/non_blocking_invalidator_unittest.cc
@@ -17,7 +17,6 @@
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/invalidation_state_tracker.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -49,7 +48,7 @@ class NonBlockingInvalidatorTestDelegate {
new NonBlockingInvalidator(
invalidator_options,
invalidator_client_id,
- InvalidationStateMap(),
+ UnackedInvalidationsMap(),
initial_state,
MakeWeakHandle(invalidation_state_tracker),
"fake_client_info"));
diff --git a/chromium/sync/notifier/object_id_invalidation_map.cc b/chromium/sync/notifier/object_id_invalidation_map.cc
index bde2e4c2457..1082eaa29be 100644
--- a/chromium/sync/notifier/object_id_invalidation_map.cc
+++ b/chromium/sync/notifier/object_id_invalidation_map.cc
@@ -1,93 +1,121 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sync/notifier/object_id_invalidation_map.h"
-#include <algorithm>
-
-#include "base/compiler_specific.h"
-#include "base/values.h"
+#include "base/json/json_string_value_serializer.h"
namespace syncer {
-ObjectIdSet ObjectIdInvalidationMapToSet(
- const ObjectIdInvalidationMap& invalidation_map) {
- ObjectIdSet ids;
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- ids.insert(it->first);
+// static
+ObjectIdInvalidationMap ObjectIdInvalidationMap::InvalidateAll(
+ const ObjectIdSet& ids) {
+ ObjectIdInvalidationMap invalidate_all;
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
+ invalidate_all.Insert(Invalidation::InitUnknownVersion(*it));
+ }
+ return invalidate_all;
+}
+
+ObjectIdInvalidationMap::ObjectIdInvalidationMap() {}
+
+ObjectIdInvalidationMap::~ObjectIdInvalidationMap() {}
+
+ObjectIdSet ObjectIdInvalidationMap::GetObjectIds() const {
+ ObjectIdSet ret;
+ for (IdToListMap::const_iterator it = map_.begin(); it != map_.end(); ++it) {
+ ret.insert(it->first);
}
- return ids;
+ return ret;
+}
+
+bool ObjectIdInvalidationMap::Empty() const {
+ return map_.empty();
}
-ObjectIdInvalidationMap ObjectIdSetToInvalidationMap(
- const ObjectIdSet& ids, int64 version, const std::string& payload) {
- ObjectIdInvalidationMap invalidation_map;
+void ObjectIdInvalidationMap::Insert(const Invalidation& invalidation) {
+ map_[invalidation.object_id()].Insert(invalidation);
+}
+
+ObjectIdInvalidationMap ObjectIdInvalidationMap::GetSubsetWithObjectIds(
+ const ObjectIdSet& ids) const {
+ IdToListMap new_map;
for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- // TODO(dcheng): Do we need to provide a way to set AckHandle?
- invalidation_map[*it].version = version;
- invalidation_map[*it].payload = payload;
+ IdToListMap::const_iterator lookup = map_.find(*it);
+ if (lookup != map_.end()) {
+ new_map[*it] = lookup->second;
+ }
}
- return invalidation_map;
+ return ObjectIdInvalidationMap(new_map);
}
-namespace {
+const SingleObjectInvalidationSet& ObjectIdInvalidationMap::ForObject(
+ invalidation::ObjectId id) const {
+ IdToListMap::const_iterator lookup = map_.find(id);
+ DCHECK(lookup != map_.end());
+ DCHECK(!lookup->second.IsEmpty());
+ return lookup->second;
+}
-struct ObjectIdInvalidationMapValueEquals {
- bool operator()(const ObjectIdInvalidationMap::value_type& value1,
- const ObjectIdInvalidationMap::value_type& value2) const {
- return
- (value1.first == value2.first) &&
- value1.second.Equals(value2.second);
+void ObjectIdInvalidationMap::GetAllInvalidations(
+ std::vector<syncer::Invalidation>* out) const {
+ for (IdToListMap::const_iterator it = map_.begin(); it != map_.end(); ++it) {
+ out->insert(out->begin(), it->second.begin(), it->second.end());
+ }
+}
+void ObjectIdInvalidationMap::AcknowledgeAll() const {
+ for (IdToListMap::const_iterator it1 = map_.begin();
+ it1 != map_.end(); ++it1) {
+ for (SingleObjectInvalidationSet::const_iterator it2 = it1->second.begin();
+ it2 != it1->second.end(); ++it2) {
+ it2->Acknowledge();
+ }
}
-};
-
-} // namespace
-
-bool ObjectIdInvalidationMapEquals(
- const ObjectIdInvalidationMap& invalidation_map1,
- const ObjectIdInvalidationMap& invalidation_map2) {
- return
- (invalidation_map1.size() == invalidation_map2.size()) &&
- std::equal(invalidation_map1.begin(), invalidation_map1.end(),
- invalidation_map2.begin(),
- ObjectIdInvalidationMapValueEquals());
}
-scoped_ptr<base::ListValue> ObjectIdInvalidationMapToValue(
- const ObjectIdInvalidationMap& invalidation_map) {
+bool ObjectIdInvalidationMap::operator==(
+ const ObjectIdInvalidationMap& other) const {
+ return map_ == other.map_;
+}
+
+scoped_ptr<base::ListValue> ObjectIdInvalidationMap::ToValue() const {
scoped_ptr<base::ListValue> value(new base::ListValue());
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- base::DictionaryValue* entry = new base::DictionaryValue();
- entry->Set("objectId", ObjectIdToValue(it->first).release());
- entry->Set("state", it->second.ToValue().release());
- value->Append(entry);
+ for (IdToListMap::const_iterator it1 = map_.begin();
+ it1 != map_.end(); ++it1) {
+ for (SingleObjectInvalidationSet::const_iterator it2 =
+ it1->second.begin(); it2 != it1->second.end(); ++it2) {
+ value->Append(it2->ToValue().release());
+ }
}
return value.Pass();
}
-bool ObjectIdInvalidationMapFromValue(const base::ListValue& value,
- ObjectIdInvalidationMap* out) {
- out->clear();
- for (base::ListValue::const_iterator it = value.begin();
- it != value.end(); ++it) {
- const base::DictionaryValue* entry = NULL;
- const base::DictionaryValue* id_value = NULL;
- const base::DictionaryValue* invalidation_value = NULL;
- invalidation::ObjectId id;
- Invalidation invalidation;
- if (!(*it)->GetAsDictionary(&entry) ||
- !entry->GetDictionary("objectId", &id_value) ||
- !entry->GetDictionary("state", &invalidation_value) ||
- !ObjectIdFromValue(*id_value, &id) ||
- !invalidation.ResetFromValue(*invalidation_value)) {
+bool ObjectIdInvalidationMap::ResetFromValue(const base::ListValue& value) {
+ map_.clear();
+ for (size_t i = 0; i < value.GetSize(); ++i) {
+ const DictionaryValue* dict;
+ if (!value.GetDictionary(i, &dict)) {
+ return false;
+ }
+ scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
+ if (!invalidation) {
return false;
}
- ignore_result(out->insert(std::make_pair(id, invalidation)));
+ Insert(*invalidation.get());
}
return true;
}
+std::string ObjectIdInvalidationMap::ToString() const {
+ std::string output;
+ JSONStringValueSerializer serializer(&output);
+ serializer.set_pretty_print(true);
+ serializer.Serialize(*ToValue().get());
+ return output;
+}
+
+ObjectIdInvalidationMap::ObjectIdInvalidationMap(const IdToListMap& map)
+ : map_(map) {}
+
} // namespace syncer
diff --git a/chromium/sync/notifier/object_id_invalidation_map.h b/chromium/sync/notifier/object_id_invalidation_map.h
index bb97fb3b133..3494a62aa2a 100644
--- a/chromium/sync/notifier/object_id_invalidation_map.h
+++ b/chromium/sync/notifier/object_id_invalidation_map.h
@@ -1,4 +1,4 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,40 +6,70 @@
#define SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_H_
#include <map>
-#include <string>
+#include <vector>
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "google/cacheinvalidation/include/types.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/invalidation.h"
#include "sync/notifier/invalidation_util.h"
-
-namespace base {
-class ListValue;
-} // namespace base
+#include "sync/notifier/single_object_invalidation_set.h"
namespace syncer {
-typedef std::map<invalidation::ObjectId,
- Invalidation,
- ObjectIdLessThan> ObjectIdInvalidationMap;
+// A set of notifications with some helper methods to organize them by object ID
+// and version number.
+class SYNC_EXPORT ObjectIdInvalidationMap {
+ public:
+ // Creates an invalidation map that includes an 'unknown version'
+ // invalidation for each specified ID in |ids|.
+ static ObjectIdInvalidationMap InvalidateAll(const ObjectIdSet& ids);
+
+ ObjectIdInvalidationMap();
+ ~ObjectIdInvalidationMap();
+
+ // Returns set of ObjectIds for which at least one invalidation is present.
+ ObjectIdSet GetObjectIds() const;
+
+ // Returns true if this map contains no invalidations.
+ bool Empty() const;
+
+ // Returns true if both maps contain the same set of invalidations.
+ bool operator==(const ObjectIdInvalidationMap& other) const;
+
+ // Inserts a new invalidation into this map.
+ void Insert(const Invalidation& invalidation);
+
+  // Returns a new map containing the subset of invalidations from this map
+ // whose IDs were in the specified |ids| set.
+ ObjectIdInvalidationMap GetSubsetWithObjectIds(const ObjectIdSet& ids) const;
+
+ // Returns the subset of invalidations with IDs matching |id|.
+ const SingleObjectInvalidationSet& ForObject(
+ invalidation::ObjectId id) const;
+
+ // Returns the contents of this map in a single vector.
+ void GetAllInvalidations(std::vector<syncer::Invalidation>* out) const;
+
+ // Call Acknowledge() on all contained Invalidations.
+ void AcknowledgeAll() const;
+
+ // Serialize this map to a value.
+ scoped_ptr<base::ListValue> ToValue() const;
+
+ // Deserialize the value into a map and use it to re-initialize this object.
+ bool ResetFromValue(const base::ListValue& value);
-// Converts between ObjectIdInvalidationMaps and ObjectIdSets.
-ObjectIdSet ObjectIdInvalidationMapToSet(
- const ObjectIdInvalidationMap& invalidation_map);
-SYNC_EXPORT ObjectIdInvalidationMap ObjectIdSetToInvalidationMap(
- const ObjectIdSet& ids, int64 version, const std::string& payload);
+  // Prints the contents of this map as a human-readable string.
+ std::string ToString() const;
-SYNC_EXPORT bool ObjectIdInvalidationMapEquals(
- const ObjectIdInvalidationMap& invalidation_map1,
- const ObjectIdInvalidationMap& invalidation_map2);
+ private:
+ typedef std::map<invalidation::ObjectId,
+ SingleObjectInvalidationSet,
+ ObjectIdLessThan> IdToListMap;
-scoped_ptr<base::ListValue> ObjectIdInvalidationMapToValue(
- const ObjectIdInvalidationMap& invalidation_map);
+ ObjectIdInvalidationMap(const IdToListMap& map);
-bool ObjectIdInvalidationMapFromValue(const base::ListValue& value,
- ObjectIdInvalidationMap* out);
+ IdToListMap map_;
+};
} // namespace syncer
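A short usage sketch (not part of this patch) covering the accessors the new unit test below does not exercise; |id1| and |id2| stand in for ObjectIds already in scope.

  syncer::ObjectIdInvalidationMap map;
  map.Insert(syncer::Invalidation::Init(id1, 1, "one"));
  map.Insert(syncer::Invalidation::Init(id1, 2, "two"));
  map.Insert(syncer::Invalidation::InitUnknownVersion(id2));

  // Iterate the invalidations recorded for a single object.
  const syncer::SingleObjectInvalidationSet& id1_list = map.ForObject(id1);
  for (syncer::SingleObjectInvalidationSet::const_iterator it = id1_list.begin();
       it != id1_list.end(); ++it) {
    DVLOG(1) << "version known: " << !it->is_unknown_version();
  }

  // Flatten everything into one vector, or ack every contained invalidation.
  std::vector<syncer::Invalidation> all;
  map.GetAllInvalidations(&all);
  map.AcknowledgeAll();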
diff --git a/chromium/sync/notifier/object_id_invalidation_map_unittest.cc b/chromium/sync/notifier/object_id_invalidation_map_unittest.cc
new file mode 100644
index 00000000000..1acd920b799
--- /dev/null
+++ b/chromium/sync/notifier/object_id_invalidation_map_unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/object_id_invalidation_map.h"
+
+#include "google/cacheinvalidation/types.pb.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+namespace {
+
+class ObjectIdInvalidationMapTest : public testing::Test {
+ public:
+ ObjectIdInvalidationMapTest()
+ : kIdOne(ipc::invalidation::ObjectSource::TEST, "one"),
+ kIdTwo(ipc::invalidation::ObjectSource::TEST, "two"),
+ kInv1(Invalidation::Init(kIdOne, 10, "ten")) {
+ set1.insert(kIdOne);
+ set2.insert(kIdTwo);
+ all_set.insert(kIdOne);
+ all_set.insert(kIdTwo);
+
+ one_invalidation.Insert(kInv1);
+ invalidate_all = ObjectIdInvalidationMap::InvalidateAll(all_set);
+ }
+
+ protected:
+ const invalidation::ObjectId kIdOne;
+ const invalidation::ObjectId kIdTwo;
+ const Invalidation kInv1;
+
+ ObjectIdSet set1;
+ ObjectIdSet set2;
+ ObjectIdSet all_set;
+ ObjectIdInvalidationMap empty;
+ ObjectIdInvalidationMap one_invalidation;
+ ObjectIdInvalidationMap invalidate_all;
+};
+
+TEST_F(ObjectIdInvalidationMapTest, Empty) {
+ EXPECT_TRUE(empty.Empty());
+ EXPECT_FALSE(one_invalidation.Empty());
+ EXPECT_FALSE(invalidate_all.Empty());
+}
+
+TEST_F(ObjectIdInvalidationMapTest, Equality) {
+ ObjectIdInvalidationMap empty2;
+ EXPECT_TRUE(empty == empty2);
+
+ ObjectIdInvalidationMap one_invalidation2;
+ one_invalidation2.Insert(kInv1);
+ EXPECT_TRUE(one_invalidation == one_invalidation2);
+
+ EXPECT_FALSE(empty == invalidate_all);
+}
+
+TEST_F(ObjectIdInvalidationMapTest, GetObjectIds) {
+ EXPECT_EQ(ObjectIdSet(), empty.GetObjectIds());
+ EXPECT_EQ(set1, one_invalidation.GetObjectIds());
+ EXPECT_EQ(all_set, invalidate_all.GetObjectIds());
+}
+
+TEST_F(ObjectIdInvalidationMapTest, GetSubsetWithObjectIds) {
+ EXPECT_TRUE(empty.GetSubsetWithObjectIds(set1).Empty());
+
+ EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(set1) ==
+ one_invalidation);
+ EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(all_set) ==
+ one_invalidation);
+ EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(set2).Empty());
+
+ EXPECT_TRUE(invalidate_all.GetSubsetWithObjectIds(ObjectIdSet()).Empty());
+}
+
+TEST_F(ObjectIdInvalidationMapTest, SerializeEmpty) {
+ scoped_ptr<base::ListValue> value = empty.ToValue();
+ ASSERT_TRUE(value.get());
+ ObjectIdInvalidationMap deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(empty == deserialized);
+}
+
+TEST_F(ObjectIdInvalidationMapTest, SerializeOneInvalidation) {
+ scoped_ptr<base::ListValue> value = one_invalidation.ToValue();
+ ASSERT_TRUE(value.get());
+ ObjectIdInvalidationMap deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(one_invalidation == deserialized);
+}
+
+TEST_F(ObjectIdInvalidationMapTest, SerializeInvalidateAll) {
+ scoped_ptr<base::ListValue> value = invalidate_all.ToValue();
+ ASSERT_TRUE(value.get());
+ ObjectIdInvalidationMap deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(invalidate_all == deserialized);
+}
+
+} // namespace
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/p2p_invalidator.cc b/chromium/sync/notifier/p2p_invalidator.cc
index 2468a51a735..cd82e6145e8 100644
--- a/chromium/sync/notifier/p2p_invalidator.cc
+++ b/chromium/sync/notifier/p2p_invalidator.cc
@@ -14,6 +14,7 @@
#include "jingle/notifier/listener/push_client.h"
#include "sync/notifier/invalidation_handler.h"
#include "sync/notifier/invalidation_util.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
@@ -27,7 +28,7 @@ const char kNotifyAll[] = "notifyAll";
const char kSenderIdKey[] = "senderId";
const char kNotificationTypeKey[] = "notificationType";
-const char kIdInvalidationMapKey[] = "idInvalidationMap";
+const char kInvalidationsKey[] = "invalidations";
} // namespace
@@ -96,8 +97,7 @@ bool P2PNotificationData::Equals(const P2PNotificationData& other) const {
return
(sender_id_ == other.sender_id_) &&
(target_ == other.target_) &&
- ObjectIdInvalidationMapEquals(invalidation_map_,
- other.invalidation_map_);
+ (invalidation_map_ == other.invalidation_map_);
}
std::string P2PNotificationData::ToString() const {
@@ -105,8 +105,7 @@ std::string P2PNotificationData::ToString() const {
dict->SetString(kSenderIdKey, sender_id_);
dict->SetString(kNotificationTypeKey,
P2PNotificationTargetToString(target_));
- dict->Set(kIdInvalidationMapKey,
- ObjectIdInvalidationMapToValue(invalidation_map_).release());
+ dict->Set(kInvalidationsKey, invalidation_map_.ToValue().release());
std::string json;
base::JSONWriter::Write(dict.get(), &json);
return json;
@@ -129,10 +128,9 @@ bool P2PNotificationData::ResetFromString(const std::string& str) {
}
target_ = P2PNotificationTargetFromString(target_str);
const base::ListValue* invalidation_map_list = NULL;
- if (!data_dict->GetList(kIdInvalidationMapKey, &invalidation_map_list) ||
- !ObjectIdInvalidationMapFromValue(*invalidation_map_list,
- &invalidation_map_)) {
- LOG(WARNING) << "Could not parse " << kIdInvalidationMapKey;
+ if (!data_dict->GetList(kInvalidationsKey, &invalidation_map_list) ||
+ !invalidation_map_.ResetFromValue(*invalidation_map_list)) {
+ LOG(WARNING) << "Could not parse " << kInvalidationsKey;
}
return true;
}
@@ -161,8 +159,7 @@ void P2PInvalidator::RegisterHandler(InvalidationHandler* handler) {
}
void P2PInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- // TODO(akalin): Handle arbitrary object IDs (http://crbug.com/140411).
+ const ObjectIdSet& ids) {
DCHECK(thread_checker_.CalledOnValidThread());
ObjectIdSet new_ids;
const ObjectIdSet& old_ids = registrar_.GetRegisteredIds(handler);
@@ -173,10 +170,8 @@ void P2PInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
registrar_.UpdateRegisteredIds(handler, ids);
const P2PNotificationData notification_data(
invalidator_client_id_,
- NOTIFY_SELF,
- ObjectIdSetToInvalidationMap(new_ids,
- Invalidation::kUnknownVersion,
- std::string()));
+ send_notification_target_,
+ ObjectIdInvalidationMap::InvalidateAll(ids));
SendNotificationData(notification_data);
}
@@ -185,12 +180,6 @@ void P2PInvalidator::UnregisterHandler(InvalidationHandler* handler) {
registrar_.UnregisterHandler(handler);
}
-void P2PInvalidator::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- DCHECK(thread_checker_.CalledOnValidThread());
- // Do nothing for the P2P implementation.
-}
-
InvalidatorState P2PInvalidator::GetInvalidatorState() const {
DCHECK(thread_checker_.CalledOnValidThread());
return registrar_.GetInvalidatorState();
@@ -213,9 +202,10 @@ void P2PInvalidator::UpdateCredentials(
logged_in_ = true;
}
-void P2PInvalidator::SendInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
+void P2PInvalidator::SendInvalidation(const ObjectIdSet& ids) {
DCHECK(thread_checker_.CalledOnValidThread());
+ ObjectIdInvalidationMap invalidation_map =
+ ObjectIdInvalidationMap::InvalidateAll(ids);
const P2PNotificationData notification_data(
invalidator_client_id_, send_notification_target_, invalidation_map);
SendNotificationData(notification_data);
@@ -230,9 +220,8 @@ void P2PInvalidator::OnNotificationsEnabled() {
const P2PNotificationData notification_data(
invalidator_client_id_,
NOTIFY_SELF,
- ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(),
- Invalidation::kUnknownVersion,
- std::string()));
+ ObjectIdInvalidationMap::InvalidateAll(
+ registrar_.GetAllRegisteredIds()));
SendNotificationData(notification_data);
}
}
@@ -266,9 +255,8 @@ void P2PInvalidator::OnIncomingNotification(
notification_data = P2PNotificationData(
invalidator_client_id_,
NOTIFY_ALL,
- ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(),
- Invalidation::kUnknownVersion,
- std::string()));
+ ObjectIdInvalidationMap::InvalidateAll(
+ registrar_.GetAllRegisteredIds()));
}
if (!notification_data.IsTargeted(invalidator_client_id_)) {
DVLOG(1) << "Not a target of the notification -- "
@@ -288,7 +276,7 @@ void P2PInvalidator::SendNotificationDataForTest(
void P2PInvalidator::SendNotificationData(
const P2PNotificationData& notification_data) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (notification_data.GetIdInvalidationMap().empty()) {
+ if (notification_data.GetIdInvalidationMap().Empty()) {
DVLOG(1) << "Not sending XMPP notification with empty state map: "
<< notification_data.ToString();
return;
diff --git a/chromium/sync/notifier/p2p_invalidator.h b/chromium/sync/notifier/p2p_invalidator.h
index 515b27b5c47..e0bd5bc2316 100644
--- a/chromium/sync/notifier/p2p_invalidator.h
+++ b/chromium/sync/notifier/p2p_invalidator.h
@@ -24,6 +24,7 @@
#include "sync/notifier/invalidator.h"
#include "sync/notifier/invalidator_registrar.h"
#include "sync/notifier/invalidator_state.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace notifier {
class PushClient;
@@ -105,8 +106,6 @@ class SYNC_EXPORT_PRIVATE P2PInvalidator
virtual void UpdateRegisteredIds(InvalidationHandler* handler,
const ObjectIdSet& ids) OVERRIDE;
virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
@@ -118,8 +117,7 @@ class SYNC_EXPORT_PRIVATE P2PInvalidator
virtual void OnIncomingNotification(
const notifier::Notification& notification) OVERRIDE;
- void SendInvalidation(
- const ObjectIdInvalidationMap& invalidation_map);
+ void SendInvalidation(const ObjectIdSet& ids);
void SendNotificationDataForTest(
const P2PNotificationData& notification_data);
diff --git a/chromium/sync/notifier/p2p_invalidator_unittest.cc b/chromium/sync/notifier/p2p_invalidator_unittest.cc
index 24cfe027f6c..7898fee4c58 100644
--- a/chromium/sync/notifier/p2p_invalidator_unittest.cc
+++ b/chromium/sync/notifier/p2p_invalidator_unittest.cc
@@ -10,7 +10,6 @@
#include "sync/internal_api/public/base/model_type.h"
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -98,9 +97,7 @@ class P2PInvalidatorTest : public testing::Test {
ObjectIdInvalidationMap MakeInvalidationMap(ModelTypeSet types) {
ObjectIdInvalidationMap invalidations;
ObjectIdSet ids = ModelTypeSetToObjectIdSet(types);
- return ObjectIdSetToInvalidationMap(ids,
- Invalidation::kUnknownVersion,
- std::string());
+ return ObjectIdInvalidationMap::InvalidateAll(ids);
}
// Simulate receiving all the notifications we sent out since last
@@ -166,10 +163,10 @@ TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
EXPECT_TRUE(notification_data.IsTargeted(std::string()));
EXPECT_FALSE(notification_data.IsTargeted("other1"));
EXPECT_FALSE(notification_data.IsTargeted("other2"));
- EXPECT_TRUE(notification_data.GetIdInvalidationMap().empty());
+ EXPECT_TRUE(notification_data.GetIdInvalidationMap().Empty());
const std::string& notification_data_str = notification_data.ToString();
EXPECT_EQ(
- "{\"idInvalidationMap\":[],\"notificationType\":\"notifySelf\","
+ "{\"invalidations\":[],\"notificationType\":\"notifySelf\","
"\"senderId\":\"\"}", notification_data_str);
P2PNotificationData notification_data_parsed;
@@ -180,27 +177,23 @@ TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
// Make sure the P2PNotificationData <-> string conversions work for a
// non-default-constructed P2PNotificationData.
TEST_F(P2PInvalidatorTest, P2PNotificationDataNonDefault) {
- const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(ModelTypeSet(BOOKMARKS, THEMES)),
- Invalidation::kUnknownVersion,
- std::string());
- const P2PNotificationData notification_data(
- "sender", NOTIFY_ALL, invalidation_map);
+ ObjectIdInvalidationMap invalidation_map =
+ ObjectIdInvalidationMap::InvalidateAll(
+ ModelTypeSetToObjectIdSet(ModelTypeSet(BOOKMARKS, THEMES)));
+ const P2PNotificationData notification_data("sender",
+ NOTIFY_ALL,
+ invalidation_map);
EXPECT_TRUE(notification_data.IsTargeted("sender"));
EXPECT_TRUE(notification_data.IsTargeted("other1"));
EXPECT_TRUE(notification_data.IsTargeted("other2"));
- EXPECT_THAT(invalidation_map,
- Eq(notification_data.GetIdInvalidationMap()));
+ EXPECT_EQ(invalidation_map, notification_data.GetIdInvalidationMap());
const std::string& notification_data_str = notification_data.ToString();
EXPECT_EQ(
- "{\"idInvalidationMap\":["
- "{\"objectId\":{\"name\":\"BOOKMARK\",\"source\":1004},"
- "\"state\":{\"ackHandle\":{\"state\":\"\",\"timestamp\":\"0\"},"
- "\"payload\":\"\",\"version\":\"-1\"}},"
- "{\"objectId\":{\"name\":\"THEME\",\"source\":1004},"
- "\"state\":{\"ackHandle\":{\"state\":\"\",\"timestamp\":\"0\"},"
- "\"payload\":\"\",\"version\":\"-1\"}}"
+ "{\"invalidations\":["
+ "{\"isUnknownVersion\":true,"
+ "\"objectId\":{\"name\":\"BOOKMARK\",\"source\":1004}},"
+ "{\"isUnknownVersion\":true,"
+ "\"objectId\":{\"name\":\"THEME\",\"source\":1004}}"
"],\"notificationType\":\"notifyAll\","
"\"senderId\":\"sender\"}", notification_data_str);
@@ -248,14 +241,8 @@ TEST_F(P2PInvalidatorTest, NotificationsBasic) {
// Sent with target NOTIFY_OTHERS so should not be propagated to
// |fake_handler_|.
- {
- const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)),
- Invalidation::kUnknownVersion,
- std::string());
- invalidator->SendInvalidation(invalidation_map);
- }
+ invalidator->SendInvalidation(
+ ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)));
ReflectSentNotifications();
EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
@@ -270,9 +257,7 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
const ModelTypeSet expected_types(THEMES);
const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(ModelTypeSetToObjectIdSet(changed_types),
- Invalidation::kUnknownVersion,
- std::string());
+ MakeInvalidationMap(changed_types);
P2PInvalidator* const invalidator = delegate_.GetInvalidator();
notifier::FakePushClient* const push_client = delegate_.GetPushClient();
@@ -288,23 +273,23 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
ReflectSentNotifications();
EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(MakeInvalidationMap(enabled_types),
- Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(ModelTypeSetToObjectIdSet(enabled_types),
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(P2PNotificationData());
ReflectSentNotifications();
EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
- const ObjectIdInvalidationMap& expected_ids =
- MakeInvalidationMap(expected_types);
+ const ObjectIdSet& expected_ids = ModelTypeSetToObjectIdSet(expected_types);
// Should be propagated.
invalidator->SendNotificationDataForTest(
P2PNotificationData("sender", NOTIFY_SELF, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(2, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(
@@ -329,7 +314,8 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
P2PNotificationData("sender2", NOTIFY_OTHERS, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(3, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(
@@ -342,14 +328,16 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
P2PNotificationData("sender", NOTIFY_ALL, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(4, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be propagated.
invalidator->SendNotificationDataForTest(
P2PNotificationData("sender2", NOTIFY_ALL, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(5, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(
diff --git a/chromium/sync/notifier/push_client_channel.cc b/chromium/sync/notifier/push_client_channel.cc
index b5f31c14a14..a067a215d64 100644
--- a/chromium/sync/notifier/push_client_channel.cc
+++ b/chromium/sync/notifier/push_client_channel.cc
@@ -19,9 +19,7 @@ const char kChannelName[] = "tango_raw";
PushClientChannel::PushClientChannel(
scoped_ptr<notifier::PushClient> push_client)
- : push_client_(push_client.Pass()),
- notifications_enabled_(false),
- scheduling_hash_(0) {
+ : push_client_(push_client.Pass()) {
push_client_->AddObserver(this);
notifier::Subscription subscription;
subscription.channel = kChannelName;
@@ -33,7 +31,6 @@ PushClientChannel::PushClientChannel(
PushClientChannel::~PushClientChannel() {
push_client_->RemoveObserver(this);
- STLDeleteElements(&network_status_receivers_);
}
void PushClientChannel::UpdateCredentials(
@@ -41,119 +38,28 @@ void PushClientChannel::UpdateCredentials(
push_client_->UpdateCredentials(email, token);
}
-void PushClientChannel::SendMessage(const std::string& outgoing_message) {
- push_client_->SendNotification(
- EncodeMessage(outgoing_message, service_context_, scheduling_hash_));
-}
-
-void PushClientChannel::SetMessageReceiver(
- invalidation::MessageCallback* incoming_receiver) {
- incoming_receiver_.reset(incoming_receiver);
-}
-
-void PushClientChannel::AddNetworkStatusReceiver(
- invalidation::NetworkStatusCallback* network_status_receiver) {
- network_status_receiver->Run(notifications_enabled_);
- network_status_receivers_.push_back(network_status_receiver);
-}
-
-void PushClientChannel::SetSystemResources(
- invalidation::SystemResources* resources) {
- // Do nothing.
+void PushClientChannel::SendEncodedMessage(const std::string& encoded_message) {
+ notifier::Recipient recipient;
+ recipient.to = kBotJid;
+ notifier::Notification notification;
+ notification.channel = kChannelName;
+ notification.recipients.push_back(recipient);
+ notification.data = encoded_message;
+ push_client_->SendNotification(notification);
}
void PushClientChannel::OnNotificationsEnabled() {
- for (NetworkStatusReceiverList::const_iterator it =
- network_status_receivers_.begin();
- it != network_status_receivers_.end(); ++it) {
- (*it)->Run(true);
- }
+ NotifyStateChange(INVALIDATIONS_ENABLED);
}
void PushClientChannel::OnNotificationsDisabled(
notifier::NotificationsDisabledReason reason) {
- for (NetworkStatusReceiverList::const_iterator it =
- network_status_receivers_.begin();
- it != network_status_receivers_.end(); ++it) {
- (*it)->Run(false);
- }
+ NotifyStateChange(FromNotifierReason(reason));
}
void PushClientChannel::OnIncomingNotification(
const notifier::Notification& notification) {
- if (!incoming_receiver_) {
- DLOG(ERROR) << "No receiver for incoming notification";
- return;
- }
- std::string message;
- if (!DecodeMessage(notification,
- &message, &service_context_, &scheduling_hash_)) {
- DLOG(ERROR) << "Could not parse ClientGatewayMessage from: "
- << notification.ToString();
- }
- incoming_receiver_->Run(message);
-}
-
-const std::string& PushClientChannel::GetServiceContextForTest() const {
- return service_context_;
-}
-
-int64 PushClientChannel::GetSchedulingHashForTest() const {
- return scheduling_hash_;
-}
-
-notifier::Notification PushClientChannel::EncodeMessageForTest(
- const std::string& message, const std::string& service_context,
- int64 scheduling_hash) {
- return EncodeMessage(message, service_context, scheduling_hash);
-}
-
-bool PushClientChannel::DecodeMessageForTest(
- const notifier::Notification& notification,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash) {
- return DecodeMessage(
- notification, message, service_context, scheduling_hash);
-}
-
-notifier::Notification PushClientChannel::EncodeMessage(
- const std::string& message, const std::string& service_context,
- int64 scheduling_hash) {
- ipc::invalidation::ClientGatewayMessage envelope;
- envelope.set_is_client_to_server(true);
- if (!service_context.empty()) {
- envelope.set_service_context(service_context);
- envelope.set_rpc_scheduling_hash(scheduling_hash);
- }
- envelope.set_network_message(message);
-
- notifier::Recipient recipient;
- recipient.to = kBotJid;
- notifier::Notification notification;
- notification.channel = kChannelName;
- notification.recipients.push_back(recipient);
- envelope.SerializeToString(&notification.data);
- return notification;
-}
-
-bool PushClientChannel::DecodeMessage(
- const notifier::Notification& notification,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash) {
- ipc::invalidation::ClientGatewayMessage envelope;
- if (!envelope.ParseFromString(notification.data)) {
- return false;
- }
- *message = envelope.network_message();
- if (envelope.has_service_context()) {
- *service_context = envelope.service_context();
- }
- if (envelope.has_rpc_scheduling_hash()) {
- *scheduling_hash = envelope.rpc_scheduling_hash();
- }
- return true;
+ DeliverIncomingMessage(notification.data);
}
} // namespace syncer
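
The rewritten push_client_channel.cc above shows the new division of labour: the channel only ships already-encoded bytes over the PushClient and reports transport state through the base class. Below is a minimal sketch (illustration only, not part of this change) of a consumer of that state signal, assuming SyncNetworkChannel declares the nested Observer interface with OnNetworkChannelStateChanged() exactly as it is used by the tests further down; any other names here are illustrative.

#include "base/compiler_specific.h"
#include "sync/notifier/invalidator_state.h"
#include "sync/notifier/sync_system_resources.h"

namespace syncer {

// Records the last transport state reported by a SyncNetworkChannel.
class ExampleChannelStateObserver : public SyncNetworkChannel::Observer {
 public:
  ExampleChannelStateObserver() : last_state_(DEFAULT_INVALIDATION_ERROR) {}

  virtual void OnNetworkChannelStateChanged(
      InvalidatorState invalidator_state) OVERRIDE {
    // e.g. INVALIDATIONS_ENABLED or TRANSIENT_INVALIDATION_ERROR.
    last_state_ = invalidator_state;
  }

  InvalidatorState last_state() const { return last_state_; }

 private:
  InvalidatorState last_state_;
};

}  // namespace syncer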
diff --git a/chromium/sync/notifier/push_client_channel.h b/chromium/sync/notifier/push_client_channel.h
index 7bf908aa5af..fa029ab91cd 100644
--- a/chromium/sync/notifier/push_client_channel.h
+++ b/chromium/sync/notifier/push_client_channel.h
@@ -6,14 +6,13 @@
#define SYNC_NOTIFIER_PUSH_CLIENT_CHANNEL_H_
#include <string>
-#include <vector>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
-#include "google/cacheinvalidation/include/system-resources.h"
#include "jingle/notifier/listener/push_client_observer.h"
#include "sync/base/sync_export.h"
+#include "sync/notifier/sync_system_resources.h"
namespace notifier {
class PushClient;
@@ -24,7 +23,7 @@ namespace syncer {
// A PushClientChannel is an implementation of NetworkChannel that
// routes messages through a PushClient.
class SYNC_EXPORT_PRIVATE PushClientChannel
- : public NON_EXPORTED_BASE(invalidation::NetworkChannel),
+ : public SyncNetworkChannel,
public NON_EXPORTED_BASE(notifier::PushClientObserver) {
public:
// |push_client| is guaranteed to be destroyed only when this object
@@ -38,14 +37,8 @@ class SYNC_EXPORT_PRIVATE PushClientChannel
// credentials.
void UpdateCredentials(const std::string& email, const std::string& token);
- // invalidation::NetworkChannel implementation.
- virtual void SendMessage(const std::string& outgoing_message) OVERRIDE;
- virtual void SetMessageReceiver(
- invalidation::MessageCallback* incoming_receiver) OVERRIDE;
- virtual void AddNetworkStatusReceiver(
- invalidation::NetworkStatusCallback* network_status_receiver) OVERRIDE;
- virtual void SetSystemResources(
- invalidation::SystemResources* resources) OVERRIDE;
+ // SyncNetworkChannel implementation.
+ virtual void SendEncodedMessage(const std::string& encoded_message) OVERRIDE;
// notifier::PushClient::Observer implementation.
virtual void OnNotificationsEnabled() OVERRIDE;
@@ -54,47 +47,8 @@ class SYNC_EXPORT_PRIVATE PushClientChannel
virtual void OnIncomingNotification(
const notifier::Notification& notification) OVERRIDE;
- const std::string& GetServiceContextForTest() const;
-
- int64 GetSchedulingHashForTest() const;
-
- static notifier::Notification EncodeMessageForTest(
- const std::string& message,
- const std::string& service_context,
- int64 scheduling_hash);
-
- static bool DecodeMessageForTest(
- const notifier::Notification& notification,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash);
-
private:
- typedef std::vector<invalidation::NetworkStatusCallback*>
- NetworkStatusReceiverList;
-
- static notifier::Notification EncodeMessage(
- const std::string& message,
- const std::string& service_context,
- int64 scheduling_hash);
-
- static bool DecodeMessage(
- const notifier::Notification& notification,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash);
-
scoped_ptr<notifier::PushClient> push_client_;
- scoped_ptr<invalidation::MessageCallback> incoming_receiver_;
- NetworkStatusReceiverList network_status_receivers_;
-
- bool notifications_enabled_;
-
- // Service context.
- std::string service_context_;
-
- // Scheduling hash.
- int64 scheduling_hash_;
DISALLOW_COPY_AND_ASSIGN(PushClientChannel);
};
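
With the refactored header above, PushClientChannel overrides only SendEncodedMessage(); message encoding, the receiver callback and network-status bookkeeping now live in SyncNetworkChannel. The following sketch shows what another transport could look like under the same contract. It assumes the base class exposes DeliverIncomingMessage() and NotifyStateChange() to subclasses in the way the .cc file above calls them; the loop-back behaviour and class name are purely illustrative.

#include <string>

#include "base/compiler_specific.h"
#include "sync/notifier/invalidator_state.h"
#include "sync/notifier/sync_system_resources.h"

namespace syncer {

// Hypothetical transport that keeps messages in memory instead of using XMPP.
class InMemoryNetworkChannel : public SyncNetworkChannel {
 public:
  // SyncNetworkChannel implementation.
  virtual void SendEncodedMessage(
      const std::string& encoded_message) OVERRIDE {
    last_sent_ = encoded_message;
  }

  // Test hooks standing in for a real network.
  void SimulateIncoming(const std::string& data) {
    DeliverIncomingMessage(data);  // forwarded to the registered receiver
  }
  void SimulateConnectionState(bool online) {
    NotifyStateChange(online ? INVALIDATIONS_ENABLED
                             : TRANSIENT_INVALIDATION_ERROR);
  }

 private:
  std::string last_sent_;
};

}  // namespace syncer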
diff --git a/chromium/sync/notifier/push_client_channel_unittest.cc b/chromium/sync/notifier/push_client_channel_unittest.cc
index d017e1b9fda..d0b75e41776 100644
--- a/chromium/sync/notifier/push_client_channel_unittest.cc
+++ b/chromium/sync/notifier/push_client_channel_unittest.cc
@@ -15,104 +15,45 @@
namespace syncer {
namespace {
-class PushClientChannelTest : public ::testing::Test {
+class PushClientChannelTest
+ : public ::testing::Test,
+ public SyncNetworkChannel::Observer {
protected:
PushClientChannelTest()
: fake_push_client_(new notifier::FakePushClient()),
push_client_channel_(
scoped_ptr<notifier::PushClient>(fake_push_client_)),
- connected_(false) {
+ last_invalidator_state_(DEFAULT_INVALIDATION_ERROR) {
+ push_client_channel_.AddObserver(this);
push_client_channel_.SetMessageReceiver(
invalidation::NewPermanentCallback(
this, &PushClientChannelTest::OnIncomingMessage));
- push_client_channel_.AddNetworkStatusReceiver(
- invalidation::NewPermanentCallback(
- this, &PushClientChannelTest::OnNetworkStatusChange));
push_client_channel_.SetSystemResources(NULL);
}
- virtual ~PushClientChannelTest() {}
+ virtual ~PushClientChannelTest() {
+ push_client_channel_.RemoveObserver(this);
+ }
- void OnIncomingMessage(std::string incoming_message) {
- last_message_ = incoming_message;
+ virtual void OnNetworkChannelStateChanged(
+ InvalidatorState invalidator_state) OVERRIDE {
+ last_invalidator_state_ = invalidator_state;
}
- void OnNetworkStatusChange(bool connected) {
- connected_ = connected;
+ void OnIncomingMessage(std::string incoming_message) {
+ last_message_ = incoming_message;
}
notifier::FakePushClient* fake_push_client_;
PushClientChannel push_client_channel_;
std::string last_message_;
- bool connected_;
+ InvalidatorState last_invalidator_state_;
};
const char kMessage[] = "message";
const char kServiceContext[] = "service context";
const int64 kSchedulingHash = 100;
-// Encode a message with some context into a notification and then
-// decode it. The decoded info should match the original info.
-TEST_F(PushClientChannelTest, EncodeDecode) {
- const notifier::Notification& notification =
- PushClientChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
- std::string message;
- std::string service_context;
- int64 scheduling_hash = 0LL;
- EXPECT_TRUE(PushClientChannel::DecodeMessageForTest(
- notification, &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
-}
-
-// Encode a message with no context into a notification and then
-// decode it. The decoded message should match the original message,
-// but the context and hash should be untouched.
-TEST_F(PushClientChannelTest, EncodeDecodeNoContext) {
- const notifier::Notification& notification =
- PushClientChannel::EncodeMessageForTest(
- kMessage, std::string(), kSchedulingHash);
- std::string message;
- std::string service_context = kServiceContext;
- int64 scheduling_hash = kSchedulingHash + 1;
- EXPECT_TRUE(PushClientChannel::DecodeMessageForTest(
- notification, &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash + 1, scheduling_hash);
-}
-
-// Decode an empty notification. It should result in an empty message
-// but should leave the context and hash untouched.
-TEST_F(PushClientChannelTest, DecodeEmpty) {
- std::string message = kMessage;
- std::string service_context = kServiceContext;
- int64 scheduling_hash = kSchedulingHash;
- EXPECT_TRUE(PushClientChannel::DecodeMessageForTest(
- notifier::Notification(),
- &message, &service_context, &scheduling_hash));
- EXPECT_TRUE(message.empty());
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
-}
-
-// Try to decode a garbage notification. It should leave all its
-// arguments untouched and return false.
-TEST_F(PushClientChannelTest, DecodeGarbage) {
- notifier::Notification notification;
- notification.data = "garbage";
- std::string message = kMessage;
- std::string service_context = kServiceContext;
- int64 scheduling_hash = kSchedulingHash;
- EXPECT_FALSE(PushClientChannel::DecodeMessageForTest(
- notification, &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
-}
-
// Make sure the channel subscribes to the correct notifications
// channel on construction.
TEST_F(PushClientChannelTest, Subscriptions) {
@@ -135,42 +76,33 @@ TEST_F(PushClientChannelTest, UpdateCredentials) {
EXPECT_EQ(kToken, fake_push_client_->token());
}
-// Call SendMessage on the channel. It should propagate it to the
-// push client.
-TEST_F(PushClientChannelTest, SendMessage) {
- EXPECT_TRUE(fake_push_client_->sent_notifications().empty());
- push_client_channel_.SendMessage(kMessage);
- const notifier::Notification expected_notification =
- PushClientChannel::EncodeMessageForTest(
- kMessage,
- push_client_channel_.GetServiceContextForTest(),
- push_client_channel_.GetSchedulingHashForTest());
- ASSERT_EQ(1u, fake_push_client_->sent_notifications().size());
- EXPECT_TRUE(
- fake_push_client_->sent_notifications()[0].Equals(
- expected_notification));
-}
-
// Simulate push client state changes on the push client. It should
// propagate to the channel.
TEST_F(PushClientChannelTest, OnPushClientStateChange) {
- EXPECT_FALSE(connected_);
+ EXPECT_EQ(DEFAULT_INVALIDATION_ERROR, last_invalidator_state_);
fake_push_client_->EnableNotifications();
- EXPECT_TRUE(connected_);
+ EXPECT_EQ(INVALIDATIONS_ENABLED, last_invalidator_state_);
fake_push_client_->DisableNotifications(
notifier::TRANSIENT_NOTIFICATION_ERROR);
- EXPECT_FALSE(connected_);
- fake_push_client_->EnableNotifications();
- EXPECT_TRUE(connected_);
+ EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, last_invalidator_state_);
fake_push_client_->DisableNotifications(
notifier::NOTIFICATION_CREDENTIALS_REJECTED);
- EXPECT_FALSE(connected_);
+ EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, last_invalidator_state_);
+}
+
+// Call SendMessage on the channel. It should propagate it to the
+// push client.
+TEST_F(PushClientChannelTest, SendMessage) {
+ EXPECT_TRUE(fake_push_client_->sent_notifications().empty());
+ push_client_channel_.SendMessage(kMessage);
+ ASSERT_EQ(1u, fake_push_client_->sent_notifications().size());
}
// Simulate an incoming notification. It should be decoded properly
// by the channel.
TEST_F(PushClientChannelTest, OnIncomingNotification) {
- const notifier::Notification notification =
+ notifier::Notification notification;
+ notification.data =
PushClientChannel::EncodeMessageForTest(
kMessage, kServiceContext, kSchedulingHash);
@@ -182,72 +114,5 @@ TEST_F(PushClientChannelTest, OnIncomingNotification) {
EXPECT_EQ(kMessage, last_message_);
}
-// Simulate an incoming notification with no receiver. It should be
-// dropped by the channel.
-TEST_F(PushClientChannelTest, OnIncomingNotificationNoReceiver) {
- const notifier::Notification notification =
- PushClientChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
-
- push_client_channel_.SetMessageReceiver(NULL);
- fake_push_client_->SimulateIncomingNotification(notification);
- EXPECT_TRUE(push_client_channel_.GetServiceContextForTest().empty());
- EXPECT_EQ(static_cast<int64>(0),
- push_client_channel_.GetSchedulingHashForTest());
- EXPECT_TRUE(last_message_.empty());
-}
-
-// Simulate an incoming garbage notification. It should be dropped by
-// the channel.
-TEST_F(PushClientChannelTest, OnIncomingNotificationGarbage) {
- notifier::Notification notification;
- notification.data = "garbage";
-
- fake_push_client_->SimulateIncomingNotification(notification);
- EXPECT_TRUE(push_client_channel_.GetServiceContextForTest().empty());
- EXPECT_EQ(static_cast<int64>(0),
- push_client_channel_.GetSchedulingHashForTest());
- EXPECT_TRUE(last_message_.empty());
-}
-
-// Send a message, simulate an incoming message with context, and then
-// send the same message again. The first sent message should not
-// have any context, but the second sent message should have the
-// context from the incoming message.
-TEST_F(PushClientChannelTest, PersistedMessageState) {
- push_client_channel_.SendMessage(kMessage);
- ASSERT_EQ(1u, fake_push_client_->sent_notifications().size());
- {
- std::string message;
- std::string service_context;
- int64 scheduling_hash = 0LL;
- EXPECT_TRUE(PushClientChannel::DecodeMessageForTest(
- fake_push_client_->sent_notifications()[0],
- &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_TRUE(service_context.empty());
- EXPECT_EQ(0LL, scheduling_hash);
- }
-
- const notifier::Notification notification =
- PushClientChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
- fake_push_client_->SimulateIncomingNotification(notification);
-
- push_client_channel_.SendMessage(kMessage);
- ASSERT_EQ(2u, fake_push_client_->sent_notifications().size());
- {
- std::string message;
- std::string service_context;
- int64 scheduling_hash = 0LL;
- EXPECT_TRUE(PushClientChannel::DecodeMessageForTest(
- fake_push_client_->sent_notifications()[1],
- &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
- }
-}
-
} // namespace
} // namespace syncer
diff --git a/chromium/sync/notifier/single_object_invalidation_set.cc b/chromium/sync/notifier/single_object_invalidation_set.cc
new file mode 100644
index 00000000000..6da3972fc72
--- /dev/null
+++ b/chromium/sync/notifier/single_object_invalidation_set.cc
@@ -0,0 +1,111 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/single_object_invalidation_set.h"
+
+#include "base/values.h"
+#include "sync/notifier/invalidation_util.h"
+
+namespace syncer {
+
+SingleObjectInvalidationSet::SingleObjectInvalidationSet() {}
+
+SingleObjectInvalidationSet::~SingleObjectInvalidationSet() {}
+
+void SingleObjectInvalidationSet::Insert(const Invalidation& invalidation) {
+ invalidations_.insert(invalidation);
+}
+
+void SingleObjectInvalidationSet::InsertAll(
+ const SingleObjectInvalidationSet& other) {
+ invalidations_.insert(other.begin(), other.end());
+}
+
+void SingleObjectInvalidationSet::Clear() {
+ invalidations_.clear();
+}
+
+bool SingleObjectInvalidationSet::StartsWithUnknownVersion() const {
+ return !invalidations_.empty() &&
+ invalidations_.begin()->is_unknown_version();
+}
+
+size_t SingleObjectInvalidationSet::GetSize() const {
+ return invalidations_.size();
+}
+
+bool SingleObjectInvalidationSet::IsEmpty() const {
+ return invalidations_.empty();
+}
+
+namespace {
+
+struct InvalidationComparator {
+ bool operator()(const Invalidation& inv1, const Invalidation& inv2) {
+ return inv1.Equals(inv2);
+ }
+};
+
+} // namespace
+
+bool SingleObjectInvalidationSet::operator==(
+ const SingleObjectInvalidationSet& other) const {
+ return std::equal(invalidations_.begin(),
+ invalidations_.end(),
+ other.invalidations_.begin(),
+ InvalidationComparator());
+}
+
+SingleObjectInvalidationSet::const_iterator
+SingleObjectInvalidationSet::begin() const {
+ return invalidations_.begin();
+}
+
+SingleObjectInvalidationSet::const_iterator
+SingleObjectInvalidationSet::end() const {
+ return invalidations_.end();
+}
+
+SingleObjectInvalidationSet::const_reverse_iterator
+SingleObjectInvalidationSet::rbegin() const {
+ return invalidations_.rbegin();
+}
+
+SingleObjectInvalidationSet::const_reverse_iterator
+SingleObjectInvalidationSet::rend() const {
+ return invalidations_.rend();
+}
+
+const Invalidation& SingleObjectInvalidationSet::back() const {
+ return *invalidations_.rbegin();
+}
+
+scoped_ptr<base::ListValue> SingleObjectInvalidationSet::ToValue() const {
+ scoped_ptr<base::ListValue> value(new ListValue);
+ for (InvalidationsSet::const_iterator it = invalidations_.begin();
+ it != invalidations_.end(); ++it) {
+ value->Append(it->ToValue().release());
+ }
+ return value.Pass();
+}
+
+bool SingleObjectInvalidationSet::ResetFromValue(
+ const base::ListValue& list) {
+ for (size_t i = 0; i < list.GetSize(); ++i) {
+ const base::DictionaryValue* dict;
+ if (!list.GetDictionary(i, &dict)) {
+ DLOG(WARNING) << "Could not find invalidation at index " << i;
+ return false;
+ }
+ scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
+ if (!invalidation) {
+ DLOG(WARNING) << "Failed to parse invalidation at index " << i;
+ return false;
+ }
+ invalidations_.insert(*invalidation);
+ }
+ return true;
+}
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/single_object_invalidation_set.h b/chromium/sync/notifier/single_object_invalidation_set.h
new file mode 100644
index 00000000000..e6f4d759aed
--- /dev/null
+++ b/chromium/sync/notifier/single_object_invalidation_set.h
@@ -0,0 +1,63 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
+#define SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
+
+#include <set>
+
+#include "base/memory/scoped_ptr.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/invalidation.h"
+#include "sync/notifier/invalidation_util.h"
+
+namespace base {
+class ListValue;
+} // namespace base
+
+namespace syncer {
+
+// Holds a list of invalidations that all share the same Object ID.
+//
+// The list is kept sorted by version to make it easier to perform common
+// operations, like checking for an unknown version invalidation or fetching
+// the invalidation with the highest version number.
+class SYNC_EXPORT SingleObjectInvalidationSet {
+ public:
+ typedef std::set<Invalidation, InvalidationVersionLessThan> InvalidationsSet;
+ typedef InvalidationsSet::const_iterator const_iterator;
+ typedef InvalidationsSet::const_reverse_iterator const_reverse_iterator;
+
+ SingleObjectInvalidationSet();
+ ~SingleObjectInvalidationSet();
+
+ void Insert(const Invalidation& invalidation);
+ void InsertAll(const SingleObjectInvalidationSet& other);
+ void Clear();
+
+ // Returns true if this list contains an unknown version.
+ //
+ // Unknown version invalidations always end up at the start of the list,
+ // because they have the lowest possible value in the sort ordering.
+ bool StartsWithUnknownVersion() const;
+ size_t GetSize() const;
+ bool IsEmpty() const;
+ bool operator==(const SingleObjectInvalidationSet& other) const;
+
+ const_iterator begin() const;
+ const_iterator end() const;
+ const_reverse_iterator rbegin() const;
+ const_reverse_iterator rend() const;
+ const Invalidation& back() const;
+
+ scoped_ptr<base::ListValue> ToValue() const;
+ bool ResetFromValue(const base::ListValue& list);
+
+ private:
+ InvalidationsSet invalidations_;
+};
+
+} // syncer
+
+#endif // SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
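
A short usage sketch of the container declared above (it mirrors the unit tests that follow): because the set is ordered by InvalidationVersionLessThan, an unknown-version entry always sorts to begin() and the highest known version is back(). Only calls that appear in this change are used; the free function wrapping them is illustrative.

#include "base/basictypes.h"
#include "google/cacheinvalidation/types.pb.h"
#include "sync/internal_api/public/base/invalidation.h"
#include "sync/notifier/single_object_invalidation_set.h"

namespace syncer {

void ExampleSingleObjectSetUsage() {
  const invalidation::ObjectId id(
      ipc::invalidation::ObjectSource::TEST, "one");

  SingleObjectInvalidationSet set;
  set.Insert(Invalidation::Init(id, 5, "five"));
  set.Insert(Invalidation::InitUnknownVersion(id));
  set.Insert(Invalidation::Init(id, 1, "one"));

  // Unknown-version entries sort lowest, so they show up first.
  bool has_unknown = set.StartsWithUnknownVersion();  // true
  // back() is the invalidation with the highest version (5 here).
  int64 newest = set.back().version();
  (void)has_unknown;
  (void)newest;
}

}  // namespace syncer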
diff --git a/chromium/sync/notifier/single_object_invalidation_set_unittest.cc b/chromium/sync/notifier/single_object_invalidation_set_unittest.cc
new file mode 100644
index 00000000000..3fe074e10cf
--- /dev/null
+++ b/chromium/sync/notifier/single_object_invalidation_set_unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/single_object_invalidation_set.h"
+
+#include "google/cacheinvalidation/types.pb.h"
+#include "sync/internal_api/public/base/invalidation_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+namespace {
+
+class SingleObjectInvalidationSetTest : public testing::Test {
+ public:
+ SingleObjectInvalidationSetTest()
+ : kId(ipc::invalidation::ObjectSource::TEST, "one") {
+ }
+ protected:
+ const invalidation::ObjectId kId;
+};
+
+TEST_F(SingleObjectInvalidationSetTest, InsertionAndOrdering) {
+ SingleObjectInvalidationSet l1;
+ SingleObjectInvalidationSet l2;
+
+ Invalidation inv0 = Invalidation::InitUnknownVersion(kId);
+ Invalidation inv1 = Invalidation::Init(kId, 1, "one");
+ Invalidation inv2 = Invalidation::Init(kId, 5, "five");
+
+ l1.Insert(inv0);
+ l1.Insert(inv1);
+ l1.Insert(inv2);
+
+ l2.Insert(inv1);
+ l2.Insert(inv2);
+ l2.Insert(inv0);
+
+ ASSERT_EQ(3U, l1.GetSize());
+ ASSERT_EQ(3U, l2.GetSize());
+
+ SingleObjectInvalidationSet::const_iterator it1 = l1.begin();
+ SingleObjectInvalidationSet::const_iterator it2 = l2.begin();
+ EXPECT_THAT(inv0, Eq(*it1));
+ EXPECT_THAT(inv0, Eq(*it2));
+ it1++;
+ it2++;
+ EXPECT_THAT(inv1, Eq(*it1));
+ EXPECT_THAT(inv1, Eq(*it2));
+ it1++;
+ it2++;
+ EXPECT_THAT(inv2, Eq(*it1));
+ EXPECT_THAT(inv2, Eq(*it2));
+ it1++;
+ it2++;
+ EXPECT_TRUE(it1 == l1.end());
+ EXPECT_TRUE(it2 == l2.end());
+}
+
+TEST_F(SingleObjectInvalidationSetTest, StartWithUnknownVersion) {
+ SingleObjectInvalidationSet list;
+ EXPECT_FALSE(list.StartsWithUnknownVersion());
+
+ list.Insert(Invalidation::Init(kId, 1, "one"));
+ EXPECT_FALSE(list.StartsWithUnknownVersion());
+
+ list.Insert(Invalidation::InitUnknownVersion(kId));
+ EXPECT_TRUE(list.StartsWithUnknownVersion());
+
+ list.Clear();
+ EXPECT_FALSE(list.StartsWithUnknownVersion());
+}
+
+TEST_F(SingleObjectInvalidationSetTest, SerializeEmpty) {
+ SingleObjectInvalidationSet list;
+
+ scoped_ptr<base::ListValue> value = list.ToValue();
+ ASSERT_TRUE(value.get());
+ SingleObjectInvalidationSet deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(list == deserialized);
+}
+
+TEST_F(SingleObjectInvalidationSetTest, SerializeOne) {
+ SingleObjectInvalidationSet list;
+ list.Insert(Invalidation::Init(kId, 1, "one"));
+
+ scoped_ptr<base::ListValue> value = list.ToValue();
+ ASSERT_TRUE(value.get());
+ SingleObjectInvalidationSet deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(list == deserialized);
+}
+
+TEST_F(SingleObjectInvalidationSetTest, SerializeMany) {
+ SingleObjectInvalidationSet list;
+ list.Insert(Invalidation::Init(kId, 1, "one"));
+ list.Insert(Invalidation::InitUnknownVersion(kId));
+
+ scoped_ptr<base::ListValue> value = list.ToValue();
+ ASSERT_TRUE(value.get());
+ SingleObjectInvalidationSet deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(list == deserialized);
+}
+
+} // namespace
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/sync_invalidation_listener.cc b/chromium/sync/notifier/sync_invalidation_listener.cc
index ed07f2070b7..9adb897bd1b 100644
--- a/chromium/sync/notifier/sync_invalidation_listener.cc
+++ b/chromium/sync/notifier/sync_invalidation_listener.cc
@@ -16,6 +16,7 @@
#include "google/cacheinvalidation/types.pb.h"
#include "jingle/notifier/listener/push_client.h"
#include "sync/notifier/invalidation_util.h"
+#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/notifier/registration_manager.h"
namespace {
@@ -29,22 +30,20 @@ namespace syncer {
SyncInvalidationListener::Delegate::~Delegate() {}
SyncInvalidationListener::SyncInvalidationListener(
- base::TickClock* tick_clock,
scoped_ptr<notifier::PushClient> push_client)
- : weak_ptr_factory_(this),
- ack_tracker_(tick_clock, this),
- push_client_(push_client.get()),
- sync_system_resources_(push_client.Pass(), this),
+ : push_client_channel_(push_client.Pass()),
+ sync_system_resources_(&push_client_channel_, this),
delegate_(NULL),
ticl_state_(DEFAULT_INVALIDATION_ERROR),
- push_client_state_(DEFAULT_INVALIDATION_ERROR) {
+ push_client_state_(DEFAULT_INVALIDATION_ERROR),
+ weak_ptr_factory_(this) {
DCHECK(CalledOnValidThread());
- push_client_->AddObserver(this);
+ push_client_channel_.AddObserver(this);
}
SyncInvalidationListener::~SyncInvalidationListener() {
DCHECK(CalledOnValidThread());
- push_client_->RemoveObserver(this);
+ push_client_channel_.RemoveObserver(this);
Stop();
DCHECK(!delegate_);
}
@@ -54,7 +53,7 @@ void SyncInvalidationListener::Start(
create_invalidation_client_callback,
const std::string& client_id, const std::string& client_info,
const std::string& invalidation_bootstrap_data,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& initial_unacked_invalidations,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
Delegate* delegate) {
DCHECK(CalledOnValidThread());
@@ -69,18 +68,7 @@ void SyncInvalidationListener::Start(
sync_system_resources_.storage()->SetInitialState(
invalidation_bootstrap_data);
- invalidation_state_map_ = initial_invalidation_state_map;
- if (invalidation_state_map_.empty()) {
- DVLOG(2) << "No initial max invalidation versions for any id";
- } else {
- for (InvalidationStateMap::const_iterator it =
- invalidation_state_map_.begin();
- it != invalidation_state_map_.end(); ++it) {
- DVLOG(2) << "Initial max invalidation version for "
- << ObjectIdToString(it->first) << " is "
- << it->second.version;
- }
- }
+ unacked_invalidations_map_ = initial_unacked_invalidations;
invalidation_state_tracker_ = invalidation_state_tracker;
DCHECK(invalidation_state_tracker_.IsInitialized());
@@ -101,25 +89,12 @@ void SyncInvalidationListener::Start(
registration_manager_.reset(
new RegistrationManager(invalidation_client_.get()));
-
- // Set up reminders for any invalidations that have not been locally
- // acknowledged.
- ObjectIdSet unacknowledged_ids;
- for (InvalidationStateMap::const_iterator it =
- invalidation_state_map_.begin();
- it != invalidation_state_map_.end(); ++it) {
- if (it->second.expected.Equals(it->second.current))
- continue;
- unacknowledged_ids.insert(it->first);
- }
- if (!unacknowledged_ids.empty())
- ack_tracker_.Track(unacknowledged_ids);
}
void SyncInvalidationListener::UpdateCredentials(
const std::string& email, const std::string& token) {
DCHECK(CalledOnValidThread());
- sync_system_resources_.network()->UpdateCredentials(email, token);
+ push_client_channel_.UpdateCredentials(email, token);
}
void SyncInvalidationListener::UpdateRegisteredIds(const ObjectIdSet& ids) {
@@ -133,27 +108,6 @@ void SyncInvalidationListener::UpdateRegisteredIds(const ObjectIdSet& ids) {
}
}
-void SyncInvalidationListener::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- DCHECK(CalledOnValidThread());
- InvalidationStateMap::iterator state_it = invalidation_state_map_.find(id);
- if (state_it == invalidation_state_map_.end())
- return;
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::Acknowledge,
- id,
- ack_handle);
- state_it->second.current = ack_handle;
- if (state_it->second.expected.Equals(ack_handle)) {
- // If the received ack matches the expected ack, then we no longer need to
- // keep track of |id| since it is up-to-date.
- ObjectIdSet ids;
- ids.insert(id);
- ack_tracker_.Ack(ids);
- }
-}
-
void SyncInvalidationListener::Ready(
invalidation::InvalidationClient* client) {
DCHECK(CalledOnValidThread());
@@ -169,43 +123,24 @@ void SyncInvalidationListener::Invalidate(
const invalidation::AckHandle& ack_handle) {
DCHECK(CalledOnValidThread());
DCHECK_EQ(client, invalidation_client_.get());
- DVLOG(1) << "Invalidate: " << InvalidationToString(invalidation);
+ client->Acknowledge(ack_handle);
const invalidation::ObjectId& id = invalidation.object_id();
- // The invalidation API spec allows for the possibility of redundant
- // invalidations, so keep track of the max versions and drop
- // invalidations with old versions.
- //
- // TODO(akalin): Now that we keep track of registered ids, we
- // should drop invalidations for unregistered ids. We may also
- // have to filter it at a higher level, as invalidations for
- // newly-unregistered ids may already be in flight.
- InvalidationStateMap::const_iterator it = invalidation_state_map_.find(id);
- if ((it != invalidation_state_map_.end()) &&
- (invalidation.version() <= it->second.version)) {
- // Drop redundant invalidations.
- client->Acknowledge(ack_handle);
- return;
- }
-
std::string payload;
// payload() CHECK()'s has_payload(), so we must check it ourselves first.
if (invalidation.has_payload())
payload = invalidation.payload();
- DVLOG(2) << "Setting max invalidation version for " << ObjectIdToString(id)
- << " to " << invalidation.version();
- invalidation_state_map_[id].version = invalidation.version();
- invalidation_state_map_[id].payload = payload;
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::SetMaxVersionAndPayload,
- id, invalidation.version(), payload);
+ DVLOG(2) << "Received invalidation with version " << invalidation.version()
+ << " for " << ObjectIdToString(id);
- ObjectIdSet ids;
- ids.insert(id);
- PrepareInvalidation(ids, invalidation.version(), payload, client, ack_handle);
+ ObjectIdInvalidationMap invalidations;
+ Invalidation inv = Invalidation::Init(id, invalidation.version(), payload);
+ inv.set_ack_handler(GetThisAsAckHandler());
+ invalidations.Insert(inv);
+
+ DispatchInvalidations(invalidations);
}
void SyncInvalidationListener::InvalidateUnknownVersion(
@@ -215,15 +150,14 @@ void SyncInvalidationListener::InvalidateUnknownVersion(
DCHECK(CalledOnValidThread());
DCHECK_EQ(client, invalidation_client_.get());
DVLOG(1) << "InvalidateUnknownVersion";
+ client->Acknowledge(ack_handle);
- ObjectIdSet ids;
- ids.insert(object_id);
- PrepareInvalidation(
- ids,
- Invalidation::kUnknownVersion,
- std::string(),
- client,
- ack_handle);
+ ObjectIdInvalidationMap invalidations;
+ Invalidation unknown_version = Invalidation::InitUnknownVersion(object_id);
+ unknown_version.set_ack_handler(GetThisAsAckHandler());
+ invalidations.Insert(unknown_version);
+
+ DispatchInvalidations(invalidations);
}
// This should behave as if we got an invalidation with version
@@ -234,71 +168,56 @@ void SyncInvalidationListener::InvalidateAll(
DCHECK(CalledOnValidThread());
DCHECK_EQ(client, invalidation_client_.get());
DVLOG(1) << "InvalidateAll";
+ client->Acknowledge(ack_handle);
+
+ ObjectIdInvalidationMap invalidations;
+ for (ObjectIdSet::iterator it = registered_ids_.begin();
+ it != registered_ids_.end(); ++it) {
+ Invalidation unknown_version = Invalidation::InitUnknownVersion(*it);
+ unknown_version.set_ack_handler(GetThisAsAckHandler());
+ invalidations.Insert(unknown_version);
+ }
- PrepareInvalidation(
- registered_ids_,
- Invalidation::kUnknownVersion,
- std::string(),
- client,
- ack_handle);
+ DispatchInvalidations(invalidations);
}
-void SyncInvalidationListener::PrepareInvalidation(
- const ObjectIdSet& ids,
- int64 version,
- const std::string& payload,
- invalidation::InvalidationClient* client,
- const invalidation::AckHandle& ack_handle) {
+// If a handler is registered, emit right away. Otherwise, save it for later.
+void SyncInvalidationListener::DispatchInvalidations(
+ const ObjectIdInvalidationMap& invalidations) {
DCHECK(CalledOnValidThread());
- // A server invalidation resets the local retry count.
- ack_tracker_.Ack(ids);
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::GenerateAckHandles,
- ids,
- base::MessageLoopProxy::current(),
- base::Bind(&SyncInvalidationListener::EmitInvalidation,
- weak_ptr_factory_.GetWeakPtr(),
- ids,
- version,
- payload,
- client,
- ack_handle));
-}
+ ObjectIdInvalidationMap to_save = invalidations;
+ ObjectIdInvalidationMap to_emit =
+ invalidations.GetSubsetWithObjectIds(registered_ids_);
-void SyncInvalidationListener::EmitInvalidation(
- const ObjectIdSet& ids,
- int64 version,
- const std::string& payload,
- invalidation::InvalidationClient* client,
- const invalidation::AckHandle& ack_handle,
- const AckHandleMap& local_ack_handles) {
- DCHECK(CalledOnValidThread());
- ObjectIdInvalidationMap invalidation_map =
- ObjectIdSetToInvalidationMap(ids, version, payload);
- for (AckHandleMap::const_iterator it = local_ack_handles.begin();
- it != local_ack_handles.end(); ++it) {
- // Update in-memory copy of the invalidation state.
- invalidation_state_map_[it->first].expected = it->second;
- invalidation_map[it->first].ack_handle = it->second;
- }
- ack_tracker_.Track(ids);
- delegate_->OnInvalidate(invalidation_map);
- client->Acknowledge(ack_handle);
+ SaveInvalidations(to_save);
+ EmitSavedInvalidations(to_emit);
}
-void SyncInvalidationListener::OnTimeout(const ObjectIdSet& ids) {
- ObjectIdInvalidationMap invalidation_map;
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- Invalidation invalidation;
- invalidation.ack_handle = invalidation_state_map_[*it].expected;
- invalidation.version = invalidation_state_map_[*it].version;
- invalidation.payload = invalidation_state_map_[*it].payload;
- invalidation_map.insert(std::make_pair(*it, invalidation));
+void SyncInvalidationListener::SaveInvalidations(
+ const ObjectIdInvalidationMap& to_save) {
+ ObjectIdSet objects_to_save = to_save.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = objects_to_save.begin();
+ it != objects_to_save.end(); ++it) {
+ UnackedInvalidationsMap::iterator lookup =
+ unacked_invalidations_map_.find(*it);
+ if (lookup == unacked_invalidations_map_.end()) {
+ lookup = unacked_invalidations_map_.insert(
+ std::make_pair(*it, UnackedInvalidationSet(*it))).first;
+ }
+ lookup->second.AddSet(to_save.ForObject(*it));
}
- delegate_->OnInvalidate(invalidation_map);
+ invalidation_state_tracker_.Call(
+ FROM_HERE,
+ &InvalidationStateTracker::SetSavedInvalidations,
+ unacked_invalidations_map_);
+}
+
+void SyncInvalidationListener::EmitSavedInvalidations(
+ const ObjectIdInvalidationMap& to_emit) {
+ DVLOG(2) << "Emitting invalidations: " << to_emit.ToString();
+ delegate_->OnInvalidate(to_emit);
}
void SyncInvalidationListener::InformRegistrationStatus(
@@ -371,6 +290,38 @@ void SyncInvalidationListener::InformError(
EmitStateChange();
}
+void SyncInvalidationListener::Acknowledge(
+ const invalidation::ObjectId& id,
+ const syncer::AckHandle& handle) {
+ UnackedInvalidationsMap::iterator lookup =
+ unacked_invalidations_map_.find(id);
+ if (lookup == unacked_invalidations_map_.end()) {
+ DLOG(WARNING) << "Received acknowledgement for untracked object ID";
+ return;
+ }
+ lookup->second.Acknowledge(handle);
+ invalidation_state_tracker_.Call(
+ FROM_HERE,
+ &InvalidationStateTracker::SetSavedInvalidations,
+ unacked_invalidations_map_);
+}
+
+void SyncInvalidationListener::Drop(
+ const invalidation::ObjectId& id,
+ const syncer::AckHandle& handle) {
+ UnackedInvalidationsMap::iterator lookup =
+ unacked_invalidations_map_.find(id);
+ if (lookup == unacked_invalidations_map_.end()) {
+ DLOG(WARNING) << "Received drop for untracked object ID";
+ return;
+ }
+ lookup->second.Drop(handle);
+ invalidation_state_tracker_.Call(
+ FROM_HERE,
+ &InvalidationStateTracker::SetSavedInvalidations,
+ unacked_invalidations_map_);
+}
+
void SyncInvalidationListener::WriteState(const std::string& state) {
DCHECK(CalledOnValidThread());
DVLOG(1) << "WriteState";
@@ -382,13 +333,31 @@ void SyncInvalidationListener::DoRegistrationUpdate() {
DCHECK(CalledOnValidThread());
const ObjectIdSet& unregistered_ids =
registration_manager_->UpdateRegisteredIds(registered_ids_);
- for (ObjectIdSet::const_iterator it = unregistered_ids.begin();
+ for (ObjectIdSet::iterator it = unregistered_ids.begin();
it != unregistered_ids.end(); ++it) {
- invalidation_state_map_.erase(*it);
+ unacked_invalidations_map_.erase(*it);
}
invalidation_state_tracker_.Call(
- FROM_HERE, &InvalidationStateTracker::Forget, unregistered_ids);
- ack_tracker_.Ack(unregistered_ids);
+ FROM_HERE,
+ &InvalidationStateTracker::SetSavedInvalidations,
+ unacked_invalidations_map_);
+
+ ObjectIdInvalidationMap object_id_invalidation_map;
+ for (UnackedInvalidationsMap::iterator map_it =
+ unacked_invalidations_map_.begin();
+ map_it != unacked_invalidations_map_.end(); ++map_it) {
+ if (registered_ids_.find(map_it->first) == registered_ids_.end()) {
+ continue;
+ }
+ map_it->second.ExportInvalidations(
+ GetThisAsAckHandler(),
+ &object_id_invalidation_map);
+ }
+
+ // There's no need to run these through DispatchInvalidations(); they've
+ // already been saved to storage (that's where we found them) so all we need
+ // to do now is emit them.
+ EmitSavedInvalidations(object_id_invalidation_map);
}
void SyncInvalidationListener::StopForTest() {
@@ -396,23 +365,12 @@ void SyncInvalidationListener::StopForTest() {
Stop();
}
-InvalidationStateMap SyncInvalidationListener::GetStateMapForTest() const {
- DCHECK(CalledOnValidThread());
- return invalidation_state_map_;
-}
-
-AckTracker* SyncInvalidationListener::GetAckTrackerForTest() {
- return &ack_tracker_;
-}
-
void SyncInvalidationListener::Stop() {
DCHECK(CalledOnValidThread());
if (!invalidation_client_) {
return;
}
- ack_tracker_.Clear();
-
registration_manager_.reset();
sync_system_resources_.Stop();
invalidation_client_->Stop();
@@ -420,8 +378,6 @@ void SyncInvalidationListener::Stop() {
invalidation_client_.reset();
delegate_ = NULL;
- invalidation_state_tracker_.Reset();
- invalidation_state_map_.clear();
ticl_state_ = DEFAULT_INVALIDATION_ERROR;
push_client_state_ = DEFAULT_INVALIDATION_ERROR;
}
@@ -449,23 +405,16 @@ void SyncInvalidationListener::EmitStateChange() {
delegate_->OnInvalidatorStateChange(GetState());
}
-void SyncInvalidationListener::OnNotificationsEnabled() {
+WeakHandle<AckHandler> SyncInvalidationListener::GetThisAsAckHandler() {
DCHECK(CalledOnValidThread());
- push_client_state_ = INVALIDATIONS_ENABLED;
- EmitStateChange();
+ return WeakHandle<AckHandler>(weak_ptr_factory_.GetWeakPtr());
}
-void SyncInvalidationListener::OnNotificationsDisabled(
- notifier::NotificationsDisabledReason reason) {
+void SyncInvalidationListener::OnNetworkChannelStateChanged(
+ InvalidatorState invalidator_state) {
DCHECK(CalledOnValidThread());
- push_client_state_ = FromNotifierReason(reason);
+ push_client_state_ = invalidator_state;
EmitStateChange();
}
-void SyncInvalidationListener::OnIncomingNotification(
- const notifier::Notification& notification) {
- DCHECK(CalledOnValidThread());
- // Do nothing, since this is already handled by |invalidation_client_|.
-}
-
} // namespace syncer
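
The rewritten listener above acknowledges every TICL callback immediately, wraps the payload in a syncer::Invalidation that carries the listener as its AckHandler, saves it into unacked_invalidations_map_ (persisted via InvalidationStateTracker::SetSavedInvalidations), and only then emits it to the delegate. The sketch below shows the receiving side of that contract; it is an illustration, not part of this change, and uses only the Delegate and ObjectIdInvalidationMap calls visible in this diff. Acknowledging a copy is also how the updated unit tests do it.

#include "base/compiler_specific.h"
#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/notifier/sync_invalidation_listener.h"

namespace syncer {

// Delegate that immediately acknowledges everything it is handed, which
// routes back to SyncInvalidationListener::Acknowledge() through the attached
// WeakHandle<AckHandler> and lets the listener clear its unacked map.
class AckEverythingDelegate : public SyncInvalidationListener::Delegate {
 public:
  virtual void OnInvalidate(
      const ObjectIdInvalidationMap& invalidations) OVERRIDE {
    ObjectIdSet ids = invalidations.GetObjectIds();
    for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
      const SingleObjectInvalidationSet& for_object =
          invalidations.ForObject(*it);
      for (SingleObjectInvalidationSet::const_iterator inv_it =
               for_object.begin();
           inv_it != for_object.end(); ++inv_it) {
        Invalidation inv = *inv_it;  // copies keep the same ack handler
        inv.Acknowledge();
      }
    }
  }

  virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE {}
};

}  // namespace syncer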
diff --git a/chromium/sync/notifier/sync_invalidation_listener.h b/chromium/sync/notifier/sync_invalidation_listener.h
index d280e2e5c6c..2b4632d1da7 100644
--- a/chromium/sync/notifier/sync_invalidation_listener.h
+++ b/chromium/sync/notifier/sync_invalidation_listener.h
@@ -17,19 +17,15 @@
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
#include "google/cacheinvalidation/include/invalidation-listener.h"
-#include "jingle/notifier/listener/push_client_observer.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/ack_tracker.h"
+#include "sync/notifier/ack_handler.h"
#include "sync/notifier/invalidation_state_tracker.h"
#include "sync/notifier/invalidator_state.h"
-#include "sync/notifier/object_id_invalidation_map.h"
+#include "sync/notifier/push_client_channel.h"
#include "sync/notifier/state_writer.h"
#include "sync/notifier/sync_system_resources.h"
-
-namespace base {
-class TickClock;
-} // namespace base
+#include "sync/notifier/unacked_invalidation_set.h"
namespace buzz {
class XmppTaskParentInterface;
@@ -41,6 +37,7 @@ class PushClient;
namespace syncer {
+class ObjectIdInvalidationMap;
class RegistrationManager;
// SyncInvalidationListener is not thread-safe and lives on the sync
@@ -48,9 +45,9 @@ class RegistrationManager;
class SYNC_EXPORT_PRIVATE SyncInvalidationListener
: public NON_EXPORTED_BASE(invalidation::InvalidationListener),
public StateWriter,
- public NON_EXPORTED_BASE(notifier::PushClientObserver),
- public base::NonThreadSafe,
- public AckTracker::Delegate {
+ public SyncNetworkChannel::Observer,
+ public AckHandler,
+ public base::NonThreadSafe {
public:
typedef base::Callback<invalidation::InvalidationClient*(
invalidation::SystemResources*,
@@ -64,13 +61,12 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
virtual ~Delegate();
virtual void OnInvalidate(
- const ObjectIdInvalidationMap& invalidation_map) = 0;
+ const ObjectIdInvalidationMap& invalidations) = 0;
virtual void OnInvalidatorStateChange(InvalidatorState state) = 0;
};
explicit SyncInvalidationListener(
- base::TickClock* tick_clock,
scoped_ptr<notifier::PushClient> push_client);
// Calls Stop().
@@ -83,7 +79,7 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
create_invalidation_client_callback,
const std::string& client_id, const std::string& client_info,
const std::string& invalidation_bootstrap_data,
- const InvalidationStateMap& initial_invalidation_state_map,
+ const UnackedInvalidationsMap& initial_object_states,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
Delegate* delegate);
@@ -92,9 +88,6 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
// Update the set of object IDs that we're interested in getting
// notifications for. May be called at any time.
void UpdateRegisteredIds(const ObjectIdSet& ids);
- // Acknowledge that an invalidation for |id| was handled.
- void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle);
// invalidation::InvalidationListener implementation.
virtual void Ready(
@@ -127,21 +120,24 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
invalidation::InvalidationClient* client,
const invalidation::ErrorInfo& error_info) OVERRIDE;
+ // AckHandler implementation.
+ virtual void Acknowledge(
+ const invalidation::ObjectId& id,
+ const syncer::AckHandle& handle) OVERRIDE;
+ virtual void Drop(
+ const invalidation::ObjectId& id,
+ const syncer::AckHandle& handle) OVERRIDE;
+
// StateWriter implementation.
virtual void WriteState(const std::string& state) OVERRIDE;
- // notifier::PushClientObserver implementation.
- virtual void OnNotificationsEnabled() OVERRIDE;
- virtual void OnNotificationsDisabled(
- notifier::NotificationsDisabledReason reason) OVERRIDE;
- virtual void OnIncomingNotification(
- const notifier::Notification& notification) OVERRIDE;
+ // SyncNetworkChannel::Observer implementation.
+ virtual void OnNetworkChannelStateChanged(
+ InvalidatorState invalidator_state) OVERRIDE;
void DoRegistrationUpdate();
void StopForTest();
- InvalidationStateMap GetStateMapForTest() const;
- AckTracker* GetAckTrackerForTest();
private:
void Stop();
@@ -150,28 +146,30 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
void EmitStateChange();
- void PrepareInvalidation(const ObjectIdSet& ids,
- int64 version,
- const std::string& payload,
- invalidation::InvalidationClient* client,
- const invalidation::AckHandle& ack_handle);
- void EmitInvalidation(const ObjectIdSet& ids,
- int64 version,
- const std::string& payload,
- invalidation::InvalidationClient* client,
- const invalidation::AckHandle& ack_handle,
- const AckHandleMap& local_ack_handles);
-
- // AckTracker::Delegate implementation.
- virtual void OnTimeout(const ObjectIdSet& ids) OVERRIDE;
+ // Sends invalidations to their appropriate destination.
+ //
+ // If there are no observers registered for them, they will be saved for
+ // later.
+ //
+ // If there are observers registered, they will be saved (to make sure we
+ // don't drop them until they've been acted on) and emitted to the observers.
+ void DispatchInvalidations(const ObjectIdInvalidationMap& invalidations);
- base::WeakPtrFactory<SyncInvalidationListener> weak_ptr_factory_;
- AckTracker ack_tracker_;
+ // Saves invalidations.
+ //
+ // This call isn't synchronous so we can't guarantee these invalidations will
+ // be safely on disk by the end of the call, but it should ensure that the
+ // data makes it to disk eventually.
+ void SaveInvalidations(const ObjectIdInvalidationMap& to_save);
+
+ // Emits previously saved invalidations to their registered observers.
+ void EmitSavedInvalidations(const ObjectIdInvalidationMap& to_emit);
- // Owned by |sync_system_resources_|.
- notifier::PushClient* const push_client_;
+ WeakHandle<AckHandler> GetThisAsAckHandler();
+
+ PushClientChannel push_client_channel_;
SyncSystemResources sync_system_resources_;
- InvalidationStateMap invalidation_state_map_;
+ UnackedInvalidationsMap unacked_invalidations_map_;
WeakHandle<InvalidationStateTracker> invalidation_state_tracker_;
Delegate* delegate_;
scoped_ptr<invalidation::InvalidationClient> invalidation_client_;
@@ -183,6 +181,8 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
InvalidatorState ticl_state_;
InvalidatorState push_client_state_;
+ base::WeakPtrFactory<SyncInvalidationListener> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SyncInvalidationListener);
};
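
Alongside acknowledgement, the AckHandler interface above also supports drops. The following sketch outlines the drop-and-recover cycle from a handler's perspective; it assumes the Invalidation::Drop() and DroppedInvalidationTracker calls used by the updated unit test below, and that an invalidation delivered through OnInvalidate() already carries this listener as its ack handler (the helper function itself is hypothetical).

#include "sync/internal_api/public/base/invalidation.h"
#include "sync/notifier/dropped_invalidation_tracker.h"

namespace syncer {

// A handler that cannot buffer |received| records the loss so the listener
// remembers it, then reports recovery once the object has been resynced.
void DropAndLaterRecover(const Invalidation& received,
                         DroppedInvalidationTracker* drop_tracker) {
  Invalidation copy = received;  // copies keep the same ack handler
  copy.Drop(drop_tracker);
  // ... after a full resync of the object tracked by |drop_tracker|:
  // drop_tracker->RecordRecoveryFromDropEvent();
}

}  // namespace syncer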
diff --git a/chromium/sync/notifier/sync_invalidation_listener_unittest.cc b/chromium/sync/notifier/sync_invalidation_listener_unittest.cc
index d3aa712b7d9..2808b9727a7 100644
--- a/chromium/sync/notifier/sync_invalidation_listener_unittest.cc
+++ b/chromium/sync/notifier/sync_invalidation_listener_unittest.cc
@@ -3,23 +3,24 @@
// found in the LICENSE file.
#include <cstddef>
+#include <map>
#include <set>
#include <string>
+#include <vector>
#include "base/compiler_specific.h"
#include "base/message_loop/message_loop.h"
#include "base/stl_util.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
#include "google/cacheinvalidation/include/invalidation-client.h"
#include "google/cacheinvalidation/include/types.h"
#include "jingle/notifier/listener/fake_push_client.h"
-#include "sync/internal_api/public/base/invalidation_test_util.h"
#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/ack_tracker.h"
+#include "sync/notifier/dropped_invalidation_tracker.h"
#include "sync/notifier/fake_invalidation_state_tracker.h"
#include "sync/notifier/invalidation_util.h"
+#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/notifier/sync_invalidation_listener.h"
+#include "sync/notifier/unacked_invalidation_set_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -38,7 +39,6 @@ const char kNewState[] = "new_state";
const char kPayload1[] = "payload1";
const char kPayload2[] = "payload2";
-const int64 kMinVersion = FakeInvalidationStateTracker::kMinVersion;
const int64 kVersion1 = 1LL;
const int64 kVersion2 = 2LL;
@@ -137,41 +137,108 @@ class FakeInvalidationClient : public invalidation::InvalidationClient {
class FakeDelegate : public SyncInvalidationListener::Delegate {
public:
explicit FakeDelegate(SyncInvalidationListener* listener)
- : listener_(listener),
- state_(TRANSIENT_INVALIDATION_ERROR) {}
+ : state_(TRANSIENT_INVALIDATION_ERROR),
+ drop_handlers_deleter_(&drop_handlers_) {}
virtual ~FakeDelegate() {}
- int GetInvalidationCount(const ObjectId& id) const {
- ObjectIdCountMap::const_iterator it = invalidation_counts_.find(id);
- return (it == invalidation_counts_.end()) ? 0 : it->second;
+ size_t GetInvalidationCount(const ObjectId& id) const {
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ return 0;
+ } else {
+ return it->second.size();
+ }
}
int64 GetVersion(const ObjectId& id) const {
- ObjectIdInvalidationMap::const_iterator it = invalidations_.find(id);
- return (it == invalidations_.end()) ? 0 : it->second.version;
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return 0;
+ } else {
+ return it->second.back().version();
+ }
}
std::string GetPayload(const ObjectId& id) const {
- ObjectIdInvalidationMap::const_iterator it = invalidations_.find(id);
- return (it == invalidations_.end()) ? std::string() : it->second.payload;
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return std::string();
+ } else {
+ return it->second.back().payload();
+ }
+ }
+
+ bool IsUnknownVersion(const ObjectId& id) const {
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return false;
+ } else {
+ return it->second.back().is_unknown_version();
+ }
+ }
+
+ bool StartsWithUnknownVersion(const ObjectId& id) const {
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return false;
+ } else {
+ return it->second.front().is_unknown_version();
+ }
}
InvalidatorState GetInvalidatorState() const {
return state_;
}
- void Acknowledge(const ObjectId& id) {
- listener_->Acknowledge(id, invalidations_[id].ack_handle);
+ DroppedInvalidationTracker* GetDropTrackerForObject(const ObjectId& id) {
+ DropHandlers::iterator it = drop_handlers_.find(id);
+ if (it == drop_handlers_.end()) {
+ drop_handlers_.insert(
+ std::make_pair(id, new DroppedInvalidationTracker(id)));
+ return drop_handlers_.find(id)->second;
+ } else {
+ return it->second;
+ }
}
- // SyncInvalidationListener::Delegate implementation.
+ void AcknowledgeNthInvalidation(const ObjectId& id, size_t n) {
+ List& list = invalidations_[id];
+ List::iterator it = list.begin() + n;
+ it->Acknowledge();
+ }
+
+ void AcknowledgeAll(const ObjectId& id) {
+ List& list = invalidations_[id];
+ for (List::iterator it = list.begin(); it != list.end(); ++it) {
+ it->Acknowledge();
+ }
+ }
+
+ void DropNthInvalidation(const ObjectId& id, size_t n) {
+ DroppedInvalidationTracker* drop_tracker = GetDropTrackerForObject(id);
+ List& list = invalidations_[id];
+ List::iterator it = list.begin() + n;
+ it->Drop(drop_tracker);
+ }
+ void RecoverFromDropEvent(const ObjectId& id) {
+ DroppedInvalidationTracker* drop_tracker = GetDropTrackerForObject(id);
+ drop_tracker->RecordRecoveryFromDropEvent();
+ }
+
+ // SyncInvalidationListener::Delegate implementation.
virtual void OnInvalidate(
const ObjectIdInvalidationMap& invalidation_map) OVERRIDE {
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- ++invalidation_counts_[it->first];
- invalidations_[it->first] = it->second;
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::iterator it = ids.begin(); it != ids.end(); ++it) {
+ const SingleObjectInvalidationSet& incoming =
+ invalidation_map.ForObject(*it);
+ List& list = invalidations_[*it];
+ list.insert(list.end(), incoming.begin(), incoming.end());
}
}
@@ -180,11 +247,16 @@ class FakeDelegate : public SyncInvalidationListener::Delegate {
}
private:
- typedef std::map<ObjectId, int, ObjectIdLessThan> ObjectIdCountMap;
- ObjectIdCountMap invalidation_counts_;
- ObjectIdInvalidationMap invalidations_;
- SyncInvalidationListener* listener_;
+ typedef std::vector<Invalidation> List;
+ typedef std::map<ObjectId, List, ObjectIdLessThan> Map;
+ typedef std::map<ObjectId,
+ DroppedInvalidationTracker*,
+ ObjectIdLessThan> DropHandlers;
+
+ Map invalidations_;
InvalidatorState state_;
+ DropHandlers drop_handlers_;
+ STLValueDeleter<DropHandlers> drop_handlers_deleter_;
};
invalidation::InvalidationClient* CreateFakeInvalidationClient(
@@ -198,50 +270,6 @@ invalidation::InvalidationClient* CreateFakeInvalidationClient(
return *fake_invalidation_client;
}
-// TODO(dcheng): FakeTickClock and FakeBackoffEntry ought to be factored out
-// into a helpers file so it can be shared with the AckTracker unittest.
-class FakeTickClock : public base::TickClock {
- public:
- FakeTickClock() {}
- virtual ~FakeTickClock() {}
-
- void LeapForward(int seconds) {
- ASSERT_GT(seconds, 0);
- fake_now_ticks_ += base::TimeDelta::FromSeconds(seconds);
- }
-
- virtual base::TimeTicks NowTicks() OVERRIDE {
- return fake_now_ticks_;
- }
-
- private:
- base::TimeTicks fake_now_ticks_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeTickClock);
-};
-
-class FakeBackoffEntry : public net::BackoffEntry {
- public:
- FakeBackoffEntry(const Policy *const policy, base::TickClock* tick_clock)
- : BackoffEntry(policy), tick_clock_(tick_clock) {
- }
-
- protected:
- virtual base::TimeTicks ImplGetTimeNow() const OVERRIDE {
- return tick_clock_->NowTicks();
- }
-
- private:
- base::TickClock* const tick_clock_;
-};
-
-scoped_ptr<net::BackoffEntry> CreateMockEntry(
- base::TickClock* tick_clock,
- const net::BackoffEntry::Policy *const policy) {
- return scoped_ptr<net::BackoffEntry>(
- new FakeBackoffEntry(policy, tick_clock));
-}
-
class SyncInvalidationListenerTest : public testing::Test {
protected:
SyncInvalidationListenerTest()
@@ -251,8 +279,7 @@ class SyncInvalidationListenerTest : public testing::Test {
kAppsId_(kChromeSyncSourceId, "APP"),
fake_push_client_(new notifier::FakePushClient()),
fake_invalidation_client_(NULL),
- listener_(&tick_clock_,
- scoped_ptr<notifier::PushClient>(fake_push_client_)),
+ listener_(scoped_ptr<notifier::PushClient>(fake_push_client_)),
fake_delegate_(&listener_) {}
virtual void SetUp() {
@@ -278,7 +305,7 @@ class SyncInvalidationListenerTest : public testing::Test {
listener_.Start(base::Bind(&CreateFakeInvalidationClient,
&fake_invalidation_client_),
kClientId, kClientInfo, kState,
- fake_tracker_.GetAllInvalidationStates(),
+ fake_tracker_.GetSavedInvalidations(),
MakeWeakHandle(fake_tracker_.AsWeakPtr()),
&fake_delegate_);
DCHECK(fake_invalidation_client_);
@@ -291,12 +318,12 @@ class SyncInvalidationListenerTest : public testing::Test {
// avoid leaking the inner task. listener_.StopForTest() does not
// schedule any tasks, so it's both necessary and sufficient to
// drain the task queue before calling it.
- message_loop_.RunUntilIdle();
+ FlushPendingWrites();
fake_invalidation_client_ = NULL;
listener_.StopForTest();
}
- int GetInvalidationCount(const ObjectId& id) const {
+ size_t GetInvalidationCount(const ObjectId& id) const {
return fake_delegate_.GetInvalidationCount(id);
}
@@ -308,12 +335,32 @@ class SyncInvalidationListenerTest : public testing::Test {
return fake_delegate_.GetPayload(id);
}
- InvalidatorState GetInvalidatorState() const {
- return fake_delegate_.GetInvalidatorState();
+ bool IsUnknownVersion(const ObjectId& id) const {
+ return fake_delegate_.IsUnknownVersion(id);
+ }
+
+ bool StartsWithUnknownVersion(const ObjectId& id) const {
+ return fake_delegate_.StartsWithUnknownVersion(id);
}
- int64 GetMaxVersion(const ObjectId& id) const {
- return fake_tracker_.GetMaxVersion(id);
+ void AcknowledgeNthInvalidation(const ObjectId& id, size_t n) {
+ fake_delegate_.AcknowledgeNthInvalidation(id, n);
+ }
+
+ void DropNthInvalidation(const ObjectId& id, size_t n) {
+ return fake_delegate_.DropNthInvalidation(id, n);
+ }
+
+ void RecoverFromDropEvent(const ObjectId& id) {
+ return fake_delegate_.RecoverFromDropEvent(id);
+ }
+
+ void AcknowledgeAll(const ObjectId& id) {
+ fake_delegate_.AcknowledgeAll(id);
+ }
+
+ InvalidatorState GetInvalidatorState() const {
+ return fake_delegate_.GetInvalidatorState();
}
std::string GetInvalidatorClientId() const {
@@ -324,6 +371,29 @@ class SyncInvalidationListenerTest : public testing::Test {
return fake_tracker_.GetBootstrapData();
}
+ UnackedInvalidationsMap GetSavedInvalidations() {
+ // Allow any queued writes to go through first.
+ FlushPendingWrites();
+ return fake_tracker_.GetSavedInvalidations();
+ }
+
+ SingleObjectInvalidationSet GetSavedInvalidationsForType(const ObjectId& id) {
+ const UnackedInvalidationsMap& saved_state = GetSavedInvalidations();
+ UnackedInvalidationsMap::const_iterator it =
+ saved_state.find(id);
+ if (it == saved_state.end()) {
+ ADD_FAILURE() << "No state saved for ID " << ObjectIdToString(id);
+ return SingleObjectInvalidationSet();
+ }
+ ObjectIdInvalidationMap map;
+ it->second.ExportInvalidations(WeakHandle<AckHandler>(), &map);
+ if (map.Empty()) {
+ return SingleObjectInvalidationSet();
+ } else {
+ return map.ForObject(id);
+ }
+ }
+
ObjectIdSet GetRegisteredIds() const {
return fake_invalidation_client_->GetRegisteredIds();
}
@@ -340,9 +410,6 @@ class SyncInvalidationListenerTest : public testing::Test {
const AckHandle ack_handle("fakedata");
fake_invalidation_client_->ClearAckedHandles();
listener_.Invalidate(fake_invalidation_client_, inv, ack_handle);
- // Pump message loop to trigger InvalidationStateTracker::SetMaxVersion()
- // and callback from InvalidationStateTracker::GenerateAckHandles().
- message_loop_.RunUntilIdle();
EXPECT_TRUE(fake_invalidation_client_->IsAckedHandle(ack_handle));
}
@@ -350,11 +417,9 @@ class SyncInvalidationListenerTest : public testing::Test {
void FireInvalidateUnknownVersion(const ObjectId& object_id) {
const AckHandle ack_handle("fakedata_unknown");
fake_invalidation_client_->ClearAckedHandles();
- listener_.InvalidateUnknownVersion(fake_invalidation_client_, object_id,
- ack_handle);
- // Pump message loop to trigger callback from
- // InvalidationStateTracker::GenerateAckHandles().
- message_loop_.RunUntilIdle();
+ listener_.InvalidateUnknownVersion(fake_invalidation_client_,
+ object_id,
+ ack_handle);
EXPECT_TRUE(fake_invalidation_client_->IsAckedHandle(ack_handle));
}
@@ -362,16 +427,18 @@ class SyncInvalidationListenerTest : public testing::Test {
const AckHandle ack_handle("fakedata_all");
fake_invalidation_client_->ClearAckedHandles();
listener_.InvalidateAll(fake_invalidation_client_, ack_handle);
- // Pump message loop to trigger callback from
- // InvalidationStateTracker::GenerateAckHandles().
- message_loop_.RunUntilIdle();
EXPECT_TRUE(fake_invalidation_client_->IsAckedHandle(ack_handle));
}
void WriteState(const std::string& new_state) {
listener_.WriteState(new_state);
+
// Pump message loop to trigger
// InvalidationStateTracker::WriteState().
+ FlushPendingWrites();
+ }
+
+ void FlushPendingWrites() {
message_loop_.RunUntilIdle();
}
@@ -383,29 +450,6 @@ class SyncInvalidationListenerTest : public testing::Test {
fake_push_client_->DisableNotifications(reason);
}
- void VerifyUnacknowledged(const ObjectId& object_id) {
- InvalidationStateMap state_map = fake_tracker_.GetAllInvalidationStates();
- EXPECT_THAT(state_map[object_id].current,
- Not(Eq(state_map[object_id].expected)));
- EXPECT_EQ(listener_.GetStateMapForTest(), state_map);
- }
-
- void VerifyAcknowledged(const ObjectId& object_id) {
- InvalidationStateMap state_map = fake_tracker_.GetAllInvalidationStates();
- EXPECT_THAT(state_map[object_id].current,
- Eq(state_map[object_id].expected));
- EXPECT_EQ(listener_.GetStateMapForTest(), state_map);
- }
-
- void AcknowledgeAndVerify(const ObjectId& object_id) {
- VerifyUnacknowledged(object_id);
- fake_delegate_.Acknowledge(object_id);
- // Pump message loop to trigger
- // InvalidationStateTracker::Acknowledge().
- message_loop_.RunUntilIdle();
- VerifyAcknowledged(object_id);
- }
-
const ObjectId kBookmarksId_;
const ObjectId kPreferencesId_;
const ObjectId kExtensionsId_;
@@ -415,13 +459,14 @@ class SyncInvalidationListenerTest : public testing::Test {
private:
base::MessageLoop message_loop_;
- FakeInvalidationStateTracker fake_tracker_;
notifier::FakePushClient* const fake_push_client_;
protected:
+ // A derived test needs direct access to this.
+ FakeInvalidationStateTracker fake_tracker_;
+
// Tests need to access these directly.
FakeInvalidationClient* fake_invalidation_client_;
- FakeTickClock tick_clock_;
SyncInvalidationListener listener_;
private:
@@ -445,11 +490,10 @@ TEST_F(SyncInvalidationListenerTest, InvalidateNoPayload) {
FireInvalidate(id, kVersion1, NULL);
- EXPECT_EQ(1, GetInvalidationCount(id));
+ ASSERT_EQ(1U, GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
EXPECT_EQ(kVersion1, GetVersion(id));
EXPECT_EQ("", GetPayload(id));
- EXPECT_EQ(kVersion1, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
}
// Fire an invalidation with an empty payload. It should be
@@ -460,11 +504,10 @@ TEST_F(SyncInvalidationListenerTest, InvalidateEmptyPayload) {
FireInvalidate(id, kVersion1, "");
- EXPECT_EQ(1, GetInvalidationCount(id));
+ ASSERT_EQ(1U, GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
EXPECT_EQ(kVersion1, GetVersion(id));
EXPECT_EQ("", GetPayload(id));
- EXPECT_EQ(kVersion1, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
}
// Fire an invalidation with a payload. It should be processed, and
@@ -474,262 +517,133 @@ TEST_F(SyncInvalidationListenerTest, InvalidateWithPayload) {
FireInvalidate(id, kVersion1, kPayload1);
- EXPECT_EQ(1, GetInvalidationCount(id));
+ ASSERT_EQ(1U, GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
EXPECT_EQ(kVersion1, GetVersion(id));
EXPECT_EQ(kPayload1, GetPayload(id));
- EXPECT_EQ(kVersion1, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
+}
+
+// Fire ten invalidations in a row. All should be received.
+TEST_F(SyncInvalidationListenerTest, ManyInvalidations_NoDrop) {
+ const int kRepeatCount = 10;
+ const ObjectId& id = kPreferencesId_;
+ int64 initial_version = kVersion1;
+ for (int64 i = initial_version; i < initial_version + kRepeatCount; ++i) {
+ FireInvalidate(id, i, kPayload1);
+ }
+ ASSERT_EQ(static_cast<size_t>(kRepeatCount), GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
+ EXPECT_EQ(kPayload1, GetPayload(id));
+ EXPECT_EQ(initial_version + kRepeatCount - 1, GetVersion(id));
}
// Fire an invalidation for an unregistered object ID with a payload. It should
// be delivered once the ID is registered, with its payload and version intact.
-TEST_F(SyncInvalidationListenerTest, InvalidateUnregisteredWithPayload) {
- const ObjectId kUnregisteredId(
- kChromeSyncSourceId, "unregistered");
+TEST_F(SyncInvalidationListenerTest, InvalidateBeforeRegistration_Simple) {
+ const ObjectId kUnregisteredId(kChromeSyncSourceId, "unregistered");
const ObjectId& id = kUnregisteredId;
+ ObjectIdSet ids;
+ ids.insert(id);
- EXPECT_EQ(0, GetInvalidationCount(id));
- EXPECT_EQ("", GetPayload(id));
- EXPECT_EQ(kMinVersion, GetMaxVersion(id));
+ EXPECT_EQ(0U, GetInvalidationCount(id));
+
+ FireInvalidate(id, kVersion1, kPayload1);
- FireInvalidate(id, kVersion1, "unregistered payload");
+ ASSERT_EQ(0U, GetInvalidationCount(id));
+
+ EnableNotifications();
+ listener_.Ready(fake_invalidation_client_);
+ listener_.UpdateRegisteredIds(ids);
- EXPECT_EQ(1, GetInvalidationCount(id));
+ ASSERT_EQ(1U, GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
EXPECT_EQ(kVersion1, GetVersion(id));
- EXPECT_EQ("unregistered payload", GetPayload(id));
- EXPECT_EQ(kVersion1, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
+ EXPECT_EQ(kPayload1, GetPayload(id));
+}
+
+// Fire ten invalidations before an object registers. Some invalidations will
+// be dropped and replaced with an unknown version invalidation.
+TEST_F(SyncInvalidationListenerTest, InvalidateBeforeRegistration_Drop) {
+ const int kRepeatCount =
+ UnackedInvalidationSet::kMaxBufferedInvalidations + 1;
+ const ObjectId kUnregisteredId(kChromeSyncSourceId, "unregistered");
+ const ObjectId& id = kUnregisteredId;
+ ObjectIdSet ids;
+ ids.insert(id);
+
+ EXPECT_EQ(0U, GetInvalidationCount(id));
+
+ int64 initial_version = kVersion1;
+ for (int64 i = initial_version; i < initial_version + kRepeatCount; ++i) {
+ FireInvalidate(id, i, kPayload1);
+ }
+
+ EnableNotifications();
+ listener_.Ready(fake_invalidation_client_);
+ listener_.UpdateRegisteredIds(ids);
+
+ ASSERT_EQ(UnackedInvalidationSet::kMaxBufferedInvalidations,
+ GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
+ EXPECT_EQ(initial_version + kRepeatCount - 1, GetVersion(id));
+ EXPECT_EQ(kPayload1, GetPayload(id));
+ EXPECT_TRUE(StartsWithUnknownVersion(id));
}
-// Fire an invalidation, then fire another one with a lower version.
-// The first one should be processed and should update the payload and
-// version, but the second one shouldn't.
+// Fire an invalidation, then fire another one with a lower version. Both
+// should be received.
TEST_F(SyncInvalidationListenerTest, InvalidateVersion) {
const ObjectId& id = kPreferencesId_;
FireInvalidate(id, kVersion2, kPayload2);
- EXPECT_EQ(1, GetInvalidationCount(id));
+ ASSERT_EQ(1U, GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
EXPECT_EQ(kVersion2, GetVersion(id));
EXPECT_EQ(kPayload2, GetPayload(id));
- EXPECT_EQ(kVersion2, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
FireInvalidate(id, kVersion1, kPayload1);
- EXPECT_EQ(1, GetInvalidationCount(id));
- EXPECT_EQ(kVersion2, GetVersion(id));
- EXPECT_EQ(kPayload2, GetPayload(id));
- EXPECT_EQ(kVersion2, GetMaxVersion(id));
- VerifyAcknowledged(id);
+ ASSERT_EQ(2U, GetInvalidationCount(id));
+ ASSERT_FALSE(IsUnknownVersion(id));
+
+ EXPECT_EQ(kVersion1, GetVersion(id));
+ EXPECT_EQ(kPayload1, GetPayload(id));
}
-// Fire an invalidation with an unknown version twice. It shouldn't
-// update the payload or version either time, but it should still be
-// processed.
+// Fire an invalidation with an unknown version.
TEST_F(SyncInvalidationListenerTest, InvalidateUnknownVersion) {
const ObjectId& id = kBookmarksId_;
FireInvalidateUnknownVersion(id);
- EXPECT_EQ(1, GetInvalidationCount(id));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(id));
- EXPECT_EQ("", GetPayload(id));
- EXPECT_EQ(kMinVersion, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
-
- FireInvalidateUnknownVersion(id);
-
- EXPECT_EQ(2, GetInvalidationCount(id));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(id));
- EXPECT_EQ("", GetPayload(id));
- EXPECT_EQ(kMinVersion, GetMaxVersion(id));
- AcknowledgeAndVerify(id);
+ ASSERT_EQ(1U, GetInvalidationCount(id));
+ EXPECT_TRUE(IsUnknownVersion(id));
}
-// Fire an invalidation for all enabled IDs. It shouldn't update the
-// payload or version, but it should still invalidate the IDs.
+// Fire an invalidation for all enabled IDs.
TEST_F(SyncInvalidationListenerTest, InvalidateAll) {
FireInvalidateAll();
for (ObjectIdSet::const_iterator it = registered_ids_.begin();
it != registered_ids_.end(); ++it) {
- EXPECT_EQ(1, GetInvalidationCount(*it));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(*it));
- EXPECT_EQ("", GetPayload(*it));
- EXPECT_EQ(kMinVersion, GetMaxVersion(*it));
- AcknowledgeAndVerify(*it);
+ ASSERT_EQ(1U, GetInvalidationCount(*it));
+ EXPECT_TRUE(IsUnknownVersion(*it));
}
}
-// Comprehensive test of various scenarios for multiple IDs.
+// Test a simple scenario for multiple IDs.
TEST_F(SyncInvalidationListenerTest, InvalidateMultipleIds) {
FireInvalidate(kBookmarksId_, 3, NULL);
- EXPECT_EQ(1, GetInvalidationCount(kBookmarksId_));
+ ASSERT_EQ(1U, GetInvalidationCount(kBookmarksId_));
+ ASSERT_FALSE(IsUnknownVersion(kBookmarksId_));
EXPECT_EQ(3, GetVersion(kBookmarksId_));
EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
- AcknowledgeAndVerify(kBookmarksId_);
+ // kExtensionsId_ is not registered, so the invalidation should not get
+ // through.
FireInvalidate(kExtensionsId_, 2, NULL);
-
- EXPECT_EQ(1, GetInvalidationCount(kExtensionsId_));
- EXPECT_EQ(2, GetVersion(kExtensionsId_));
- EXPECT_EQ("", GetPayload(kExtensionsId_));
- EXPECT_EQ(2, GetMaxVersion(kExtensionsId_));
- AcknowledgeAndVerify(kExtensionsId_);
-
- // Invalidations with lower version numbers should be ignored.
-
- FireInvalidate(kBookmarksId_, 1, NULL);
-
- EXPECT_EQ(1, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
-
- FireInvalidate(kExtensionsId_, 1, NULL);
-
- EXPECT_EQ(1, GetInvalidationCount(kExtensionsId_));
- EXPECT_EQ(2, GetVersion(kExtensionsId_));
- EXPECT_EQ("", GetPayload(kExtensionsId_));
- EXPECT_EQ(2, GetMaxVersion(kExtensionsId_));
-
- // InvalidateAll shouldn't change any version state.
-
- FireInvalidateAll();
-
- EXPECT_EQ(2, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
- AcknowledgeAndVerify(kBookmarksId_);
-
- EXPECT_EQ(1, GetInvalidationCount(kPreferencesId_));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(kPreferencesId_));
- EXPECT_EQ("", GetPayload(kPreferencesId_));
- EXPECT_EQ(kMinVersion, GetMaxVersion(kPreferencesId_));
- AcknowledgeAndVerify(kPreferencesId_);
-
- // Note that kExtensionsId_ is not registered, so InvalidateAll() shouldn't
- // affect it.
- EXPECT_EQ(1, GetInvalidationCount(kExtensionsId_));
- EXPECT_EQ(2, GetVersion(kExtensionsId_));
- EXPECT_EQ("", GetPayload(kExtensionsId_));
- EXPECT_EQ(2, GetMaxVersion(kExtensionsId_));
- VerifyAcknowledged(kExtensionsId_);
-
- // Invalidations with higher version numbers should be processed.
-
- FireInvalidate(kPreferencesId_, 5, NULL);
- EXPECT_EQ(2, GetInvalidationCount(kPreferencesId_));
- EXPECT_EQ(5, GetVersion(kPreferencesId_));
- EXPECT_EQ("", GetPayload(kPreferencesId_));
- EXPECT_EQ(5, GetMaxVersion(kPreferencesId_));
- AcknowledgeAndVerify(kPreferencesId_);
-
- FireInvalidate(kExtensionsId_, 3, NULL);
- EXPECT_EQ(2, GetInvalidationCount(kExtensionsId_));
- EXPECT_EQ(3, GetVersion(kExtensionsId_));
- EXPECT_EQ("", GetPayload(kExtensionsId_));
- EXPECT_EQ(3, GetMaxVersion(kExtensionsId_));
- AcknowledgeAndVerify(kExtensionsId_);
-
- FireInvalidate(kBookmarksId_, 4, NULL);
- EXPECT_EQ(3, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(4, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(4, GetMaxVersion(kBookmarksId_));
- AcknowledgeAndVerify(kBookmarksId_);
-}
-
-// Various tests for the local invalidation feature.
-// Tests a "normal" scenario. We allow one timeout period to expire by sending
-// ack handles that are not the "latest" ack handle. Once the timeout expires,
-// we verify that we get a second callback and then acknowledge it. Once
-// acknowledged, no further timeouts should occur.
-TEST_F(SyncInvalidationListenerTest, InvalidateOneTimeout) {
- listener_.GetAckTrackerForTest()->SetCreateBackoffEntryCallbackForTest(
- base::Bind(&CreateMockEntry, &tick_clock_));
-
- // Trigger the initial invalidation.
- FireInvalidate(kBookmarksId_, 3, NULL);
- EXPECT_EQ(1, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
- VerifyUnacknowledged(kBookmarksId_);
-
- // Trigger one timeout.
- tick_clock_.LeapForward(60);
- EXPECT_TRUE(listener_.GetAckTrackerForTest()->TriggerTimeoutAtForTest(
- tick_clock_.NowTicks()));
- EXPECT_EQ(2, GetInvalidationCount(kBookmarksId_));
- // Other properties should remain the same.
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
-
- AcknowledgeAndVerify(kBookmarksId_);
-
- // No more invalidations should remain in the queue.
- EXPECT_TRUE(listener_.GetAckTrackerForTest()->IsQueueEmptyForTest());
-}
-
-// Test that an unacknowledged invalidation triggers reminders if the listener
-// is restarted.
-TEST_F(SyncInvalidationListenerTest, InvalidationTimeoutRestart) {
- listener_.GetAckTrackerForTest()->SetCreateBackoffEntryCallbackForTest(
- base::Bind(&CreateMockEntry, &tick_clock_));
-
- FireInvalidate(kBookmarksId_, 3, NULL);
- EXPECT_EQ(1, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
-
- // Trigger one timeout.
- tick_clock_.LeapForward(60);
- EXPECT_TRUE(listener_.GetAckTrackerForTest()->TriggerTimeoutAtForTest(
- tick_clock_.NowTicks()));
- EXPECT_EQ(2, GetInvalidationCount(kBookmarksId_));
- // Other properties should remain the same.
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
-
- // Restarting the client should reset the retry count and the timeout period
- // (e.g. it shouldn't increase to 120 seconds). Skip ahead 1200 seconds to be
- // on the safe side.
- StopClient();
- tick_clock_.LeapForward(1200);
- StartClient();
-
- // The bookmark invalidation state should not have changed.
- EXPECT_EQ(2, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
-
- // Now trigger the invalidation reminder after the client restarts.
- tick_clock_.LeapForward(60);
- EXPECT_TRUE(listener_.GetAckTrackerForTest()->TriggerTimeoutAtForTest(
- tick_clock_.NowTicks()));
- EXPECT_EQ(3, GetInvalidationCount(kBookmarksId_));
- // Other properties should remain the same.
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
- EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
-
- AcknowledgeAndVerify(kBookmarksId_);
-
- // No more invalidations should remain in the queue.
- EXPECT_TRUE(listener_.GetAckTrackerForTest()->IsQueueEmptyForTest());
-
- // The queue should remain empty when we restart now.
- RestartClient();
- EXPECT_TRUE(listener_.GetAckTrackerForTest()->IsQueueEmptyForTest());
+ ASSERT_EQ(0U, GetInvalidationCount(kExtensionsId_));
}
// Registration tests.
@@ -871,22 +785,183 @@ TEST_F(SyncInvalidationListenerTest, RegisterTypesPreserved) {
// Make sure that state is correctly purged from the local invalidation state
// map cache when an ID is unregistered.
TEST_F(SyncInvalidationListenerTest, UnregisterCleansUpStateMapCache) {
+ const ObjectId& id = kBookmarksId_;
listener_.Ready(fake_invalidation_client_);
- EXPECT_TRUE(listener_.GetStateMapForTest().empty());
- FireInvalidate(kBookmarksId_, 1, "hello");
- EXPECT_EQ(1U, listener_.GetStateMapForTest().size());
- EXPECT_TRUE(ContainsKey(listener_.GetStateMapForTest(), kBookmarksId_));
+ EXPECT_TRUE(GetSavedInvalidations().empty());
+ FireInvalidate(id, 1, "hello");
+ EXPECT_EQ(1U, GetSavedInvalidations().size());
+ EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), id));
FireInvalidate(kPreferencesId_, 2, "world");
- EXPECT_EQ(2U, listener_.GetStateMapForTest().size());
- EXPECT_TRUE(ContainsKey(listener_.GetStateMapForTest(), kBookmarksId_));
- EXPECT_TRUE(ContainsKey(listener_.GetStateMapForTest(), kPreferencesId_));
+ EXPECT_EQ(2U, GetSavedInvalidations().size());
+
+ EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), id));
+ EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), kPreferencesId_));
ObjectIdSet ids;
- ids.insert(kBookmarksId_);
+ ids.insert(id);
listener_.UpdateRegisteredIds(ids);
- EXPECT_EQ(1U, listener_.GetStateMapForTest().size());
- EXPECT_TRUE(ContainsKey(listener_.GetStateMapForTest(), kBookmarksId_));
+ EXPECT_EQ(1U, GetSavedInvalidations().size());
+ EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), id));
+}
+
+TEST_F(SyncInvalidationListenerTest, DuplicateInvalidations_Simple) {
+ const ObjectId& id = kBookmarksId_;
+ listener_.Ready(fake_invalidation_client_);
+
+ // Send a stream of invalidations, including two copies of the second.
+ FireInvalidate(id, 1, "one");
+ FireInvalidate(id, 2, "two");
+ FireInvalidate(id, 3, "three");
+ FireInvalidate(id, 2, "two");
+
+ // Expect that the duplicate was discarded.
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ EXPECT_EQ(3U, list.GetSize());
+ SingleObjectInvalidationSet::const_iterator it = list.begin();
+ EXPECT_EQ(1, it->version());
+ it++;
+ EXPECT_EQ(2, it->version());
+ it++;
+ EXPECT_EQ(3, it->version());
+}
+
+TEST_F(SyncInvalidationListenerTest, DuplicateInvalidations_NearBufferLimit) {
+ const size_t kPairsToSend = UnackedInvalidationSet::kMaxBufferedInvalidations;
+ const ObjectId& id = kBookmarksId_;
+ listener_.Ready(fake_invalidation_client_);
+
+ // We will have enough buffer space in the state tracker for all these
+ // invalidations only if duplicates are ignored.
+ for (size_t i = 0; i < kPairsToSend; ++i) {
+ FireInvalidate(id, i, "payload");
+ FireInvalidate(id, i, "payload");
+ }
+
+ // Expect that the state map ignored duplicates.
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ EXPECT_EQ(kPairsToSend, list.GetSize());
+ EXPECT_FALSE(list.begin()->is_unknown_version());
+
+ // Expect that all invalidations (including duplicates) were emitted.
+ EXPECT_EQ(kPairsToSend * 2, GetInvalidationCount(id));
+
+ // Acknowledge all invalidations to clear the internal state.
+ AcknowledgeAll(id);
+ EXPECT_TRUE(GetSavedInvalidationsForType(id).IsEmpty());
+}
+
+TEST_F(SyncInvalidationListenerTest, DuplicateInvalidations_UnknownVersion) {
+ const ObjectId& id = kBookmarksId_;
+ listener_.Ready(fake_invalidation_client_);
+
+ FireInvalidateUnknownVersion(id);
+ FireInvalidateUnknownVersion(id);
+
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ EXPECT_EQ(1U, list.GetSize());
+ }
+
+ // Acknowledge the second. There should be no effect on the stored list.
+ ASSERT_EQ(2U, GetInvalidationCount(id));
+ AcknowledgeNthInvalidation(id, 1);
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ EXPECT_EQ(1U, list.GetSize());
+ }
+
+ // Acknowledge the first. This should remove the invalidation from the list.
+ ASSERT_EQ(2U, GetInvalidationCount(id));
+ AcknowledgeNthInvalidation(id, 0);
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ EXPECT_EQ(0U, list.GetSize());
+ }
+}
+
+// Make sure that acknowledgements erase items from the local store.
+TEST_F(SyncInvalidationListenerTest, AcknowledgementsCleanUpStateMapCache) {
+ const ObjectId& id = kBookmarksId_;
+ listener_.Ready(fake_invalidation_client_);
+
+ EXPECT_TRUE(GetSavedInvalidations().empty());
+ FireInvalidate(id, 10, "hello");
+ FireInvalidate(id, 20, "world");
+ FireInvalidateUnknownVersion(id);
+
+ // Expect that all three invalidations have been saved to permanent storage.
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ ASSERT_EQ(3U, list.GetSize());
+ EXPECT_TRUE(list.begin()->is_unknown_version());
+ EXPECT_EQ(20, list.back().version());
+ }
+
+ // Acknowledge the second sent invalidation (version 20) and verify it was
+ // removed from storage.
+ AcknowledgeNthInvalidation(id, 1);
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ ASSERT_EQ(2U, list.GetSize());
+ EXPECT_TRUE(list.begin()->is_unknown_version());
+ EXPECT_EQ(10, list.back().version());
+ }
+
+ // Acknowledge the last sent invalidation (unknown version) and verify it was
+ // removed from storage.
+ AcknowledgeNthInvalidation(id, 2);
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ ASSERT_EQ(1U, list.GetSize());
+ EXPECT_FALSE(list.begin()->is_unknown_version());
+ EXPECT_EQ(10, list.back().version());
+ }
+}
+
+// Make sure that drops erase items from the local store.
+TEST_F(SyncInvalidationListenerTest, DropsCleanUpStateMapCache) {
+ const ObjectId& id = kBookmarksId_;
+ listener_.Ready(fake_invalidation_client_);
+
+ EXPECT_TRUE(GetSavedInvalidations().empty());
+ FireInvalidate(id, 10, "hello");
+ FireInvalidate(id, 20, "world");
+ FireInvalidateUnknownVersion(id);
+
+ // Expect that all three invalidations have been saved to permanent storage.
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ ASSERT_EQ(3U, list.GetSize());
+ EXPECT_TRUE(list.begin()->is_unknown_version());
+ EXPECT_EQ(20, list.back().version());
+ }
+
+ // Drop the second sent invalidation (version 20) and verify it was removed
+ // from storage. Also verify we still have an unknown version invalidation.
+ DropNthInvalidation(id, 1);
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ ASSERT_EQ(2U, list.GetSize());
+ EXPECT_TRUE(list.begin()->is_unknown_version());
+ EXPECT_EQ(10, list.back().version());
+ }
+
+ // Drop the remaining invalidation. Verify an unknown version is all that
+ // remains.
+ DropNthInvalidation(id, 0);
+ {
+ SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
+ ASSERT_EQ(1U, list.GetSize());
+ EXPECT_TRUE(list.begin()->is_unknown_version());
+ }
+
+ // Announce that the delegate has recovered from the drop. Verify no
+ // invalidations remain saved.
+ RecoverFromDropEvent(id);
+ EXPECT_TRUE(GetSavedInvalidationsForType(id).IsEmpty());
+
+ RecoverFromDropEvent(id);
}
// Without readying the client, disable notifications, then enable
@@ -993,6 +1068,60 @@ TEST_F(SyncInvalidationListenerTest, InvalidationClientAuthError) {
EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
}
+// A variant of SyncInvalidationListenerTest that starts with some initial
+// state. We make no attempt to abstract away the contents of this state. The
+// tests that make use of this harness depend on its implementation details.
+class SyncInvalidationListenerTest_WithInitialState
+ : public SyncInvalidationListenerTest {
+ public:
+ virtual void SetUp() {
+ UnackedInvalidationSet bm_state(kBookmarksId_);
+ UnackedInvalidationSet ext_state(kExtensionsId_);
+
+ Invalidation bm_unknown = Invalidation::InitUnknownVersion(kBookmarksId_);
+ Invalidation bm_v100 = Invalidation::Init(kBookmarksId_, 100, "hundred");
+ bm_state.Add(bm_unknown);
+ bm_state.Add(bm_v100);
+
+ Invalidation ext_v10 = Invalidation::Init(kExtensionsId_, 10, "ten");
+ Invalidation ext_v20 = Invalidation::Init(kExtensionsId_, 20, "twenty");
+ ext_state.Add(ext_v10);
+ ext_state.Add(ext_v20);
+
+ initial_state.insert(std::make_pair(kBookmarksId_, bm_state));
+ initial_state.insert(std::make_pair(kExtensionsId_, ext_state));
+
+ fake_tracker_.SetSavedInvalidations(initial_state);
+
+ SyncInvalidationListenerTest::SetUp();
+ }
+
+ UnackedInvalidationsMap initial_state;
+};
+
+// Verify that saved invalidations are forwarded when handlers register.
+TEST_F(SyncInvalidationListenerTest_WithInitialState,
+ ReceiveSavedInvalidations) {
+ EnableNotifications();
+ listener_.Ready(fake_invalidation_client_);
+
+ EXPECT_THAT(initial_state, test_util::Eq(GetSavedInvalidations()));
+
+ ASSERT_EQ(2U, GetInvalidationCount(kBookmarksId_));
+ EXPECT_EQ(100, GetVersion(kBookmarksId_));
+
+ ASSERT_EQ(0U, GetInvalidationCount(kExtensionsId_));
+
+ FireInvalidate(kExtensionsId_, 30, "thirty");
+
+ ObjectIdSet ids = GetRegisteredIds();
+ ids.insert(kExtensionsId_);
+ listener_.UpdateRegisteredIds(ids);
+
+ ASSERT_EQ(3U, GetInvalidationCount(kExtensionsId_));
+ EXPECT_EQ(30, GetVersion(kExtensionsId_));
+}
+
} // namespace
} // namespace syncer
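
The drop-and-replace behaviour exercised by InvalidateBeforeRegistration_Drop and DropsCleanUpStateMapCache above can be summarised with a short sketch (illustrative only, not part of the patch; the helper name ExpectOverflowedList is hypothetical, and it assumes UnackedInvalidationSet::kMaxBufferedInvalidations keeps the value 5 defined later in this change):

    // Hypothetical helper: expected shape of a list after buffer overflow.
    // The oldest entries collapse into one unknown-version placeholder at the
    // front; the newest entries keep their versions and payloads.
    void ExpectOverflowedList(const SingleObjectInvalidationSet& list) {
      ASSERT_EQ(UnackedInvalidationSet::kMaxBufferedInvalidations,
                list.GetSize());
      SingleObjectInvalidationSet::const_iterator it = list.begin();
      EXPECT_TRUE(it->is_unknown_version());
      for (++it; it != list.end(); ++it)
        EXPECT_FALSE(it->is_unknown_version());
    }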
diff --git a/chromium/sync/notifier/sync_system_resources.cc b/chromium/sync/notifier/sync_system_resources.cc
index be50a82212d..99e3ee5ea5d 100644
--- a/chromium/sync/notifier/sync_system_resources.cc
+++ b/chromium/sync/notifier/sync_system_resources.cc
@@ -14,9 +14,9 @@
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
+#include "google/cacheinvalidation/client_gateway.pb.h"
#include "google/cacheinvalidation/deps/callback.h"
#include "google/cacheinvalidation/include/types.h"
-#include "jingle/notifier/listener/push_client.h"
#include "sync/notifier/invalidation_util.h"
namespace syncer {
@@ -61,10 +61,10 @@ void SyncLogger::SetSystemResources(invalidation::SystemResources* resources) {
}
SyncInvalidationScheduler::SyncInvalidationScheduler()
- : weak_factory_(this),
- created_on_loop_(base::MessageLoop::current()),
+ : created_on_loop_(base::MessageLoop::current()),
is_started_(false),
- is_stopped_(false) {
+ is_stopped_(false),
+ weak_factory_(this) {
CHECK(created_on_loop_);
}
@@ -128,6 +128,134 @@ void SyncInvalidationScheduler::RunPostedTask(invalidation::Closure* task) {
delete task;
}
+SyncNetworkChannel::SyncNetworkChannel()
+ : invalidator_state_(DEFAULT_INVALIDATION_ERROR),
+ scheduling_hash_(0) {
+}
+
+SyncNetworkChannel::~SyncNetworkChannel() {
+ STLDeleteElements(&network_status_receivers_);
+}
+
+void SyncNetworkChannel::SendMessage(const std::string& outgoing_message) {
+ std::string encoded_message;
+ EncodeMessage(&encoded_message, outgoing_message, service_context_,
+ scheduling_hash_);
+ SendEncodedMessage(encoded_message);
+}
+
+void SyncNetworkChannel::SetMessageReceiver(
+ invalidation::MessageCallback* incoming_receiver) {
+ incoming_receiver_.reset(incoming_receiver);
+}
+
+void SyncNetworkChannel::AddNetworkStatusReceiver(
+ invalidation::NetworkStatusCallback* network_status_receiver) {
+ network_status_receiver->Run(invalidator_state_ == INVALIDATIONS_ENABLED);
+ network_status_receivers_.push_back(network_status_receiver);
+}
+
+void SyncNetworkChannel::SetSystemResources(
+ invalidation::SystemResources* resources) {
+ // Do nothing.
+}
+
+void SyncNetworkChannel::AddObserver(Observer* observer) {
+ observers_.AddObserver(observer);
+}
+
+void SyncNetworkChannel::RemoveObserver(Observer* observer) {
+ observers_.RemoveObserver(observer);
+}
+
+const std::string& SyncNetworkChannel::GetServiceContextForTest() const {
+ return service_context_;
+}
+
+int64 SyncNetworkChannel::GetSchedulingHashForTest() const {
+ return scheduling_hash_;
+}
+
+std::string SyncNetworkChannel::EncodeMessageForTest(
+ const std::string& message, const std::string& service_context,
+ int64 scheduling_hash) {
+ std::string encoded_message;
+ EncodeMessage(&encoded_message, message, service_context, scheduling_hash);
+ return encoded_message;
+}
+
+bool SyncNetworkChannel::DecodeMessageForTest(
+ const std::string& data,
+ std::string* message,
+ std::string* service_context,
+ int64* scheduling_hash) {
+ return DecodeMessage(data, message, service_context, scheduling_hash);
+}
+
+void SyncNetworkChannel::NotifyStateChange(InvalidatorState invalidator_state) {
+ // Remember state for future NetworkStatusReceivers.
+ invalidator_state_ = invalidator_state;
+ // Notify NetworkStatusReceivers in cacheinvalidation.
+ for (NetworkStatusReceiverList::const_iterator it =
+ network_status_receivers_.begin();
+ it != network_status_receivers_.end(); ++it) {
+ (*it)->Run(invalidator_state_ == INVALIDATIONS_ENABLED);
+ }
+ // Notify observers.
+ FOR_EACH_OBSERVER(Observer, observers_,
+ OnNetworkChannelStateChanged(invalidator_state_));
+}
+
+void SyncNetworkChannel::DeliverIncomingMessage(const std::string& data) {
+ if (!incoming_receiver_) {
+ DLOG(ERROR) << "No receiver for incoming notification";
+ return;
+ }
+ std::string message;
+ if (!DecodeMessage(data,
+ &message, &service_context_, &scheduling_hash_)) {
+ DLOG(ERROR) << "Could not parse ClientGatewayMessage";
+ return;
+ }
+ incoming_receiver_->Run(message);
+}
+
+void SyncNetworkChannel::EncodeMessage(
+ std::string* encoded_message,
+ const std::string& message,
+ const std::string& service_context,
+ int64 scheduling_hash) {
+ ipc::invalidation::ClientGatewayMessage envelope;
+ envelope.set_is_client_to_server(true);
+ if (!service_context.empty()) {
+ envelope.set_service_context(service_context);
+ envelope.set_rpc_scheduling_hash(scheduling_hash);
+ }
+ envelope.set_network_message(message);
+ envelope.SerializeToString(encoded_message);
+}
+
+bool SyncNetworkChannel::DecodeMessage(
+ const std::string& data,
+ std::string* message,
+ std::string* service_context,
+ int64* scheduling_hash) {
+ ipc::invalidation::ClientGatewayMessage envelope;
+ if (!envelope.ParseFromString(data)) {
+ return false;
+ }
+ *message = envelope.network_message();
+ if (envelope.has_service_context()) {
+ *service_context = envelope.service_context();
+ }
+ if (envelope.has_rpc_scheduling_hash()) {
+ *scheduling_hash = envelope.rpc_scheduling_hash();
+ }
+ return true;
+}
+
SyncStorage::SyncStorage(StateWriter* state_writer,
invalidation::Scheduler* scheduler)
: state_writer_(state_writer),
@@ -195,14 +323,14 @@ void SyncStorage::RunAndDeleteReadKeyCallback(
}
SyncSystemResources::SyncSystemResources(
- scoped_ptr<notifier::PushClient> push_client,
+ SyncNetworkChannel* sync_network_channel,
StateWriter* state_writer)
: is_started_(false),
logger_(new SyncLogger()),
internal_scheduler_(new SyncInvalidationScheduler()),
listener_scheduler_(new SyncInvalidationScheduler()),
storage_(new SyncStorage(state_writer, internal_scheduler_.get())),
- push_client_channel_(push_client.Pass()) {
+ sync_network_channel_(sync_network_channel) {
}
SyncSystemResources::~SyncSystemResources() {
@@ -240,8 +368,8 @@ SyncStorage* SyncSystemResources::storage() {
return storage_.get();
}
-PushClientChannel* SyncSystemResources::network() {
- return &push_client_channel_;
+SyncNetworkChannel* SyncSystemResources::network() {
+ return sync_network_channel_;
}
SyncInvalidationScheduler* SyncSystemResources::internal_scheduler() {
diff --git a/chromium/sync/notifier/sync_system_resources.h b/chromium/sync/notifier/sync_system_resources.h
index e333bb24f94..3ddc7087c94 100644
--- a/chromium/sync/notifier/sync_system_resources.h
+++ b/chromium/sync/notifier/sync_system_resources.h
@@ -20,13 +20,9 @@
#include "base/threading/non_thread_safe.h"
#include "google/cacheinvalidation/include/system-resources.h"
#include "sync/base/sync_export.h"
-#include "sync/notifier/push_client_channel.h"
+#include "sync/notifier/invalidator_state.h"
#include "sync/notifier/state_writer.h"
-namespace notifier {
-class PushClient;
-} // namespace notifier
-
namespace syncer {
class SyncLogger : public invalidation::Logger {
@@ -65,7 +61,9 @@ class SyncInvalidationScheduler : public invalidation::Scheduler {
invalidation::SystemResources* resources) OVERRIDE;
private:
- base::WeakPtrFactory<SyncInvalidationScheduler> weak_factory_;
+ // Runs the task, deletes it, and removes it from |posted_tasks_|.
+ void RunPostedTask(invalidation::Closure* task);
+
// Holds all posted tasks that have not yet been run.
std::set<invalidation::Closure*> posted_tasks_;
@@ -73,8 +71,100 @@ class SyncInvalidationScheduler : public invalidation::Scheduler {
bool is_started_;
bool is_stopped_;
- // Runs the task, deletes it, and removes it from |posted_tasks_|.
- void RunPostedTask(invalidation::Closure* task);
+ base::WeakPtrFactory<SyncInvalidationScheduler> weak_factory_;
+};
+
+// SyncNetworkChannel implements the common tasks needed to interact with the
+// invalidation library:
+// - registering message and network status callbacks
+// - encoding/decoding messages to ClientGatewayMessage
+// - notifying observers about network channel state changes
+// Implementations of a particular network protocol should implement
+// SendEncodedMessage and call NotifyStateChange and DeliverIncomingMessage.
+class SYNC_EXPORT_PRIVATE SyncNetworkChannel
+ : public NON_EXPORTED_BASE(invalidation::NetworkChannel) {
+ public:
+ class Observer {
+ public:
+ // Called when network channel state changes. Possible states are:
+ // - INVALIDATIONS_ENABLED : connection is established and working
+ // - TRANSIENT_INVALIDATION_ERROR : no network, connection lost, etc.
+ // - INVALIDATION_CREDENTIALS_REJECTED : Issues with auth token
+ virtual void OnNetworkChannelStateChanged(
+ InvalidatorState invalidator_state) = 0;
+ };
+
+ SyncNetworkChannel();
+
+ virtual ~SyncNetworkChannel();
+
+ // invalidation::NetworkChannel implementation.
+ virtual void SendMessage(const std::string& outgoing_message) OVERRIDE;
+ virtual void SetMessageReceiver(
+ invalidation::MessageCallback* incoming_receiver) OVERRIDE;
+ virtual void AddNetworkStatusReceiver(
+ invalidation::NetworkStatusCallback* network_status_receiver) OVERRIDE;
+ virtual void SetSystemResources(
+ invalidation::SystemResources* resources) OVERRIDE;
+
+ // Subclass should implement SendEncodedMessage to send the encoded message
+ // to Tango over the network.
+ virtual void SendEncodedMessage(const std::string& encoded_message) = 0;
+
+ // Classes interested in network channel state changes should implement
+ // SyncNetworkChannel::Observer and register here.
+ void AddObserver(Observer* observer);
+ void RemoveObserver(Observer* observer);
+
+ const std::string& GetServiceContextForTest() const;
+
+ int64 GetSchedulingHashForTest() const;
+
+ static std::string EncodeMessageForTest(
+ const std::string& message,
+ const std::string& service_context,
+ int64 scheduling_hash);
+
+ static bool DecodeMessageForTest(
+ const std::string& notification,
+ std::string* message,
+ std::string* service_context,
+ int64* scheduling_hash);
+
+ protected:
+ // Subclass should notify about connection state through NotifyStateChange.
+ void NotifyStateChange(InvalidatorState invalidator_state);
+ // Subclass should call DeliverIncomingMessage for a message to reach the
+ // invalidation library.
+ void DeliverIncomingMessage(const std::string& message);
+
+ private:
+ typedef std::vector<invalidation::NetworkStatusCallback*>
+ NetworkStatusReceiverList;
+
+ static void EncodeMessage(
+ std::string* encoded_message,
+ const std::string& message,
+ const std::string& service_context,
+ int64 scheduling_hash);
+
+ static bool DecodeMessage(
+ const std::string& data,
+ std::string* message,
+ std::string* service_context,
+ int64* scheduling_hash);
+
+ // Callbacks into invalidation library
+ scoped_ptr<invalidation::MessageCallback> incoming_receiver_;
+ NetworkStatusReceiverList network_status_receivers_;
+
+ // Last channel state for new network status receivers.
+ InvalidatorState invalidator_state_;
+
+ ObserverList<Observer> observers_;
+
+ std::string service_context_;
+ int64 scheduling_hash_;
};
class SyncStorage : public invalidation::Storage {
@@ -120,7 +210,7 @@ class SyncStorage : public invalidation::Storage {
class SYNC_EXPORT_PRIVATE SyncSystemResources
: public NON_EXPORTED_BASE(invalidation::SystemResources) {
public:
- SyncSystemResources(scoped_ptr<notifier::PushClient> push_client,
+ SyncSystemResources(SyncNetworkChannel* sync_network_channel,
StateWriter* state_writer);
virtual ~SyncSystemResources();
@@ -133,7 +223,7 @@ class SYNC_EXPORT_PRIVATE SyncSystemResources
virtual std::string platform() const OVERRIDE;
virtual SyncLogger* logger() OVERRIDE;
virtual SyncStorage* storage() OVERRIDE;
- virtual PushClientChannel* network() OVERRIDE;
+ virtual SyncNetworkChannel* network() OVERRIDE;
virtual SyncInvalidationScheduler* internal_scheduler() OVERRIDE;
virtual SyncInvalidationScheduler* listener_scheduler() OVERRIDE;
@@ -144,7 +234,8 @@ class SYNC_EXPORT_PRIVATE SyncSystemResources
scoped_ptr<SyncInvalidationScheduler> internal_scheduler_;
scoped_ptr<SyncInvalidationScheduler> listener_scheduler_;
scoped_ptr<SyncStorage> storage_;
- PushClientChannel push_client_channel_;
+ // sync_network_channel_ is owned by SyncInvalidationListener.
+ SyncNetworkChannel* sync_network_channel_;
};
} // namespace syncer
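
The contract described in the SyncNetworkChannel comment above can be made concrete with a minimal sketch of a transport-specific subclass (a hypothetical illustration, not code from this patch; FakeTransportChannel and its On* methods are invented names):

    // Only SendEncodedMessage is required by the SyncNetworkChannel
    // interface; inbound data and connectivity changes are fed back through
    // the protected DeliverIncomingMessage/NotifyStateChange hooks.
    class FakeTransportChannel : public SyncNetworkChannel {
     public:
      virtual void SendEncodedMessage(
          const std::string& encoded_message) OVERRIDE {
        last_sent_ = encoded_message;  // a real channel would hit the network
      }

      void OnTransportConnected() {
        NotifyStateChange(INVALIDATIONS_ENABLED);
      }

      void OnTransportDisconnected() {
        NotifyStateChange(TRANSIENT_INVALIDATION_ERROR);
      }

      void OnTransportData(const std::string& wire_data) {
        // Unwraps the ClientGatewayMessage envelope and hands the payload to
        // the registered invalidation::MessageCallback.
        DeliverIncomingMessage(wire_data);
      }

     private:
      std::string last_sent_;
    };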
diff --git a/chromium/sync/notifier/sync_system_resources_unittest.cc b/chromium/sync/notifier/sync_system_resources_unittest.cc
index e8ee38c5c3e..f63e90621a0 100644
--- a/chromium/sync/notifier/sync_system_resources_unittest.cc
+++ b/chromium/sync/notifier/sync_system_resources_unittest.cc
@@ -13,6 +13,7 @@
#include "google/cacheinvalidation/include/types.h"
#include "jingle/notifier/listener/fake_push_client.h"
+#include "sync/notifier/push_client_channel.h"
#include "sync/notifier/state_writer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -49,9 +50,9 @@ class MockStorageCallback {
class SyncSystemResourcesTest : public testing::Test {
protected:
SyncSystemResourcesTest()
- : sync_system_resources_(
- scoped_ptr<notifier::PushClient>(new notifier::FakePushClient()),
- &mock_state_writer_) {}
+ : push_client_channel_(
+ scoped_ptr<notifier::PushClient>(new notifier::FakePushClient())),
+ sync_system_resources_(&push_client_channel_, &mock_state_writer_) {}
virtual ~SyncSystemResourcesTest() {}
@@ -85,6 +86,7 @@ class SyncSystemResourcesTest : public testing::Test {
// Needed by |sync_system_resources_|.
base::MessageLoop message_loop_;
MockStateWriter mock_state_writer_;
+ PushClientChannel push_client_channel_;
SyncSystemResources sync_system_resources_;
private:
@@ -174,5 +176,228 @@ TEST_F(SyncSystemResourcesTest, WriteState) {
results);
}
+class TestSyncNetworkChannel : public SyncNetworkChannel {
+ public:
+ TestSyncNetworkChannel() {}
+ virtual ~TestSyncNetworkChannel() {}
+
+ using SyncNetworkChannel::NotifyStateChange;
+ using SyncNetworkChannel::DeliverIncomingMessage;
+
+ virtual void SendEncodedMessage(const std::string& encoded_message) OVERRIDE {
+ last_encoded_message_ = encoded_message;
+ }
+
+ std::string last_encoded_message_;
+};
+
+class SyncNetworkChannelTest
+ : public testing::Test,
+ public SyncNetworkChannel::Observer {
+ protected:
+ SyncNetworkChannelTest()
+ : last_invalidator_state_(DEFAULT_INVALIDATION_ERROR),
+ connected_(false) {
+ network_channel_.AddObserver(this);
+ network_channel_.SetMessageReceiver(
+ invalidation::NewPermanentCallback(
+ this, &SyncNetworkChannelTest::OnIncomingMessage));
+ network_channel_.AddNetworkStatusReceiver(
+ invalidation::NewPermanentCallback(
+ this, &SyncNetworkChannelTest::OnNetworkStatusChange));
+ }
+
+ virtual ~SyncNetworkChannelTest() {
+ network_channel_.RemoveObserver(this);
+ }
+
+ virtual void OnNetworkChannelStateChanged(
+ InvalidatorState invalidator_state) OVERRIDE {
+ last_invalidator_state_ = invalidator_state;
+ }
+
+ void OnIncomingMessage(std::string incoming_message) {
+ last_message_ = incoming_message;
+ }
+
+ void OnNetworkStatusChange(bool connected) {
+ connected_ = connected;
+ }
+
+ TestSyncNetworkChannel network_channel_;
+ InvalidatorState last_invalidator_state_;
+ std::string last_message_;
+ bool connected_;
+};
+
+const char kMessage[] = "message";
+const char kServiceContext[] = "service context";
+const int64 kSchedulingHash = 100;
+
+// Encode a message with some context and then decode it. The decoded info
+// should match the original info.
+TEST_F(SyncNetworkChannelTest, EncodeDecode) {
+ const std::string& data =
+ SyncNetworkChannel::EncodeMessageForTest(
+ kMessage, kServiceContext, kSchedulingHash);
+ std::string message;
+ std::string service_context;
+ int64 scheduling_hash = 0LL;
+ EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
+ data, &message, &service_context, &scheduling_hash));
+ EXPECT_EQ(kMessage, message);
+ EXPECT_EQ(kServiceContext, service_context);
+ EXPECT_EQ(kSchedulingHash, scheduling_hash);
+}
+
+// Encode a message with no context and then decode it. The decoded message
+// should match the original message, but the context and hash should be
+// untouched.
+TEST_F(SyncNetworkChannelTest, EncodeDecodeNoContext) {
+ const std::string& data =
+ SyncNetworkChannel::EncodeMessageForTest(
+ kMessage, std::string(), kSchedulingHash);
+ std::string message;
+ std::string service_context = kServiceContext;
+ int64 scheduling_hash = kSchedulingHash + 1;
+ EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
+ data, &message, &service_context, &scheduling_hash));
+ EXPECT_EQ(kMessage, message);
+ EXPECT_EQ(kServiceContext, service_context);
+ EXPECT_EQ(kSchedulingHash + 1, scheduling_hash);
+}
+
+// Decode an empty notification. It should result in an empty message
+// but should leave the context and hash untouched.
+TEST_F(SyncNetworkChannelTest, DecodeEmpty) {
+ std::string message = kMessage;
+ std::string service_context = kServiceContext;
+ int64 scheduling_hash = kSchedulingHash;
+ EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
+ std::string(), &message, &service_context, &scheduling_hash));
+ EXPECT_TRUE(message.empty());
+ EXPECT_EQ(kServiceContext, service_context);
+ EXPECT_EQ(kSchedulingHash, scheduling_hash);
+}
+
+// Try to decode a garbage notification. It should leave all its
+// arguments untouched and return false.
+TEST_F(SyncNetworkChannelTest, DecodeGarbage) {
+ std::string data = "garbage";
+ std::string message = kMessage;
+ std::string service_context = kServiceContext;
+ int64 scheduling_hash = kSchedulingHash;
+ EXPECT_FALSE(SyncNetworkChannel::DecodeMessageForTest(
+ data, &message, &service_context, &scheduling_hash));
+ EXPECT_EQ(kMessage, message);
+ EXPECT_EQ(kServiceContext, service_context);
+ EXPECT_EQ(kSchedulingHash, scheduling_hash);
+}
+
+// Simulate network channel state change. It should propagate to observer.
+TEST_F(SyncNetworkChannelTest, OnNetworkChannelStateChanged) {
+ EXPECT_EQ(DEFAULT_INVALIDATION_ERROR, last_invalidator_state_);
+ EXPECT_FALSE(connected_);
+ network_channel_.NotifyStateChange(INVALIDATIONS_ENABLED);
+ EXPECT_EQ(INVALIDATIONS_ENABLED, last_invalidator_state_);
+ EXPECT_TRUE(connected_);
+ network_channel_.NotifyStateChange(INVALIDATION_CREDENTIALS_REJECTED);
+ EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, last_invalidator_state_);
+ EXPECT_FALSE(connected_);
+}
+
+// Call SendMessage on the channel. SendEncodedMessage should be called for it.
+TEST_F(SyncNetworkChannelTest, SendMessage) {
+ network_channel_.SendMessage(kMessage);
+ std::string expected_encoded_message =
+ SyncNetworkChannel::EncodeMessageForTest(
+ kMessage,
+ network_channel_.GetServiceContextForTest(),
+ network_channel_.GetSchedulingHashForTest());
+ ASSERT_EQ(expected_encoded_message, network_channel_.last_encoded_message_);
+}
+
+// Simulate an incoming notification. It should be decoded properly
+// by the channel.
+TEST_F(SyncNetworkChannelTest, OnIncomingMessage) {
+ const std::string message =
+ SyncNetworkChannel::EncodeMessageForTest(
+ kMessage, kServiceContext, kSchedulingHash);
+
+ network_channel_.DeliverIncomingMessage(message);
+ EXPECT_EQ(kServiceContext,
+ network_channel_.GetServiceContextForTest());
+ EXPECT_EQ(kSchedulingHash,
+ network_channel_.GetSchedulingHashForTest());
+ EXPECT_EQ(kMessage, last_message_);
+}
+
+// Simulate an incoming notification with no receiver. It should be dropped by
+// the channel.
+TEST_F(SyncNetworkChannelTest, OnIncomingMessageNoReceiver) {
+ const std::string message =
+ SyncNetworkChannel::EncodeMessageForTest(
+ kMessage, kServiceContext, kSchedulingHash);
+
+ network_channel_.SetMessageReceiver(NULL);
+ network_channel_.DeliverIncomingMessage(message);
+ EXPECT_TRUE(network_channel_.GetServiceContextForTest().empty());
+ EXPECT_EQ(static_cast<int64>(0),
+ network_channel_.GetSchedulingHashForTest());
+ EXPECT_TRUE(last_message_.empty());
+}
+
+// Simulate an incoming garbage notification. It should be dropped by
+// the channel.
+TEST_F(SyncNetworkChannelTest, OnIncomingMessageGarbage) {
+ std::string message = "garbage";
+
+ network_channel_.DeliverIncomingMessage(message);
+ EXPECT_TRUE(network_channel_.GetServiceContextForTest().empty());
+ EXPECT_EQ(static_cast<int64>(0),
+ network_channel_.GetSchedulingHashForTest());
+ EXPECT_TRUE(last_message_.empty());
+}
+
+// Send a message, simulate an incoming message with context, and then
+// send the same message again. The first sent message should not
+// have any context, but the second sent message should have the
+// context from the incoming message.
+TEST_F(SyncNetworkChannelTest, PersistedMessageState) {
+ network_channel_.SendMessage(kMessage);
+ ASSERT_FALSE(network_channel_.last_encoded_message_.empty());
+ {
+ std::string message;
+ std::string service_context;
+ int64 scheduling_hash = 0LL;
+ EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
+ network_channel_.last_encoded_message_,
+ &message, &service_context, &scheduling_hash));
+ EXPECT_EQ(kMessage, message);
+ EXPECT_TRUE(service_context.empty());
+ EXPECT_EQ(0LL, scheduling_hash);
+ }
+
+ const std::string& encoded_message =
+ SyncNetworkChannel::EncodeMessageForTest(
+ kMessage, kServiceContext, kSchedulingHash);
+ network_channel_.DeliverIncomingMessage(encoded_message);
+
+ network_channel_.last_encoded_message_.clear();
+ network_channel_.SendMessage(kMessage);
+ ASSERT_FALSE(network_channel_.last_encoded_message_.empty());
+ {
+ std::string message;
+ std::string service_context;
+ int64 scheduling_hash = 0LL;
+ EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
+ network_channel_.last_encoded_message_,
+ &message, &service_context, &scheduling_hash));
+ EXPECT_EQ(kMessage, message);
+ EXPECT_EQ(kServiceContext, service_context);
+ EXPECT_EQ(kSchedulingHash, scheduling_hash);
+ }
+}
+
} // namespace
} // namespace syncer
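
The ownership change behind the new SyncSystemResources constructor can also be summarised briefly (a sketch under assumptions: the caller keeps the channel alive, as the header comment notes SyncInvalidationListener does in production; MockStateWriter is the test helper used above):

    // The caller owns the channel; SyncSystemResources only borrows it.
    base::MessageLoop message_loop;  // needed by the schedulers, as in the test
    scoped_ptr<SyncNetworkChannel> channel(new PushClientChannel(
        scoped_ptr<notifier::PushClient>(new notifier::FakePushClient())));
    MockStateWriter state_writer;
    SyncSystemResources resources(channel.get(), &state_writer);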
diff --git a/chromium/sync/notifier/unacked_invalidation_set.cc b/chromium/sync/notifier/unacked_invalidation_set.cc
new file mode 100644
index 00000000000..705dbd2ded8
--- /dev/null
+++ b/chromium/sync/notifier/unacked_invalidation_set.cc
@@ -0,0 +1,204 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/unacked_invalidation_set.h"
+
+#include "base/strings/string_number_conversions.h"
+#include "sync/internal_api/public/base/ack_handle.h"
+#include "sync/notifier/object_id_invalidation_map.h"
+#include "sync/notifier/sync_invalidation_listener.h"
+
+namespace {
+
+const char kSourceKey[] = "source";
+const char kNameKey[] = "name";
+const char kInvalidationListKey[] = "invalidation-list";
+
+} // namespace
+
+namespace syncer {
+
+const size_t UnackedInvalidationSet::kMaxBufferedInvalidations = 5;
+
+UnackedInvalidationSet::UnackedInvalidationSet(
+ invalidation::ObjectId id)
+ : registered_(false),
+ object_id_(id) {}
+
+UnackedInvalidationSet::~UnackedInvalidationSet() {}
+
+const invalidation::ObjectId& UnackedInvalidationSet::object_id() const {
+ return object_id_;
+}
+
+void UnackedInvalidationSet::Add(
+ const Invalidation& invalidation) {
+ SingleObjectInvalidationSet set;
+ set.Insert(invalidation);
+ AddSet(set);
+ if (!registered_)
+ Truncate(kMaxBufferedInvalidations);
+}
+
+void UnackedInvalidationSet::AddSet(
+ const SingleObjectInvalidationSet& invalidations) {
+ invalidations_.insert(invalidations.begin(), invalidations.end());
+ if (!registered_)
+ Truncate(kMaxBufferedInvalidations);
+}
+
+void UnackedInvalidationSet::ExportInvalidations(
+ WeakHandle<AckHandler> ack_handler,
+ ObjectIdInvalidationMap* out) const {
+ for (SingleObjectInvalidationSet::const_iterator it = invalidations_.begin();
+ it != invalidations_.end(); ++it) {
+ // Copy the invalidation and set the copy's ack_handler.
+ Invalidation inv(*it);
+ inv.set_ack_handler(ack_handler);
+ out->Insert(inv);
+ }
+}
+
+void UnackedInvalidationSet::Clear() {
+ invalidations_.clear();
+}
+
+void UnackedInvalidationSet::SetHandlerIsRegistered() {
+ registered_ = true;
+}
+
+void UnackedInvalidationSet::SetHandlerIsUnregistered() {
+ registered_ = false;
+ Truncate(kMaxBufferedInvalidations);
+}
+
+// Removes the invalidation with the matching ack handle from the list.
+void UnackedInvalidationSet::Acknowledge(const AckHandle& handle) {
+ bool handle_found = false;
+ for (SingleObjectInvalidationSet::const_iterator it = invalidations_.begin();
+ it != invalidations_.end(); ++it) {
+ if (it->ack_handle().Equals(handle)) {
+ invalidations_.erase(*it);
+ handle_found = true;
+ break;
+ }
+ }
+ DLOG_IF(WARNING, !handle_found)
+ << "Unrecognized to ack for object " << ObjectIdToString(object_id_);
+ (void)handle_found; // Silence unused variable warning in release builds.
+}
+
+// Erase the invalidation with matching ack handle from the list. Also creates
+// an 'UnknownVersion' invalidation with the same ack handle and places it at
+// the beginning of the list. If an unknown version invalidation currently
+// exists, it is replaced.
+void UnackedInvalidationSet::Drop(const AckHandle& handle) {
+ SingleObjectInvalidationSet::const_iterator it;
+ for (it = invalidations_.begin(); it != invalidations_.end(); ++it) {
+ if (it->ack_handle().Equals(handle)) {
+ break;
+ }
+ }
+ if (it == invalidations_.end()) {
+ DLOG(WARNING) << "Unrecognized drop request for object "
+ << ObjectIdToString(object_id_);
+ return;
+ }
+
+ Invalidation unknown_version = Invalidation::InitFromDroppedInvalidation(*it);
+ invalidations_.erase(*it);
+
+ // If an unknown version is in the list, we remove it so we can replace it.
+ if (!invalidations_.empty() && invalidations_.begin()->is_unknown_version()) {
+ invalidations_.erase(*invalidations_.begin());
+ }
+
+ invalidations_.insert(unknown_version);
+}
+
+scoped_ptr<base::DictionaryValue> UnackedInvalidationSet::ToValue() const {
+ scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue);
+ value->SetString(kSourceKey, base::IntToString(object_id_.source()));
+ value->SetString(kNameKey, object_id_.name());
+
+ scoped_ptr<base::ListValue> list_value(new ListValue);
+ for (InvalidationsSet::const_iterator it = invalidations_.begin();
+ it != invalidations_.end(); ++it) {
+ list_value->Append(it->ToValue().release());
+ }
+ value->Set(kInvalidationListKey, list_value.release());
+
+ return value.Pass();
+}
+
+bool UnackedInvalidationSet::ResetFromValue(
+ const base::DictionaryValue& value) {
+ std::string source_str;
+ if (!value.GetString(kSourceKey, &source_str)) {
+ DLOG(WARNING) << "Unable to deserialize source";
+ return false;
+ }
+ int source = 0;
+ if (!base::StringToInt(source_str, &source)) {
+ DLOG(WARNING) << "Invalid source: " << source_str;
+ return false;
+ }
+ std::string name;
+ if (!value.GetString(kNameKey, &name)) {
+ DLOG(WARNING) << "Unable to deserialize name";
+ return false;
+ }
+ object_id_ = invalidation::ObjectId(source, name);
+ const base::ListValue* invalidation_list = NULL;
+ if (!value.GetList(kInvalidationListKey, &invalidation_list)
+ || !ResetListFromValue(*invalidation_list)) {
+ // Earlier versions of this class did not set this field, so we don't treat
+ // parsing errors here as a fatal failure.
+ DLOG(WARNING) << "Unable to deserialize invalidation list.";
+ }
+ return true;
+}
+
+bool UnackedInvalidationSet::ResetListFromValue(
+ const base::ListValue& list) {
+ for (size_t i = 0; i < list.GetSize(); ++i) {
+ const base::DictionaryValue* dict;
+ if (!list.GetDictionary(i, &dict)) {
+ DLOG(WARNING) << "Failed to get invalidation dictionary at index " << i;
+ return false;
+ }
+ scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
+ if (!invalidation) {
+ DLOG(WARNING) << "Failed to parse invalidation at index " << i;
+ return false;
+ }
+ invalidations_.insert(*invalidation.get());
+ }
+ return true;
+}
+
+void UnackedInvalidationSet::Truncate(size_t max_size) {
+ DCHECK_GT(max_size, 0U);
+
+ if (invalidations_.size() <= max_size) {
+ return;
+ }
+
+ while (invalidations_.size() > max_size) {
+ invalidations_.erase(*invalidations_.begin());
+ }
+
+ // We dropped some invalidations. We remember the fact that an unknown
+ // amount of information has been lost by ensuring this list begins with
+ // an UnknownVersion invalidation. We remove the oldest remaining
+ // invalidation to make room for it.
+ invalidation::ObjectId id = invalidations_.begin()->object_id();
+ invalidations_.erase(*invalidations_.begin());
+
+ Invalidation unknown_version = Invalidation::InitUnknownVersion(id);
+ invalidations_.insert(unknown_version);
+}
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/unacked_invalidation_set.h b/chromium/sync/notifier/unacked_invalidation_set.h
new file mode 100644
index 00000000000..aae9cdab3eb
--- /dev/null
+++ b/chromium/sync/notifier/unacked_invalidation_set.h
@@ -0,0 +1,117 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_UNACKED_INVALIDATION_SET_H_
+#define SYNC_NOTIFIER_UNACKED_INVALIDATION_SET_H_
+
+#include <vector>
+
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/invalidation.h"
+#include "sync/internal_api/public/util/weak_handle.h"
+#include "sync/notifier/invalidation_util.h"
+
+namespace base {
+class DictionaryValue;
+} // namespace base
+
+namespace syncer {
+
+namespace test_util {
+class UnackedInvalidationSetEqMatcher;
+} // namespace test_util
+
+class SingleObjectInvalidationSet;
+class ObjectIdInvalidationMap;
+class AckHandle;
+
+// Manages the set of invalidations that are awaiting local acknowledgement for
+// a particular ObjectId. This set of invalidations will be persisted across
+// restarts, though this class is not directly responsible for that.
+class SYNC_EXPORT UnackedInvalidationSet {
+ public:
+ static const size_t kMaxBufferedInvalidations;
+
+  explicit UnackedInvalidationSet(invalidation::ObjectId id);
+ ~UnackedInvalidationSet();
+
+ // Returns the ObjectID of the invalidations this class is tracking.
+ const invalidation::ObjectId& object_id() const;
+
+ // Adds a new invalidation to the set awaiting acknowledgement.
+ void Add(const Invalidation& invalidation);
+
+ // Adds many new invalidations to the set awaiting acknowledgement.
+ void AddSet(const SingleObjectInvalidationSet& invalidations);
+
+ // Exports the set of invalidations awaiting acknowledgement as an
+ // ObjectIdInvalidationMap. Each of these invalidations will be associated
+ // with the given |ack_handler|.
+ //
+ // The contents of the UnackedInvalidationSet are not directly modified by
+ // this procedure, but the AckHandles stored in those exported invalidations
+ // are likely to end up back here in calls to Acknowledge() or Drop().
+ void ExportInvalidations(WeakHandle<AckHandler> ack_handler,
+ ObjectIdInvalidationMap* out) const;
+
+ // Removes all stored invalidations from this object.
+ void Clear();
+
+ // Indicates that a handler has registered to handle these invalidations.
+ //
+  // Registrations with the invalidation server persist across restarts, but
+  // registrations from InvalidationHandlers to the InvalidationService do not.
+ // In the time immediately after a restart, it's possible that the server
+ // will send us invalidations, and we won't have a handler to send them to.
+ //
+  // The SetHandlerIsRegistered() call indicates that this period has come to
+  // an end. There is now a handler that can receive these invalidations.
+  // Once this function has been called, the kMaxBufferedInvalidations limit
+  // will be ignored. It is assumed that the handler will manage its own
+  // buffer size.
+ void SetHandlerIsRegistered();
+
+ // Indicates that the handler has now unregistered itself.
+ //
+ // This causes the object to resume enforcement of the
+ // kMaxBufferedInvalidations limit.
+ void SetHandlerIsUnregistered();
+
+ // Given an AckHandle belonging to one of the contained invalidations, finds
+ // the invalidation and drops it from the list. It is considered to be
+ // acknowledged, so there is no need to continue maintaining its state.
+ void Acknowledge(const AckHandle& handle);
+
+ // Given an AckHandle belonging to one of the contained invalidations, finds
+ // the invalidation, drops it from the list, and adds additional state to
+ // indicate that this invalidation has been lost without being acted on.
+ void Drop(const AckHandle& handle);
+
+ scoped_ptr<base::DictionaryValue> ToValue() const;
+ bool ResetFromValue(const base::DictionaryValue& value);
+
+ private:
+ // Allow this test helper to have access to our internals.
+ friend class test_util::UnackedInvalidationSetEqMatcher;
+
+ typedef std::set<Invalidation, InvalidationVersionLessThan> InvalidationsSet;
+
+ bool ResetListFromValue(const base::ListValue& value);
+
+ // Limits the list size to the given maximum. This function will correctly
+ // update this class' internal data to indicate if invalidations have been
+ // dropped.
+ void Truncate(size_t max_size);
+
+ bool registered_;
+ invalidation::ObjectId object_id_;
+ InvalidationsSet invalidations_;
+};
+
+typedef std::map<invalidation::ObjectId,
+ UnackedInvalidationSet,
+ ObjectIdLessThan> UnackedInvalidationsMap;
+
+} // namespace syncer
+
+#endif // SYNC_NOTIFIER_UNACKED_INVALIDATION_SET_H_
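For orientation, here is a minimal usage sketch of the API declared above. It is not part of the patch; the object ID source value, the version, and the payload are placeholders, and error handling is omitted.

  invalidation::ObjectId id(10, "BOOKMARK");
  syncer::UnackedInvalidationSet unacked(id);

  // Buffer an incoming invalidation. While no handler is registered, the set
  // enforces the kMaxBufferedInvalidations cap.
  syncer::Invalidation inv = syncer::Invalidation::Init(id, 42, "payload");
  unacked.Add(inv);

  // A handler has registered: the cap is lifted and buffered invalidations
  // can be re-exported. An empty WeakHandle is used here for brevity;
  // production code would pass a real AckHandler.
  unacked.SetHandlerIsRegistered();
  syncer::ObjectIdInvalidationMap map;
  unacked.ExportInvalidations(syncer::WeakHandle<syncer::AckHandler>(), &map);

  // Once the handler has acted on the invalidation, remove it by ack handle.
  unacked.Acknowledge(inv.ack_handle());

The Drop() path works the same way, except that the dropped invalidation is replaced by an unknown-version invalidation so the loss of information is remembered.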
diff --git a/chromium/sync/notifier/unacked_invalidation_set_test_util.cc b/chromium/sync/notifier/unacked_invalidation_set_test_util.cc
new file mode 100644
index 00000000000..8961574c9f9
--- /dev/null
+++ b/chromium/sync/notifier/unacked_invalidation_set_test_util.cc
@@ -0,0 +1,181 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/unacked_invalidation_set_test_util.h"
+
+#include "base/json/json_string_value_serializer.h"
+#include "sync/notifier/object_id_invalidation_map.h"
+#include "testing/gmock/include/gmock/gmock-matchers.h"
+
+namespace syncer {
+
+using ::testing::MakeMatcher;
+using ::testing::MatchResultListener;
+using ::testing::Matcher;
+using ::testing::MatcherInterface;
+using ::testing::PrintToString;
+
+namespace test_util {
+
+// This class needs to be declared outside the anonymous namespace so that
+// UnackedInvalidationSet can declare it as a friend. This class needs access
+// to the UnackedInvalidationSet internals to implement its comparison
+// function.
+class UnackedInvalidationSetEqMatcher
+ : public testing::MatcherInterface<const UnackedInvalidationSet&> {
+ public:
+ explicit UnackedInvalidationSetEqMatcher(
+ const UnackedInvalidationSet& expected);
+
+ virtual bool MatchAndExplain(
+ const UnackedInvalidationSet& actual,
+ MatchResultListener* listener) const OVERRIDE;
+ virtual void DescribeTo(::std::ostream* os) const OVERRIDE;
+ virtual void DescribeNegationTo(::std::ostream* os) const OVERRIDE;
+
+ private:
+ const UnackedInvalidationSet expected_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnackedInvalidationSetEqMatcher);
+};
+
+namespace {
+
+struct InvalidationEq {
+ bool operator()(const syncer::Invalidation& a,
+ const syncer::Invalidation& b) const {
+ return a.Equals(b);
+ }
+};
+
+} // namespace
+
+UnackedInvalidationSetEqMatcher::UnackedInvalidationSetEqMatcher(
+ const UnackedInvalidationSet& expected)
+ : expected_(expected) {}
+
+bool UnackedInvalidationSetEqMatcher::MatchAndExplain(
+ const UnackedInvalidationSet& actual,
+ MatchResultListener* listener) const {
+ // Use our friendship with this class to compare the internals of two
+ // instances.
+ //
+ // Note that the registration status is intentionally not considered
+ // when performing this comparison.
+ return expected_.object_id_ == actual.object_id_
+ && std::equal(expected_.invalidations_.begin(),
+ expected_.invalidations_.end(),
+ actual.invalidations_.begin(),
+ InvalidationEq());
+}
+
+void UnackedInvalidationSetEqMatcher::DescribeTo(::std::ostream* os) const {
+ *os << " is equal to " << PrintToString(expected_);
+}
+
+void UnackedInvalidationSetEqMatcher::DescribeNegationTo(
+ ::std::ostream* os) const {
+ *os << " isn't equal to " << PrintToString(expected_);
+}
+
+// We're done declaring UnackedInvalidationSetEqMatcher. Everything else can
+// go into the anonymous namespace.
+namespace {
+
+ObjectIdInvalidationMap UnackedInvalidationsMapToObjectIdInvalidationMap(
+ const UnackedInvalidationsMap& state_map) {
+ ObjectIdInvalidationMap object_id_invalidation_map;
+ for (UnackedInvalidationsMap::const_iterator it = state_map.begin();
+ it != state_map.end(); ++it) {
+ it->second.ExportInvalidations(syncer::WeakHandle<AckHandler>(),
+ &object_id_invalidation_map);
+ }
+ return object_id_invalidation_map;
+}
+
+class UnackedInvalidationsMapEqMatcher
+ : public testing::MatcherInterface<const UnackedInvalidationsMap&> {
+ public:
+ explicit UnackedInvalidationsMapEqMatcher(
+ const UnackedInvalidationsMap& expected);
+
+ virtual bool MatchAndExplain(const UnackedInvalidationsMap& actual,
+ MatchResultListener* listener) const;
+ virtual void DescribeTo(::std::ostream* os) const;
+ virtual void DescribeNegationTo(::std::ostream* os) const;
+
+ private:
+ const UnackedInvalidationsMap expected_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnackedInvalidationsMapEqMatcher);
+};
+
+UnackedInvalidationsMapEqMatcher::UnackedInvalidationsMapEqMatcher(
+ const UnackedInvalidationsMap& expected)
+ : expected_(expected) {
+}
+
+bool UnackedInvalidationsMapEqMatcher::MatchAndExplain(
+ const UnackedInvalidationsMap& actual,
+ MatchResultListener* listener) const {
+ ObjectIdInvalidationMap expected_inv =
+ UnackedInvalidationsMapToObjectIdInvalidationMap(expected_);
+ ObjectIdInvalidationMap actual_inv =
+ UnackedInvalidationsMapToObjectIdInvalidationMap(actual);
+
+ return expected_inv == actual_inv;
+}
+
+void UnackedInvalidationsMapEqMatcher::DescribeTo(
+ ::std::ostream* os) const {
+ *os << " is equal to " << PrintToString(expected_);
+}
+
+void UnackedInvalidationsMapEqMatcher::DescribeNegationTo(
+ ::std::ostream* os) const {
+ *os << " isn't equal to " << PrintToString(expected_);
+}
+
+} // namespace
+
+void PrintTo(const UnackedInvalidationSet& invalidations,
+ ::std::ostream* os) {
+ scoped_ptr<base::DictionaryValue> value = invalidations.ToValue();
+
+ std::string output;
+ JSONStringValueSerializer serializer(&output);
+ serializer.set_pretty_print(true);
+ serializer.Serialize(*value.get());
+
+ (*os) << output;
+}
+
+void PrintTo(const UnackedInvalidationsMap& map, ::std::ostream* os) {
+ scoped_ptr<base::ListValue> list(new base::ListValue);
+ for (UnackedInvalidationsMap::const_iterator it = map.begin();
+ it != map.end(); ++it) {
+ list->Append(it->second.ToValue().release());
+ }
+
+ std::string output;
+ JSONStringValueSerializer serializer(&output);
+ serializer.set_pretty_print(true);
+ serializer.Serialize(*list.get());
+
+ (*os) << output;
+}
+
+Matcher<const UnackedInvalidationSet&> Eq(
+ const UnackedInvalidationSet& expected) {
+ return MakeMatcher(new UnackedInvalidationSetEqMatcher(expected));
+}
+
+Matcher<const UnackedInvalidationsMap&> Eq(
+ const UnackedInvalidationsMap& expected) {
+ return MakeMatcher(new UnackedInvalidationsMapEqMatcher(expected));
+}
+
+} // namespace test_util
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/unacked_invalidation_set_test_util.h b/chromium/sync/notifier/unacked_invalidation_set_test_util.h
new file mode 100644
index 00000000000..e93726b28aa
--- /dev/null
+++ b/chromium/sync/notifier/unacked_invalidation_set_test_util.h
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/unacked_invalidation_set.h"
+
+#include "testing/gmock/include/gmock/gmock-matchers.h"
+
+namespace syncer {
+
+namespace test_util {
+
+void PrintTo(const UnackedInvalidationSet& invalidations, ::std::ostream* os);
+
+void PrintTo(const UnackedInvalidationsMap& map, ::std::ostream* os);
+
+::testing::Matcher<const UnackedInvalidationSet&> Eq(
+ const UnackedInvalidationSet& expected);
+
+::testing::Matcher<const UnackedInvalidationsMap&> Eq(
+ const UnackedInvalidationsMap& expected);
+
+} // namespace test_util
+
+} // namespace syncer
diff --git a/chromium/sync/notifier/unacked_invalidation_set_unittest.cc b/chromium/sync/notifier/unacked_invalidation_set_unittest.cc
new file mode 100644
index 00000000000..d6549ab186d
--- /dev/null
+++ b/chromium/sync/notifier/unacked_invalidation_set_unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/unacked_invalidation_set.h"
+
+#include "base/json/json_string_value_serializer.h"
+#include "sync/notifier/object_id_invalidation_map.h"
+#include "sync/notifier/single_object_invalidation_set.h"
+#include "sync/notifier/unacked_invalidation_set_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+class UnackedInvalidationSetTest : public testing::Test {
+ public:
+ UnackedInvalidationSetTest()
+ : kObjectId_(10, "ASDF"),
+ unacked_invalidations_(kObjectId_) {}
+
+ SingleObjectInvalidationSet GetStoredInvalidations() {
+ ObjectIdInvalidationMap map;
+ unacked_invalidations_.ExportInvalidations(WeakHandle<AckHandler>(), &map);
+ ObjectIdSet ids = map.GetObjectIds();
+ if (ids.find(kObjectId_) != ids.end()) {
+ return map.ForObject(kObjectId_);
+ } else {
+ return SingleObjectInvalidationSet();
+ }
+ }
+
+ const invalidation::ObjectId kObjectId_;
+ UnackedInvalidationSet unacked_invalidations_;
+};
+
+namespace {
+
+// Test storage and retrieval of zero invalidations.
+TEST_F(UnackedInvalidationSetTest, Empty) {
+ EXPECT_EQ(0U, GetStoredInvalidations().GetSize());
+}
+
+// Test storage and retrieval of a single invalidation.
+TEST_F(UnackedInvalidationSetTest, OneInvalidation) {
+ Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
+ unacked_invalidations_.Add(inv1);
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ ASSERT_EQ(1U, set.GetSize());
+ EXPECT_FALSE(set.StartsWithUnknownVersion());
+}
+
+// Test that calling Clear() returns us to the empty state.
+TEST_F(UnackedInvalidationSetTest, Clear) {
+ Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
+ unacked_invalidations_.Add(inv1);
+ unacked_invalidations_.Clear();
+
+ EXPECT_EQ(0U, GetStoredInvalidations().GetSize());
+}
+
+// Test that repeated unknown version invalidations are squashed together.
+TEST_F(UnackedInvalidationSetTest, UnknownVersions) {
+ Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
+ Invalidation inv2 = Invalidation::InitUnknownVersion(kObjectId_);
+ Invalidation inv3 = Invalidation::InitUnknownVersion(kObjectId_);
+ unacked_invalidations_.Add(inv1);
+ unacked_invalidations_.Add(inv2);
+ unacked_invalidations_.Add(inv3);
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ ASSERT_EQ(2U, set.GetSize());
+ EXPECT_TRUE(set.StartsWithUnknownVersion());
+}
+
+// Tests that no truncation occurs while we're under the limit.
+TEST_F(UnackedInvalidationSetTest, NoTruncation) {
+ size_t kMax = UnackedInvalidationSet::kMaxBufferedInvalidations;
+
+ for (size_t i = 0; i < kMax; ++i) {
+ Invalidation inv = Invalidation::Init(kObjectId_, i, "payload");
+ unacked_invalidations_.Add(inv);
+ }
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ ASSERT_EQ(kMax, set.GetSize());
+ EXPECT_FALSE(set.StartsWithUnknownVersion());
+ EXPECT_EQ(0, set.begin()->version());
+ EXPECT_EQ(kMax-1, static_cast<size_t>(set.rbegin()->version()));
+}
+
+// Test that truncation happens as we reach the limit.
+TEST_F(UnackedInvalidationSetTest, Truncation) {
+ size_t kMax = UnackedInvalidationSet::kMaxBufferedInvalidations;
+
+ for (size_t i = 0; i < kMax + 1; ++i) {
+ Invalidation inv = Invalidation::Init(kObjectId_, i, "payload");
+ unacked_invalidations_.Add(inv);
+ }
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ ASSERT_EQ(kMax, set.GetSize());
+ EXPECT_TRUE(set.StartsWithUnknownVersion());
+ EXPECT_TRUE(set.begin()->is_unknown_version());
+ EXPECT_EQ(kMax, static_cast<size_t>(set.rbegin()->version()));
+}
+
+// Test that we don't truncate while a handler is registered.
+TEST_F(UnackedInvalidationSetTest, RegistrationAndTruncation) {
+ unacked_invalidations_.SetHandlerIsRegistered();
+
+ size_t kMax = UnackedInvalidationSet::kMaxBufferedInvalidations;
+
+ for (size_t i = 0; i < kMax + 1; ++i) {
+ Invalidation inv = Invalidation::Init(kObjectId_, i, "payload");
+ unacked_invalidations_.Add(inv);
+ }
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ ASSERT_EQ(kMax+1, set.GetSize());
+ EXPECT_FALSE(set.StartsWithUnknownVersion());
+ EXPECT_EQ(0, set.begin()->version());
+ EXPECT_EQ(kMax, static_cast<size_t>(set.rbegin()->version()));
+
+ // Unregistering should re-enable truncation.
+ unacked_invalidations_.SetHandlerIsUnregistered();
+ SingleObjectInvalidationSet set2 = GetStoredInvalidations();
+ ASSERT_EQ(kMax, set2.GetSize());
+ EXPECT_TRUE(set2.StartsWithUnknownVersion());
+ EXPECT_TRUE(set2.begin()->is_unknown_version());
+ EXPECT_EQ(kMax, static_cast<size_t>(set2.rbegin()->version()));
+}
+
+// Test acknowledgement.
+TEST_F(UnackedInvalidationSetTest, Acknowledge) {
+ // inv2 is included in this test just to make sure invalidations that
+ // are supposed to be unaffected by this operation will be unaffected.
+
+ // We don't expect to be receiving acks or drops unless this flag is set.
+ // Not that it makes much of a difference in behavior.
+ unacked_invalidations_.SetHandlerIsRegistered();
+
+ Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
+ Invalidation inv2 = Invalidation::InitUnknownVersion(kObjectId_);
+ AckHandle inv1_handle = inv1.ack_handle();
+
+ unacked_invalidations_.Add(inv1);
+ unacked_invalidations_.Add(inv2);
+
+ unacked_invalidations_.Acknowledge(inv1_handle);
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ EXPECT_EQ(1U, set.GetSize());
+ EXPECT_TRUE(set.StartsWithUnknownVersion());
+}
+
+// Test drops.
+TEST_F(UnackedInvalidationSetTest, Drop) {
+ // inv2 is included in this test just to make sure invalidations that
+ // are supposed to be unaffected by this operation will be unaffected.
+
+ // We don't expect to be receiving acks or drops unless this flag is set.
+ // Not that it makes much of a difference in behavior.
+ unacked_invalidations_.SetHandlerIsRegistered();
+
+ Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
+ Invalidation inv2 = Invalidation::Init(kObjectId_, 15, "payload");
+ AckHandle inv1_handle = inv1.ack_handle();
+
+ unacked_invalidations_.Add(inv1);
+ unacked_invalidations_.Add(inv2);
+
+ unacked_invalidations_.Drop(inv1_handle);
+
+ SingleObjectInvalidationSet set = GetStoredInvalidations();
+ ASSERT_EQ(2U, set.GetSize());
+ EXPECT_TRUE(set.StartsWithUnknownVersion());
+ EXPECT_EQ(15, set.rbegin()->version());
+}
+
+class UnackedInvalidationSetSerializationTest
+ : public UnackedInvalidationSetTest {
+ public:
+ UnackedInvalidationSet SerializeDeserialize() {
+ scoped_ptr<base::DictionaryValue> value = unacked_invalidations_.ToValue();
+ UnackedInvalidationSet deserialized(kObjectId_);
+ deserialized.ResetFromValue(*value.get());
+ return deserialized;
+ }
+};
+
+TEST_F(UnackedInvalidationSetSerializationTest, Empty) {
+ UnackedInvalidationSet deserialized = SerializeDeserialize();
+ EXPECT_THAT(unacked_invalidations_, test_util::Eq(deserialized));
+}
+
+TEST_F(UnackedInvalidationSetSerializationTest, OneInvalidation) {
+ Invalidation inv = Invalidation::Init(kObjectId_, 10, "payload");
+ unacked_invalidations_.Add(inv);
+
+ UnackedInvalidationSet deserialized = SerializeDeserialize();
+ EXPECT_THAT(unacked_invalidations_, test_util::Eq(deserialized));
+}
+
+TEST_F(UnackedInvalidationSetSerializationTest, WithUnknownVersion) {
+ Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
+ Invalidation inv2 = Invalidation::InitUnknownVersion(kObjectId_);
+ Invalidation inv3 = Invalidation::InitUnknownVersion(kObjectId_);
+ unacked_invalidations_.Add(inv1);
+ unacked_invalidations_.Add(inv2);
+ unacked_invalidations_.Add(inv3);
+
+ UnackedInvalidationSet deserialized = SerializeDeserialize();
+ EXPECT_THAT(unacked_invalidations_, test_util::Eq(deserialized));
+}
+
+} // namespace
+
+} // namespace syncer
diff --git a/chromium/sync/protocol/app_list_specifics.proto b/chromium/sync/protocol/app_list_specifics.proto
new file mode 100644
index 00000000000..c677a4bd0c7
--- /dev/null
+++ b/chromium/sync/protocol/app_list_specifics.proto
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Sync protocol datatype extension for the app list (aka app launcher).
+
+// Update proto_{value,enum}_conversions{.h,.cc,_unittest.cc} if you change
+// any fields in this file.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+option retain_unknown_fields = true;
+
+package sync_pb;
+
+// Properties of app list objects.
+message AppListSpecifics {
+ // Unique identifier for the item:
+ // * TYPE_FOLDER: Folder id (generated)
+ // * TYPE_APP: App Id
+ // * TYPE_URL: Url
+ optional string item_id = 1;
+
+ // What type of item this is.
+ enum AppListItemType {
+ // An extension app.
+ TYPE_APP = 1;
+ // A request to remove any matching default installed apps.
+ TYPE_REMOVE_DEFAULT_APP = 2;
+ // A folder containing entries whose |parent_id| matches |item_id|.
+ TYPE_FOLDER = 3;
+ // A URL shortcut (functionally equivalent to a bookmark).
+ TYPE_URL = 4;
+ }
+ optional AppListItemType item_type = 2;
+
+ // Item name (FOLDER or URL).
+ optional string item_name = 3;
+
+ // Id of the parent (folder) item.
+ optional string parent_id = 4;
+
+ // Which page this item will appear on in the app list.
+ optional string page_ordinal = 5;
+
+ // Where on a page this item will appear.
+ optional string item_ordinal = 6;
+}
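As a rough illustration of how these fields relate (not part of the patch; the ids, names, and ordinal strings below are invented), a folder item and an app item parented to it might be filled in like this:

  sync_pb::AppListSpecifics folder;
  folder.set_item_id("folder-1");  // generated folder id
  folder.set_item_type(sync_pb::AppListSpecifics::TYPE_FOLDER);
  folder.set_item_name("Games");

  sync_pb::AppListSpecifics app;
  app.set_item_id("aaaabbbbccccddddeeeeffffgggghhhh");  // extension app id
  app.set_item_type(sync_pb::AppListSpecifics::TYPE_APP);
  app.set_parent_id("folder-1");  // matches the folder's item_id
  app.set_page_ordinal("n");      // which app list page the item is on
  app.set_item_ordinal("t");      // position within that page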
diff --git a/chromium/sync/protocol/article_specifics.proto b/chromium/sync/protocol/article_specifics.proto
new file mode 100644
index 00000000000..00631fd16ec
--- /dev/null
+++ b/chromium/sync/protocol/article_specifics.proto
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Sync protocol datatype extension for the article.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+option retain_unknown_fields = true;
+
+package sync_pb;
+
+// Properties of Article objects.
+message ArticleSpecifics {
+ // Next ID to use: 4
+
+ optional string entry_id = 1;
+
+ optional string title = 2;
+
+ repeated ArticlePage pages = 3;
+}
+
+message ArticlePage {
+ // Next ID to use: 2
+
+ optional string url = 1;
+}
diff --git a/chromium/sync/protocol/bookmark_specifics.proto b/chromium/sync/protocol/bookmark_specifics.proto
index efb03e52ec5..5ac93ba577a 100644
--- a/chromium/sync/protocol/bookmark_specifics.proto
+++ b/chromium/sync/protocol/bookmark_specifics.proto
@@ -14,6 +14,12 @@ option retain_unknown_fields = true;
package sync_pb;
+// Corresponds to a single meta info key/value pair for a bookmark node.
+message MetaInfo {
+ optional string key = 1;
+ optional string value = 2;
+}
+
// Properties of bookmark sync objects.
message BookmarkSpecifics {
optional string url = 1;
@@ -23,5 +29,6 @@ message BookmarkSpecifics {
// base::Time.
optional int64 creation_time_us = 4;
optional string icon_url = 5;
+ repeated MetaInfo meta_info = 6;
}
diff --git a/chromium/sync/protocol/nigori_specifics.proto b/chromium/sync/protocol/nigori_specifics.proto
index 6e72ae14936..87e6c3771f0 100644
--- a/chromium/sync/protocol/nigori_specifics.proto
+++ b/chromium/sync/protocol/nigori_specifics.proto
@@ -120,5 +120,11 @@ message NigoriSpecifics {
// Boolean corresponding to Whether to encrypt favicons data or not.
optional bool encrypt_favicon_images = 35;
optional bool encrypt_favicon_tracking = 36;
+
+ // Boolean corresponding to whether articles should be encrypted.
+ optional bool encrypt_articles = 37;
+
+ // Boolean corresponding to whether app list items should be encrypted.
+ optional bool encrypt_app_list = 38;
}
diff --git a/chromium/sync/protocol/proto_enum_conversions.cc b/chromium/sync/protocol/proto_enum_conversions.cc
index 3183633d6e2..af958222414 100644
--- a/chromium/sync/protocol/proto_enum_conversions.cc
+++ b/chromium/sync/protocol/proto_enum_conversions.cc
@@ -20,6 +20,20 @@ namespace syncer {
#define ENUM_CASE(enum_parent, enum_value) \
case enum_parent::enum_value: return #enum_value
+const char* GetAppListItemTypeString(
+ sync_pb::AppListSpecifics::AppListItemType item_type) {
+ ASSERT_ENUM_BOUNDS(sync_pb::AppListSpecifics, AppListItemType,
+ TYPE_APP, TYPE_URL);
+ switch (item_type) {
+ ENUM_CASE(sync_pb::AppListSpecifics, TYPE_APP);
+ ENUM_CASE(sync_pb::AppListSpecifics, TYPE_REMOVE_DEFAULT_APP);
+ ENUM_CASE(sync_pb::AppListSpecifics, TYPE_FOLDER);
+ ENUM_CASE(sync_pb::AppListSpecifics, TYPE_URL);
+ }
+ NOTREACHED();
+ return "";
+}
+
const char* GetBrowserTypeString(
sync_pb::SessionWindow::BrowserType browser_type) {
ASSERT_ENUM_BOUNDS(sync_pb::SessionWindow, BrowserType,
diff --git a/chromium/sync/protocol/proto_enum_conversions.h b/chromium/sync/protocol/proto_enum_conversions.h
index b6d84facf9a..6812cb7b8d0 100644
--- a/chromium/sync/protocol/proto_enum_conversions.h
+++ b/chromium/sync/protocol/proto_enum_conversions.h
@@ -8,6 +8,7 @@
// Keep this file in sync with the .proto files in this directory.
#include "sync/base/sync_export.h"
+#include "sync/protocol/app_list_specifics.pb.h"
#include "sync/protocol/client_debug_info.pb.h"
#include "sync/protocol/session_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
@@ -20,6 +21,9 @@ namespace syncer {
// The returned strings (which don't have to be freed) are in ASCII.
// The result of passing in an invalid enum value is undefined.
+SYNC_EXPORT_PRIVATE const char* GetAppListItemTypeString(
+ sync_pb::AppListSpecifics::AppListItemType item_type);
+
SYNC_EXPORT_PRIVATE const char* GetBrowserTypeString(
sync_pb::SessionWindow::BrowserType browser_type);
diff --git a/chromium/sync/protocol/proto_enum_conversions_unittest.cc b/chromium/sync/protocol/proto_enum_conversions_unittest.cc
index f66d3796c25..7b323a53a8f 100644
--- a/chromium/sync/protocol/proto_enum_conversions_unittest.cc
+++ b/chromium/sync/protocol/proto_enum_conversions_unittest.cc
@@ -25,6 +25,13 @@ void TestEnumStringFunction(const char* (*enum_string_fn)(T),
}
}
+TEST_F(ProtoEnumConversionsTest, GetAppListItemTypeString) {
+ TestEnumStringFunction(
+ GetAppListItemTypeString,
+ sync_pb::AppListSpecifics::AppListItemType_MIN,
+ sync_pb::AppListSpecifics::AppListItemType_MAX);
+}
+
TEST_F(ProtoEnumConversionsTest, GetBrowserTypeString) {
TestEnumStringFunction(
GetBrowserTypeString,
diff --git a/chromium/sync/protocol/proto_value_conversions.cc b/chromium/sync/protocol/proto_value_conversions.cc
index a7da289b816..5c8a7ab6457 100644
--- a/chromium/sync/protocol/proto_value_conversions.cc
+++ b/chromium/sync/protocol/proto_value_conversions.cc
@@ -14,6 +14,7 @@
#include "base/strings/string_number_conversions.h"
#include "base/values.h"
#include "sync/internal_api/public/base/unique_position.h"
+#include "sync/protocol/app_list_specifics.pb.h"
#include "sync/protocol/app_notification_specifics.pb.h"
#include "sync/protocol/app_setting_specifics.pb.h"
#include "sync/protocol/app_specifics.pb.h"
@@ -54,9 +55,7 @@ base::StringValue* MakeInt64Value(int64 x) {
// that instead of a StringValue.
base::StringValue* MakeBytesValue(const std::string& bytes) {
std::string bytes_base64;
- if (!base::Base64Encode(bytes, &bytes_base64)) {
- NOTREACHED();
- }
+ base::Base64Encode(bytes, &bytes_base64);
return new base::StringValue(bytes_base64);
}
@@ -245,6 +244,9 @@ base::DictionaryValue* SyncedNotificationImageToValue(
const sync_pb::SyncedNotificationImage& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_STR(url);
+ SET_STR(alt_text);
+ SET_INT32(preferred_width);
+ SET_INT32(preferred_height);
return value;
}
@@ -252,6 +254,8 @@ base::DictionaryValue* SyncedNotificationProfileImageToValue(
const sync_pb::SyncedNotificationProfileImage& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_STR(image_url);
+ SET_STR(oid);
+ SET_STR(display_name);
return value;
}
@@ -262,15 +266,45 @@ base::DictionaryValue* MediaToValue(
return value;
}
+base::DictionaryValue* SyncedNotificationActionToValue(
+ const sync_pb::SyncedNotificationAction& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(text);
+ SET(icon, SyncedNotificationImageToValue);
+ SET_STR(url);
+ SET_STR(request_data);
+ SET_STR(accessibility_label);
+ return value;
+}
+
+base::DictionaryValue* SyncedNotificationDestinationToValue(
+ const sync_pb::SyncedNotificationDestination& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(text);
+ SET(icon, SyncedNotificationImageToValue);
+ SET_STR(url);
+ SET_STR(accessibility_label);
+ return value;
+}
+
+base::DictionaryValue* TargetToValue(
+ const sync_pb::Target& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+  SET(destination, SyncedNotificationDestinationToValue);
+ SET(action, SyncedNotificationActionToValue);
+ SET_STR(target_key);
+ return value;
+}
+
base::DictionaryValue* SimpleCollapsedLayoutToValue(
const sync_pb::SimpleCollapsedLayout& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
+ SET(app_icon, SyncedNotificationImageToValue);
+ SET_REP(profile_image, SyncedNotificationProfileImageToValue);
SET_STR(heading);
SET_STR(description);
SET_STR(annotation);
SET_REP(media, MediaToValue);
- SET_REP(profile_image, SyncedNotificationProfileImageToValue);
- SET(app_icon, SyncedNotificationImageToValue);
return value;
}
@@ -278,13 +312,25 @@ base::DictionaryValue* CollapsedInfoToValue(
const sync_pb::CollapsedInfo& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET(simple_collapsed_layout, SimpleCollapsedLayoutToValue);
+ SET_INT64(creation_timestamp_usec);
+  SET(default_destination, SyncedNotificationDestinationToValue);
+ SET_REP(target, TargetToValue);
+ return value;
+}
+
+base::DictionaryValue* SyncedNotificationToValue(
+ const sync_pb::SyncedNotification& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(type);
+ SET_STR(external_id);
+  // TODO(petewil): Add SyncedNotificationCreator here if we ever need it.
return value;
}
base::DictionaryValue* RenderInfoToValue(
const sync_pb::SyncedNotificationRenderInfo& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
- // TODO(petewil): Add the expanded info values too.
+ // TODO(petewil): Add the expanded info values once we start using them.
SET(collapsed_info, CollapsedInfoToValue);
return value;
}
@@ -293,10 +339,25 @@ base::DictionaryValue* CoalescedNotificationToValue(
const sync_pb::CoalescedSyncedNotification& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_STR(key);
+ SET_STR(app_id);
+ SET_REP(notification, SyncedNotificationToValue);
+ SET(render_info, RenderInfoToValue);
SET_INT32(read_state);
SET_INT64(creation_time_msec);
SET_INT32(priority);
- SET(render_info, RenderInfoToValue);
+ return value;
+}
+
+base::DictionaryValue* AppListSpecificsToValue(
+ const sync_pb::AppListSpecifics& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(item_id);
+ SET_ENUM(item_type, GetAppListItemTypeString);
+ SET_STR(item_name);
+ SET_STR(parent_id);
+ SET_STR(page_ordinal);
+ SET_STR(item_ordinal);
+
return value;
}
@@ -364,6 +425,14 @@ base::DictionaryValue* AutofillProfileSpecificsToValue(
return value;
}
+base::DictionaryValue* MetaInfoToValue(
+ const sync_pb::MetaInfo& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(key);
+ SET_STR(value);
+ return value;
+}
+
base::DictionaryValue* BookmarkSpecificsToValue(
const sync_pb::BookmarkSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -372,6 +441,7 @@ base::DictionaryValue* BookmarkSpecificsToValue(
SET_STR(title);
SET_INT64(creation_time_us);
SET_STR(icon_url);
+ SET_REP(meta_info, &MetaInfoToValue);
return value;
}
@@ -515,6 +585,8 @@ base::DictionaryValue* NigoriSpecificsToValue(
SET_BOOL(encrypt_apps);
SET_BOOL(encrypt_search_engines);
SET_BOOL(encrypt_dictionary);
+ SET_BOOL(encrypt_articles);
+ SET_BOOL(encrypt_app_list);
SET_BOOL(encrypt_everything);
SET_BOOL(sync_tab_favicons);
SET_ENUM(passphrase_type, PassphraseTypeString);
@@ -524,6 +596,22 @@ base::DictionaryValue* NigoriSpecificsToValue(
return value;
}
+base::DictionaryValue* ArticlePageToValue(
+ const sync_pb::ArticlePage& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(url);
+ return value;
+}
+
+base::DictionaryValue* ArticleSpecificsToValue(
+ const sync_pb::ArticleSpecifics& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(entry_id);
+ SET_STR(title);
+ SET_REP(pages, ArticlePageToValue);
+ return value;
+}
+
base::DictionaryValue* PasswordSpecificsToValue(
const sync_pb::PasswordSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -621,8 +709,10 @@ base::DictionaryValue* EntitySpecificsToValue(
const sync_pb::EntitySpecifics& specifics) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_FIELD(app, AppSpecificsToValue);
+ SET_FIELD(app_list, AppListSpecificsToValue);
SET_FIELD(app_notification, AppNotificationToValue);
SET_FIELD(app_setting, AppSettingSpecificsToValue);
+ SET_FIELD(article, ArticleSpecificsToValue);
SET_FIELD(autofill, AutofillSpecificsToValue);
SET_FIELD(autofill_profile, AutofillProfileSpecificsToValue);
SET_FIELD(bookmark, BookmarkSpecificsToValue);
diff --git a/chromium/sync/protocol/proto_value_conversions.h b/chromium/sync/protocol/proto_value_conversions.h
index f5306d791a7..9bf45e3267e 100644
--- a/chromium/sync/protocol/proto_value_conversions.h
+++ b/chromium/sync/protocol/proto_value_conversions.h
@@ -14,10 +14,12 @@ class DictionaryValue;
}
namespace sync_pb {
+class AppListSpecifics;
class AppNotification;
class AppNotificationSettings;
class AppSettingSpecifics;
class AppSpecifics;
+class ArticleSpecifics;
class AutofillProfileSpecifics;
class AutofillSpecifics;
class BookmarkSpecifics;
@@ -58,11 +60,15 @@ class SessionTab;
class SessionWindow;
class SimpleCollapsedLayout;
class SyncCycleCompletedEventInfo;
+class SyncedNotification;
+class SyncedNotificationAction;
+class SyncedNotificationDestination;
class SyncedNotificationImage;
class SyncedNotificationProfileImage;
class SyncedNotificationRenderInfo;
class SyncedNotificationSpecifics;
class TabNavigation;
+class Target;
class ThemeSpecifics;
class TimeRangeDirective;
class TypedUrlSpecifics;
@@ -86,6 +92,10 @@ namespace syncer {
SYNC_EXPORT_PRIVATE base::DictionaryValue* EncryptedDataToValue(
const sync_pb::EncryptedData& encrypted_data);
+// Sub-protocol of AppListSpecifics.
+SYNC_EXPORT_PRIVATE base::DictionaryValue* AppListSpecificsToValue(
+ const sync_pb::AppListSpecifics& proto);
+
// Sub-protocol of AppSpecifics.
SYNC_EXPORT_PRIVATE base::DictionaryValue* AppSettingsToValue(
const sync_pb::AppNotificationSettings& app_notification_settings);
@@ -141,6 +151,28 @@ base::DictionaryValue* RenderInfoToValue(
base::DictionaryValue* CoalescedNotificationToValue(
const sync_pb::CoalescedSyncedNotification& proto);
+base::DictionaryValue* SyncedNotificationActionToValue(
+ const sync_pb::SyncedNotificationAction& action);
+
+base::DictionaryValue* SyncedNotificationDestinationToValue(
+ const sync_pb::SyncedNotificationDestination& destination);
+
+base::DictionaryValue* SyncedNotificationToValue(
+ const sync_pb::SyncedNotification& notification);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionSpecificsToValue(
+ const sync_pb::SessionSpecifics& session_specifics);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationImageToValue(
+ const sync_pb::SyncedNotificationImage& image);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue*
+ SyncedNotificationProfileImageToValue(
+ const sync_pb::SyncedNotificationProfileImage& image);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue* TargetToValue(
+ const sync_pb::Target& target);
+
// Main *SpecificsToValue functions.
SYNC_EXPORT_PRIVATE base::DictionaryValue* AppNotificationToValue(
@@ -152,6 +184,9 @@ base::DictionaryValue* AppSettingSpecificsToValue(
SYNC_EXPORT_PRIVATE base::DictionaryValue* AppSpecificsToValue(
const sync_pb::AppSpecifics& app_specifics);
+SYNC_EXPORT_PRIVATE base::DictionaryValue* ArticleSpecificsToValue(
+ const sync_pb::ArticleSpecifics& article_specifics);
+
SYNC_EXPORT_PRIVATE base::DictionaryValue* AutofillSpecificsToValue(
const sync_pb::AutofillSpecifics& autofill_specifics);
@@ -214,16 +249,6 @@ SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationSpecificsToValue(
SYNC_EXPORT_PRIVATE base::DictionaryValue* SearchEngineSpecificsToValue(
const sync_pb::SearchEngineSpecifics& search_engine_specifics);
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionSpecificsToValue(
- const sync_pb::SessionSpecifics& session_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationImageToValue(
- const sync_pb::SyncedNotificationImage& image);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue*
- SyncedNotificationProfileImageToValue(
- const sync_pb::SyncedNotificationProfileImage& image);
-
SYNC_EXPORT_PRIVATE base::DictionaryValue* ThemeSpecificsToValue(
const sync_pb::ThemeSpecifics& theme_specifics);
diff --git a/chromium/sync/protocol/proto_value_conversions_unittest.cc b/chromium/sync/protocol/proto_value_conversions_unittest.cc
index 718a78a60b2..1366dd58900 100644
--- a/chromium/sync/protocol/proto_value_conversions_unittest.cc
+++ b/chromium/sync/protocol/proto_value_conversions_unittest.cc
@@ -53,7 +53,7 @@ TEST_F(ProtoValueConversionsTest, ProtoChangeCheck) {
// If this number changes, that means we added or removed a data
// type. Don't forget to add a unit test for {New
// type}SpecificsToValue below.
- EXPECT_EQ(28, MODEL_TYPE_COUNT);
+ EXPECT_EQ(30, MODEL_TYPE_COUNT);
// We'd also like to check if we changed any field in our messages.
// However, that's hard to do: sizeof could work, but it's
@@ -93,6 +93,10 @@ TEST_F(ProtoValueConversionsTest, PasswordSpecificsData) {
EXPECT_EQ("<redacted>", password_value);
}
+TEST_F(ProtoValueConversionsTest, AppListSpecificsToValue) {
+ TestSpecificsToValue(AppListSpecificsToValue);
+}
+
TEST_F(ProtoValueConversionsTest, AppNotificationToValue) {
TestSpecificsToValue(AppNotificationToValue);
}
@@ -133,6 +137,13 @@ TEST_F(ProtoValueConversionsTest, BookmarkSpecificsData) {
sync_pb::BookmarkSpecifics specifics;
specifics.set_creation_time_us(creation_time.ToInternalValue());
specifics.set_icon_url(icon_url);
+ sync_pb::MetaInfo* meta_1 = specifics.add_meta_info();
+ meta_1->set_key("key1");
+ meta_1->set_value("value1");
+ sync_pb::MetaInfo* meta_2 = specifics.add_meta_info();
+ meta_2->set_key("key2");
+ meta_2->set_value("value2");
+
scoped_ptr<base::DictionaryValue> value(BookmarkSpecificsToValue(specifics));
EXPECT_FALSE(value->empty());
std::string encoded_time;
@@ -141,6 +152,22 @@ TEST_F(ProtoValueConversionsTest, BookmarkSpecificsData) {
std::string encoded_icon_url;
EXPECT_TRUE(value->GetString("icon_url", &encoded_icon_url));
EXPECT_EQ(icon_url, encoded_icon_url);
+ base::ListValue* meta_info_list;
+ ASSERT_TRUE(value->GetList("meta_info", &meta_info_list));
+ EXPECT_EQ(2u, meta_info_list->GetSize());
+ base::DictionaryValue* meta_info;
+ std::string meta_key;
+ std::string meta_value;
+ ASSERT_TRUE(meta_info_list->GetDictionary(0, &meta_info));
+ EXPECT_TRUE(meta_info->GetString("key", &meta_key));
+ EXPECT_TRUE(meta_info->GetString("value", &meta_value));
+ EXPECT_EQ("key1", meta_key);
+ EXPECT_EQ("value1", meta_value);
+ ASSERT_TRUE(meta_info_list->GetDictionary(1, &meta_info));
+ EXPECT_TRUE(meta_info->GetString("key", &meta_key));
+ EXPECT_TRUE(meta_info->GetString("value", &meta_value));
+ EXPECT_EQ("key2", meta_key);
+ EXPECT_EQ("value2", meta_value);
}
TEST_F(ProtoValueConversionsTest, PriorityPreferenceSpecificsToValue) {
@@ -219,6 +246,10 @@ TEST_F(ProtoValueConversionsTest, DictionarySpecificsToValue) {
TestSpecificsToValue(DictionarySpecificsToValue);
}
+TEST_F(ProtoValueConversionsTest, ArticleSpecificsToValue) {
+ TestSpecificsToValue(ArticleSpecificsToValue);
+}
+
// TODO(akalin): Figure out how to better test EntitySpecificsToValue.
TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
@@ -228,8 +259,10 @@ TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
#define SET_FIELD(key) (void)specifics.mutable_##key()
SET_FIELD(app);
+ SET_FIELD(app_list);
SET_FIELD(app_notification);
SET_FIELD(app_setting);
+ SET_FIELD(article);
SET_FIELD(autofill);
SET_FIELD(autofill_profile);
SET_FIELD(bookmark);
diff --git a/chromium/sync/protocol/sync.proto b/chromium/sync/protocol/sync.proto
index 445fa413840..ecaceefbf6a 100644
--- a/chromium/sync/protocol/sync.proto
+++ b/chromium/sync/protocol/sync.proto
@@ -14,9 +14,11 @@ option retain_unknown_fields = true;
package sync_pb;
+import "app_list_specifics.proto";
import "app_notification_specifics.proto";
import "app_setting_specifics.proto";
import "app_specifics.proto";
+import "article_specifics.proto";
import "autofill_specifics.proto";
import "bookmark_specifics.proto";
import "client_commands.proto";
@@ -118,6 +120,8 @@ message EntitySpecifics {
optional FaviconImageSpecifics favicon_image = 182019;
optional ManagedUserSettingSpecifics managed_user_setting = 186662;
optional ManagedUserSpecifics managed_user = 194582;
+ optional ArticleSpecifics article = 223759;
+ optional AppListSpecifics app_list = 229170;
}
message SyncEntity {
diff --git a/chromium/sync/protocol/unique_position.proto b/chromium/sync/protocol/unique_position.proto
index 992c1342d69..4864f27ae0e 100644
--- a/chromium/sync/protocol/unique_position.proto
+++ b/chromium/sync/protocol/unique_position.proto
@@ -24,7 +24,33 @@ package sync_pb;
// Items under the same parent are positioned relative to each other by a
// lexicographic comparison of their UniquePosition values.
message UniquePosition {
+ // History:
+ //
+ // Unique positions were first introduced in M28. This change was rolled out
+  // in such a way that it would try to maintain backwards compatibility with
+ // clients that understood only the old int64-based positions.
+ //
+ // At first, clients supported only the 'value' field. This version never
+ // made it to stable. We later added support for the 'compressed_value'
+ // field, and clients would populate either one or the other.
+ //
+ // In M30, we added the custom_compressed_v1 representation. This
+ // representation was better than the previous implementations in almost every
+ // way. However, we could not use it right away, since older clients would
+ // not understand it. We decided to write both the old-style ('value' or
+  // 'compressed_value') representation and the 'custom_compressed_v1'
+  // representation to every protobuf during the transition period. Protobufs
+ // written during this transition period would be readable by clients who
+ // understand at least one of the two formats.
+ //
+ // In M33, we dropped support for writing the backwards-compatibility fields.
+  // Protobufs written by this version or later will not be intelligible by
+ // clients with version M29 or older. Those clients will end up making use of
+ // the old int64 position fallback mechanism.
+
// The uncompressed string of bytes representing the position.
+ //
+ // Deprecated. See history note above.
optional bytes value = 1;
// The client may choose to write a compressed position to this field instead
@@ -33,6 +59,8 @@ message UniquePosition {
// with gzip and stored in the compressed_value field. The position's
// uncompressed length must be specified and written to the
// uncompressed_length field.
+ //
+ // Deprecated. See history note above.
optional bytes compressed_value = 2;
optional uint64 uncompressed_length = 3;
@@ -44,7 +72,7 @@ message UniquePosition {
//
// The compression scheme is implemented and documented in
// sync/internal_api/base/unique_position.cc.
- //
+ //
// As of M30, this is the preferred encoding. Newer clients may continue to
// populate the 'value' and 'compressed_value' fields to ensure backwards
// compatibility, but they will always try to read from this field first.
diff --git a/chromium/sync/sessions/data_type_tracker.cc b/chromium/sync/sessions/data_type_tracker.cc
index a061679839f..b0b464923ce 100644
--- a/chromium/sync/sessions/data_type_tracker.cc
+++ b/chromium/sync/sessions/data_type_tracker.cc
@@ -5,6 +5,8 @@
#include "sync/sessions/data_type_tracker.h"
#include "base/logging.h"
+#include "sync/internal_api/public/base/invalidation.h"
+#include "sync/notifier/single_object_invalidation_set.h"
#include "sync/sessions/nudge_tracker.h"
namespace syncer {
@@ -27,13 +29,20 @@ void DataTypeTracker::RecordLocalRefreshRequest() {
local_refresh_request_count_++;
}
-void DataTypeTracker::RecordRemoteInvalidation(
- const std::string& payload) {
- pending_payloads_.push_back(payload);
- if (pending_payloads_.size() > payload_buffer_size_) {
- // Drop the oldest payload if we've overflowed.
- pending_payloads_.pop_front();
- local_payload_overflow_ = true;
+void DataTypeTracker::RecordRemoteInvalidations(
+ const SingleObjectInvalidationSet& invalidations) {
+ for (SingleObjectInvalidationSet::const_iterator it =
+ invalidations.begin(); it != invalidations.end(); ++it) {
+ if (it->is_unknown_version()) {
+ server_payload_overflow_ = true;
+ } else {
+ pending_payloads_.push_back(it->payload());
+ if (pending_payloads_.size() > payload_buffer_size_) {
+ // Drop the oldest payload if we've overflowed.
+ pending_payloads_.pop_front();
+ local_payload_overflow_ = true;
+ }
+ }
}
}
diff --git a/chromium/sync/sessions/data_type_tracker.h b/chromium/sync/sessions/data_type_tracker.h
index 30bc3b6dbbc..6ecaa0eb7c8 100644
--- a/chromium/sync/sessions/data_type_tracker.h
+++ b/chromium/sync/sessions/data_type_tracker.h
@@ -14,6 +14,10 @@
#include "sync/protocol/sync.pb.h"
namespace syncer {
+
+class Invalidation;
+class SingleObjectInvalidationSet;
+
namespace sessions {
typedef std::deque<std::string> PayloadList;
@@ -32,8 +36,9 @@ class DataTypeTracker {
// Tracks that a local refresh request has been made for this type.
void RecordLocalRefreshRequest();
- // Tracks that we received an invalidation notification for this type.
- void RecordRemoteInvalidation(const std::string& payload);
+ // Tracks that we received invalidation notifications for this type.
+ void RecordRemoteInvalidations(
+ const SingleObjectInvalidationSet& invalidations);
// Records that a sync cycle has been performed successfully.
// Generally, this means that all local changes have been committed and all
diff --git a/chromium/sync/sessions/debug_info_getter.h b/chromium/sync/sessions/debug_info_getter.h
index c1536ba50c8..7efe0cb649f 100644
--- a/chromium/sync/sessions/debug_info_getter.h
+++ b/chromium/sync/sessions/debug_info_getter.h
@@ -15,9 +15,13 @@ namespace sessions {
// to communicate the debug info data to the syncer.
class SYNC_EXPORT_PRIVATE DebugInfoGetter {
public:
- // Gets the client debug info and clears the state so the same data is not
- // sent again.
- virtual void GetAndClearDebugInfo(sync_pb::DebugInfo* debug_info) = 0;
+ // Gets the client debug info. Be sure to clear the info to ensure the data
+ // isn't sent multiple times.
+ virtual void GetDebugInfo(sync_pb::DebugInfo* debug_info) = 0;
+
+ // Clears the debug info.
+ virtual void ClearDebugInfo() = 0;
+
virtual ~DebugInfoGetter() {}
};
@@ -25,4 +29,3 @@ class SYNC_EXPORT_PRIVATE DebugInfoGetter {
} // namespace syncer
#endif // SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
-
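A sketch of the call pattern implied by the split interface (the caller and the outgoing message below are assumptions, not part of the patch): the caller fetches the debug info while building a request and clears it only once that request has actually gone out, presumably so the data is not lost if the attempt fails.

  // |debug_info_getter| and |message| are assumed to exist in the caller.
  sync_pb::DebugInfo debug_info;
  debug_info_getter->GetDebugInfo(&debug_info);
  // Assumes the outgoing ClientToServerMessage carries a debug_info field.
  message.mutable_debug_info()->CopyFrom(debug_info);
  // ... after the message has been sent successfully ...
  debug_info_getter->ClearDebugInfo();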
diff --git a/chromium/sync/sessions/nudge_tracker.cc b/chromium/sync/sessions/nudge_tracker.cc
index 8ec8970ef36..94bef81a350 100644
--- a/chromium/sync/sessions/nudge_tracker.cc
+++ b/chromium/sync/sessions/nudge_tracker.cc
@@ -40,6 +40,8 @@ bool NudgeTracker::IsSyncRequired() const {
}
bool NudgeTracker::IsGetUpdatesRequired() const {
+ if (invalidations_out_of_sync_)
+ return true;
for (TypeTrackerMap::const_iterator it = type_trackers_.begin();
it != type_trackers_.end(); ++it) {
if (it->second.IsGetUpdatesRequired()) {
@@ -96,16 +98,17 @@ void NudgeTracker::RecordRemoteInvalidation(
const ObjectIdInvalidationMap& invalidation_map) {
updates_source_ = sync_pb::GetUpdatesCallerInfo::NOTIFICATION;
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
ModelType type;
- if (!ObjectIdToRealModelType(it->first, &type)) {
+ if (!ObjectIdToRealModelType(*it, &type)) {
NOTREACHED()
- << "Object ID " << ObjectIdToString(it->first)
+ << "Object ID " << ObjectIdToString(*it)
<< " does not map to valid model type";
}
DCHECK(type_trackers_.find(type) != type_trackers_.end());
- type_trackers_[type].RecordRemoteInvalidation(it->second.payload);
+ type_trackers_[type].RecordRemoteInvalidations(
+ invalidation_map.ForObject(*it));
}
}
diff --git a/chromium/sync/sessions/nudge_tracker.h b/chromium/sync/sessions/nudge_tracker.h
index aa4414cb7ae..fcd01503410 100644
--- a/chromium/sync/sessions/nudge_tracker.h
+++ b/chromium/sync/sessions/nudge_tracker.h
@@ -13,11 +13,13 @@
#include "base/compiler_specific.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/model_type.h"
-#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/protocol/sync.pb.h"
#include "sync/sessions/data_type_tracker.h"
namespace syncer {
+
+class ObjectIdInvalidationMap;
+
namespace sessions {
class SYNC_EXPORT_PRIVATE NudgeTracker {
diff --git a/chromium/sync/sessions/nudge_tracker_unittest.cc b/chromium/sync/sessions/nudge_tracker_unittest.cc
index ea7f4c74765..450d17fe3da 100644
--- a/chromium/sync/sessions/nudge_tracker_unittest.cc
+++ b/chromium/sync/sessions/nudge_tracker_unittest.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "sync/internal_api/public/base/model_type_test_util.h"
+#include "sync/notifier/invalidation_util.h"
+#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/sessions/nudge_tracker.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -26,97 +28,102 @@ namespace sessions {
class NudgeTrackerTest : public ::testing::Test {
public:
+ NudgeTrackerTest() {
+ SetInvalidationsInSync();
+ }
+
static size_t GetHintBufferSize() {
// Assumes that no test has adjusted this size.
return NudgeTracker::kDefaultMaxPayloadsPerType;
}
- bool InvalidationsOutOfSync(const NudgeTracker& nudge_tracker) {
+ bool InvalidationsOutOfSync() const {
// We don't currently track invalidations out of sync on a per-type basis.
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
return gu_trigger.invalidations_out_of_sync();
}
- int ProtoLocallyModifiedCount(const NudgeTracker& nudge_tracker,
- ModelType type) {
+ int ProtoLocallyModifiedCount(ModelType type) const {
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(type, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(type, &gu_trigger);
return gu_trigger.local_modification_nudges();
}
- int ProtoRefreshRequestedCount(const NudgeTracker& nudge_tracker,
- ModelType type) {
+ int ProtoRefreshRequestedCount(ModelType type) const {
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(type, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(type, &gu_trigger);
return gu_trigger.datatype_refresh_nudges();
}
+
+ void SetInvalidationsInSync() {
+ nudge_tracker_.OnInvalidationsEnabled();
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ }
+
+ protected:
+ NudgeTracker nudge_tracker_;
};
// Exercise an empty NudgeTracker.
// Use with valgrind to detect uninitialized members.
TEST_F(NudgeTrackerTest, EmptyNudgeTracker) {
- NudgeTracker nudge_tracker;
-
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ // Now we're at the normal, "idle" state.
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::UNKNOWN,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::UNKNOWN,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
}
// Verify that nudges override each other based on a priority order.
// LOCAL < DATATYPE_REFRESH < NOTIFICATION
TEST_F(NudgeTrackerTest, SourcePriorities) {
- NudgeTracker nudge_tracker;
-
// Track a local nudge.
- nudge_tracker.RecordLocalChange(ModelTypeSet(BOOKMARKS));
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::LOCAL,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
// A refresh request will override it.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(TYPED_URLS));
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(TYPED_URLS));
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::DATATYPE_REFRESH,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
// Another local nudge will not be enough to change it.
- nudge_tracker.RecordLocalChange(ModelTypeSet(BOOKMARKS));
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::DATATYPE_REFRESH,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
// An invalidation will override the refresh request source.
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(PREFERENCES, 1, "hint");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
// Neither local nudges nor refresh requests will override it.
- nudge_tracker.RecordLocalChange(ModelTypeSet(BOOKMARKS));
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- nudge_tracker.updates_source());
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(TYPED_URLS));
+ nudge_tracker_.updates_source());
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(TYPED_URLS));
EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- nudge_tracker.updates_source());
+ nudge_tracker_.updates_source());
}
TEST_F(NudgeTrackerTest, HintCoalescing) {
- NudgeTracker nudge_tracker;
-
// Easy case: record one hint.
{
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(BOOKMARKS, 1, "bm_hint_1");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
ASSERT_EQ(1, gu_trigger.notification_hint_size());
EXPECT_EQ("bm_hint_1", gu_trigger.notification_hint(0));
EXPECT_FALSE(gu_trigger.client_dropped_hints());
@@ -126,10 +133,10 @@ TEST_F(NudgeTrackerTest, HintCoalescing) {
{
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(BOOKMARKS, 2, "bm_hint_2");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
ASSERT_EQ(2, gu_trigger.notification_hint_size());
    // Expect the most recent hint to be last in the list.
@@ -142,11 +149,11 @@ TEST_F(NudgeTrackerTest, HintCoalescing) {
{
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(PASSWORDS, 1, "pw_hint_1");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
// Re-verify the bookmarks to make sure they're unaffected.
sync_pb::GetUpdateTriggers bm_gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &bm_gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &bm_gu_trigger);
ASSERT_EQ(2, bm_gu_trigger.notification_hint_size());
EXPECT_EQ("bm_hint_1", bm_gu_trigger.notification_hint(0));
EXPECT_EQ("bm_hint_2",
@@ -155,7 +162,7 @@ TEST_F(NudgeTrackerTest, HintCoalescing) {
// Verify the new type, too.
sync_pb::GetUpdateTriggers pw_gu_trigger;
- nudge_tracker.FillProtoMessage(PASSWORDS, &pw_gu_trigger);
+ nudge_tracker_.FillProtoMessage(PASSWORDS, &pw_gu_trigger);
ASSERT_EQ(1, pw_gu_trigger.notification_hint_size());
EXPECT_EQ("pw_hint_1", pw_gu_trigger.notification_hint(0));
EXPECT_FALSE(pw_gu_trigger.client_dropped_hints());
@@ -163,16 +170,15 @@ TEST_F(NudgeTrackerTest, HintCoalescing) {
}
TEST_F(NudgeTrackerTest, DropHintsLocally) {
- NudgeTracker nudge_tracker;
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(BOOKMARKS, 1, "hint");
for (size_t i = 0; i < GetHintBufferSize(); ++i) {
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
}
{
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
EXPECT_EQ(GetHintBufferSize(),
static_cast<size_t>(gu_trigger.notification_hint_size()));
EXPECT_FALSE(gu_trigger.client_dropped_hints());
@@ -181,11 +187,11 @@ TEST_F(NudgeTrackerTest, DropHintsLocally) {
// Force an overflow.
ObjectIdInvalidationMap invalidation_map2 =
BuildInvalidationMap(BOOKMARKS, 1000, "new_hint");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map2);
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map2);
{
sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker.FillProtoMessage(BOOKMARKS, &gu_trigger);
+ nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
EXPECT_EQ(GetHintBufferSize(),
static_cast<size_t>(gu_trigger.notification_hint_size()));
EXPECT_TRUE(gu_trigger.client_dropped_hints());
@@ -203,225 +209,218 @@ TEST_F(NudgeTrackerTest, DropHintsLocally) {
// Checks the behaviour of the invalidations-out-of-sync flag.
TEST_F(NudgeTrackerTest, EnableDisableInvalidations) {
- NudgeTracker nudge_tracker;
-
- // By default, assume we're out of sync with the invalidation server.
- EXPECT_TRUE(InvalidationsOutOfSync(nudge_tracker));
+ // Start with invalidations offline.
+ nudge_tracker_.OnInvalidationsDisabled();
+ EXPECT_TRUE(InvalidationsOutOfSync());
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
// Simply enabling invalidations does not bring us back into sync.
- nudge_tracker.OnInvalidationsEnabled();
- EXPECT_TRUE(InvalidationsOutOfSync(nudge_tracker));
+ nudge_tracker_.OnInvalidationsEnabled();
+ EXPECT_TRUE(InvalidationsOutOfSync());
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
// We must successfully complete a sync cycle while invalidations are enabled
// to be sure that we're in sync.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(InvalidationsOutOfSync(nudge_tracker));
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(InvalidationsOutOfSync());
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
  // If the invalidator malfunctions, we become unsynced again.
- nudge_tracker.OnInvalidationsDisabled();
- EXPECT_TRUE(InvalidationsOutOfSync(nudge_tracker));
+ nudge_tracker_.OnInvalidationsDisabled();
+ EXPECT_TRUE(InvalidationsOutOfSync());
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
// A sync cycle while invalidations are disabled won't reset the flag.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_TRUE(InvalidationsOutOfSync(nudge_tracker));
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_TRUE(InvalidationsOutOfSync());
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
// Nor will the re-enabling of invalidations be sufficient, even now that
// we've had a successful sync cycle.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_TRUE(InvalidationsOutOfSync(nudge_tracker));
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_TRUE(InvalidationsOutOfSync());
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
}
// Tests that locally modified types are correctly written out to the
// GetUpdateTriggers proto.
TEST_F(NudgeTrackerTest, WriteLocallyModifiedTypesToProto) {
- NudgeTracker nudge_tracker;
-
// Should not be locally modified by default.
- EXPECT_EQ(0, ProtoLocallyModifiedCount(nudge_tracker, PREFERENCES));
+ EXPECT_EQ(0, ProtoLocallyModifiedCount(PREFERENCES));
// Record a local bookmark change. Verify it was registered correctly.
- nudge_tracker.RecordLocalChange(ModelTypeSet(PREFERENCES));
- EXPECT_EQ(1, ProtoLocallyModifiedCount(nudge_tracker, PREFERENCES));
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(PREFERENCES));
+ EXPECT_EQ(1, ProtoLocallyModifiedCount(PREFERENCES));
// Record a successful sync cycle. Verify the count is cleared.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_EQ(0, ProtoLocallyModifiedCount(nudge_tracker, PREFERENCES));
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_EQ(0, ProtoLocallyModifiedCount(PREFERENCES));
}
// Tests that refresh requested types are correctly written out to the
// GetUpdateTriggers proto.
TEST_F(NudgeTrackerTest, WriteRefreshRequestedTypesToProto) {
- NudgeTracker nudge_tracker;
-
// There should be no refresh requested by default.
- EXPECT_EQ(0, ProtoRefreshRequestedCount(nudge_tracker, SESSIONS));
+ EXPECT_EQ(0, ProtoRefreshRequestedCount(SESSIONS));
// Record a local refresh request. Verify it was registered correctly.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_EQ(1, ProtoRefreshRequestedCount(nudge_tracker, SESSIONS));
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
+ EXPECT_EQ(1, ProtoRefreshRequestedCount(SESSIONS));
// Record a successful sync cycle. Verify the count is cleared.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_EQ(0, ProtoRefreshRequestedCount(nudge_tracker, SESSIONS));
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_EQ(0, ProtoRefreshRequestedCount(SESSIONS));
}
// Basic tests for the IsSyncRequired() flag.
TEST_F(NudgeTrackerTest, IsSyncRequired) {
- NudgeTracker nudge_tracker;
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
// Local changes.
- nudge_tracker.RecordLocalChange(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsSyncRequired());
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
// Refresh requests.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsSyncRequired());
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
// Invalidations.
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(PREFERENCES, 1, "hint");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
- EXPECT_TRUE(nudge_tracker.IsSyncRequired());
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
+ EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
}
// Basic tests for the IsGetUpdatesRequired() flag.
TEST_F(NudgeTrackerTest, IsGetUpdatesRequired) {
- NudgeTracker nudge_tracker;
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
// Local changes.
- nudge_tracker.RecordLocalChange(ModelTypeSet(SESSIONS));
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(SESSIONS));
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
// Refresh requests.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsGetUpdatesRequired());
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
// Invalidations.
ObjectIdInvalidationMap invalidation_map =
BuildInvalidationMap(PREFERENCES, 1, "hint");
- nudge_tracker.RecordRemoteInvalidation(invalidation_map);
- EXPECT_TRUE(nudge_tracker.IsGetUpdatesRequired());
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
}
// Test IsSyncRequired() responds correctly to data type throttling.
TEST_F(NudgeTrackerTest, IsSyncRequired_Throttling) {
- NudgeTracker nudge_tracker;
const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
const base::TimeDelta throttle_length = base::TimeDelta::FromMinutes(10);
const base::TimeTicks t1 = t0 + throttle_length;
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
// A local change to sessions enables the flag.
- nudge_tracker.RecordLocalChange(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.RecordLocalChange(ModelTypeSet(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
// But the throttling of sessions unsets it.
- nudge_tracker.SetTypesThrottledUntil(ModelTypeSet(SESSIONS),
+ nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS),
throttle_length,
t0);
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
// A refresh request for bookmarks means we have reason to sync again.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(BOOKMARKS));
- EXPECT_TRUE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(BOOKMARKS));
+ EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
// A successful sync cycle means we took care of bookmarks.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
// But we still haven't dealt with sessions. We'll need to remember
// that sessions are out of sync and re-enable the flag when their
// throttling interval expires.
- nudge_tracker.UpdateTypeThrottlingState(t1);
- EXPECT_FALSE(nudge_tracker.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsSyncRequired());
+ nudge_tracker_.UpdateTypeThrottlingState(t1);
+ EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
}
// Test IsGetUpdatesRequired() responds correctly to data type throttling.
TEST_F(NudgeTrackerTest, IsGetUpdatesRequired_Throttling) {
- NudgeTracker nudge_tracker;
const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
const base::TimeDelta throttle_length = base::TimeDelta::FromMinutes(10);
const base::TimeTicks t1 = t0 + throttle_length;
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
// A refresh request to sessions enables the flag.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
// But the throttling of sessions unsets it.
- nudge_tracker.SetTypesThrottledUntil(ModelTypeSet(SESSIONS),
+ nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS),
throttle_length,
t0);
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
// A refresh request for bookmarks means we have reason to sync again.
- nudge_tracker.RecordLocalRefreshRequest(ModelTypeSet(BOOKMARKS));
- EXPECT_TRUE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(BOOKMARKS));
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
// A successful sync cycle means we took care of bookmarks.
- nudge_tracker.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.RecordSuccessfulSyncCycle();
+ EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
// But we still haven't dealt with sessions. We'll need to remember
// that sessions are out of sync and re-enable the flag when their
// throttling interval expires.
- nudge_tracker.UpdateTypeThrottlingState(t1);
- EXPECT_FALSE(nudge_tracker.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsGetUpdatesRequired());
+ nudge_tracker_.UpdateTypeThrottlingState(t1);
+ EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
}
// Tests throttling-related getter functions when no types are throttled.
TEST_F(NudgeTrackerTest, NoTypesThrottled) {
- NudgeTracker nudge_tracker;
-
- EXPECT_FALSE(nudge_tracker.IsAnyTypeThrottled());
- EXPECT_FALSE(nudge_tracker.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker.GetThrottledTypes().Empty());
+ EXPECT_FALSE(nudge_tracker_.IsAnyTypeThrottled());
+ EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.GetThrottledTypes().Empty());
}
// Tests throttling-related getter functions when some types are throttled.
TEST_F(NudgeTrackerTest, ThrottleAndUnthrottle) {
- NudgeTracker nudge_tracker;
const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
const base::TimeDelta throttle_length = base::TimeDelta::FromMinutes(10);
const base::TimeTicks t1 = t0 + throttle_length;
- nudge_tracker.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, PREFERENCES),
+ nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, PREFERENCES),
throttle_length,
t0);
- EXPECT_TRUE(nudge_tracker.IsAnyTypeThrottled());
- EXPECT_TRUE(nudge_tracker.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker.IsTypeThrottled(PREFERENCES));
- EXPECT_FALSE(nudge_tracker.GetThrottledTypes().Empty());
- EXPECT_EQ(throttle_length, nudge_tracker.GetTimeUntilNextUnthrottle(t0));
+ EXPECT_TRUE(nudge_tracker_.IsAnyTypeThrottled());
+ EXPECT_TRUE(nudge_tracker_.IsTypeThrottled(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.IsTypeThrottled(PREFERENCES));
+ EXPECT_FALSE(nudge_tracker_.GetThrottledTypes().Empty());
+ EXPECT_EQ(throttle_length, nudge_tracker_.GetTimeUntilNextUnthrottle(t0));
- nudge_tracker.UpdateTypeThrottlingState(t1);
+ nudge_tracker_.UpdateTypeThrottlingState(t1);
- EXPECT_FALSE(nudge_tracker.IsAnyTypeThrottled());
- EXPECT_FALSE(nudge_tracker.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker.GetThrottledTypes().Empty());
+ EXPECT_FALSE(nudge_tracker_.IsAnyTypeThrottled());
+ EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
+ EXPECT_TRUE(nudge_tracker_.GetThrottledTypes().Empty());
}
TEST_F(NudgeTrackerTest, OverlappingThrottleIntervals) {
- NudgeTracker nudge_tracker;
const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
const base::TimeDelta throttle1_length = base::TimeDelta::FromMinutes(10);
const base::TimeDelta throttle2_length = base::TimeDelta::FromMinutes(20);
@@ -429,39 +428,39 @@ TEST_F(NudgeTrackerTest, OverlappingThrottleIntervals) {
const base::TimeTicks t2 = t0 + throttle2_length;
// Setup the longer of two intervals.
- nudge_tracker.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, PREFERENCES),
+ nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, PREFERENCES),
throttle2_length,
t0);
EXPECT_TRUE(ModelTypeSetEquals(
ModelTypeSet(SESSIONS, PREFERENCES),
- nudge_tracker.GetThrottledTypes()));
+ nudge_tracker_.GetThrottledTypes()));
EXPECT_EQ(throttle2_length,
- nudge_tracker.GetTimeUntilNextUnthrottle(t0));
+ nudge_tracker_.GetTimeUntilNextUnthrottle(t0));
// Setup the shorter interval.
- nudge_tracker.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, BOOKMARKS),
+ nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, BOOKMARKS),
throttle1_length,
t0);
EXPECT_TRUE(ModelTypeSetEquals(
ModelTypeSet(SESSIONS, PREFERENCES, BOOKMARKS),
- nudge_tracker.GetThrottledTypes()));
+ nudge_tracker_.GetThrottledTypes()));
EXPECT_EQ(throttle1_length,
- nudge_tracker.GetTimeUntilNextUnthrottle(t0));
+ nudge_tracker_.GetTimeUntilNextUnthrottle(t0));
// Expire the first interval.
- nudge_tracker.UpdateTypeThrottlingState(t1);
+ nudge_tracker_.UpdateTypeThrottlingState(t1);
// SESSIONS appeared in both intervals. We expect it will be throttled for
// the longer of the two, so it's still throttled at time t1.
EXPECT_TRUE(ModelTypeSetEquals(
ModelTypeSet(SESSIONS, PREFERENCES),
- nudge_tracker.GetThrottledTypes()));
+ nudge_tracker_.GetThrottledTypes()));
EXPECT_EQ(throttle2_length - throttle1_length,
- nudge_tracker.GetTimeUntilNextUnthrottle(t1));
+ nudge_tracker_.GetTimeUntilNextUnthrottle(t1));
// Expire the second interval.
- nudge_tracker.UpdateTypeThrottlingState(t2);
- EXPECT_TRUE(nudge_tracker.GetThrottledTypes().Empty());
+ nudge_tracker_.UpdateTypeThrottlingState(t2);
+ EXPECT_TRUE(nudge_tracker_.GetThrottledTypes().Empty());
}
} // namespace sessions
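For orientation, the hunks above all make the same mechanical change: each test used to construct a local NudgeTracker, and the fixture now owns a shared nudge_tracker_ member that the helpers read directly. A condensed sketch of the resulting fixture shape (not part of the patch; the InvalidationsOutOfSync() body is an assumption based on the proto field the pre-patch free-standing helper inspected):

class NudgeTrackerTest : public ::testing::Test {
 public:
  // Assumed helper: reports the out-of-sync flag written into the proto.
  bool InvalidationsOutOfSync() const {
    sync_pb::GetUpdateTriggers gu_trigger;
    nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
    return gu_trigger.invalidations_out_of_sync();
  }

  // Helpers from the hunks above: they now read the shared member.
  int ProtoLocallyModifiedCount(ModelType type) const {
    sync_pb::GetUpdateTriggers gu_trigger;
    nudge_tracker_.FillProtoMessage(type, &gu_trigger);
    return gu_trigger.local_modification_nudges();
  }

  void SetInvalidationsInSync() {
    nudge_tracker_.OnInvalidationsEnabled();
    nudge_tracker_.RecordSuccessfulSyncCycle();
  }

 protected:
  NudgeTracker nudge_tracker_;
};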
diff --git a/chromium/sync/sessions/ordered_commit_set.cc b/chromium/sync/sessions/ordered_commit_set.cc
deleted file mode 100644
index 3bbddb9c289..00000000000
--- a/chromium/sync/sessions/ordered_commit_set.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/ordered_commit_set.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-
-namespace syncer {
-namespace sessions {
-
-OrderedCommitSet::OrderedCommitSet(const ModelSafeRoutingInfo& routes)
- : routes_(routes) {
-}
-
-OrderedCommitSet::~OrderedCommitSet() {}
-
-void OrderedCommitSet::AddCommitItem(const int64 metahandle,
- ModelType type) {
- if (!HaveCommitItem(metahandle)) {
- inserted_metahandles_.insert(metahandle);
- metahandle_order_.push_back(metahandle);
- projections_[GetGroupForModelType(type, routes_)].push_back(
- inserted_metahandles_.size() - 1);
- types_.push_back(type);
- types_in_list_.Put(type);
- }
-}
-
-void OrderedCommitSet::AddCommitItems(
- const std::vector<int64> metahandles,
- ModelType type) {
- for (std::vector<int64>::const_iterator it = metahandles.begin();
- it != metahandles.end(); ++it) {
- AddCommitItem(*it, type);
- }
-}
-
-const OrderedCommitSet::Projection& OrderedCommitSet::GetCommitIdProjection(
- ModelSafeGroup group) const {
- Projections::const_iterator i = projections_.find(group);
- DCHECK(i != projections_.end());
- return i->second;
-}
-
-void OrderedCommitSet::Append(const OrderedCommitSet& other) {
- for (size_t i = 0; i < other.Size(); ++i) {
- CommitItem item = other.GetCommitItemAt(i);
- AddCommitItem(item.meta, item.group);
- }
-}
-
-void OrderedCommitSet::AppendReverse(const OrderedCommitSet& other) {
- for (int i = other.Size() - 1; i >= 0; i--) {
- CommitItem item = other.GetCommitItemAt(i);
- AddCommitItem(item.meta, item.group);
- }
-}
-
-void OrderedCommitSet::Truncate(size_t max_size) {
- if (max_size < metahandle_order_.size()) {
- for (size_t i = max_size; i < metahandle_order_.size(); ++i) {
- inserted_metahandles_.erase(metahandle_order_[i]);
- }
-
- // Some projections may refer to indices that are getting chopped.
- // Since projections are in increasing order, it's easy to fix. Except
- // that you can't erase(..) using a reverse_iterator, so we use binary
- // search to find the chop point.
- Projections::iterator it = projections_.begin();
- for (; it != projections_.end(); ++it) {
- // For each projection, chop off any indices larger than or equal to
- // max_size by looking for max_size using binary search.
- Projection& p = it->second;
- Projection::iterator element = std::lower_bound(p.begin(), p.end(),
- max_size);
- if (element != p.end())
- p.erase(element, p.end());
- }
- metahandle_order_.resize(max_size);
- types_.resize(max_size);
- }
-}
-
-void OrderedCommitSet::Clear() {
- inserted_metahandles_.clear();
- metahandle_order_.clear();
- for (Projections::iterator it = projections_.begin();
- it != projections_.end(); ++it) {
- it->second.clear();
- }
- types_.clear();
- types_in_list_.Clear();
-}
-
-OrderedCommitSet::CommitItem OrderedCommitSet::GetCommitItemAt(
- const size_t position) const {
- DCHECK(position < Size());
- CommitItem return_item = {metahandle_order_[position],
- types_[position]};
- return return_item;
-}
-
-bool OrderedCommitSet::HasBookmarkCommitId() const {
- ModelSafeRoutingInfo::const_iterator group = routes_.find(BOOKMARKS);
- if (group == routes_.end())
- return false;
- Projections::const_iterator proj = projections_.find(group->second);
- if (proj == projections_.end())
- return false;
- DCHECK_LE(proj->second.size(), types_.size());
- for (size_t i = 0; i < proj->second.size(); i++) {
- if (types_[proj->second[i]] == BOOKMARKS)
- return true;
- }
- return false;
-}
-
-void OrderedCommitSet::operator=(const OrderedCommitSet& other) {
- inserted_metahandles_ = other.inserted_metahandles_;
- metahandle_order_ = other.metahandle_order_;
- projections_ = other.projections_;
- types_ = other.types_;
- routes_ = other.routes_;
-}
-
-} // namespace sessions
-} // namespace syncer
-
diff --git a/chromium/sync/sessions/ordered_commit_set.h b/chromium/sync/sessions/ordered_commit_set.h
deleted file mode 100644
index a30724e641c..00000000000
--- a/chromium/sync/sessions/ordered_commit_set.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
-#define SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
-
-#include <map>
-#include <set>
-#include <vector>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-
-namespace syncer {
-namespace sessions {
-
-// TODO(ncarter): This code is more generic than just Commit and can
-// be reused elsewhere (e.g. ChangeReorderBuffer do similar things). Merge
-// all these implementations.
-class SYNC_EXPORT_PRIVATE OrderedCommitSet {
- public:
- // A list of indices into the full list of commit ids such that:
- // 1 - each element is an index belonging to a particular ModelSafeGroup.
- // 2 - the vector is in sorted (smallest to largest) order.
- // 3 - each element is a valid index for GetCommitItemAt.
- // See GetCommitIdProjection for usage.
- typedef std::vector<size_t> Projection;
-
- // TODO(chron): Reserve space according to batch size?
- explicit OrderedCommitSet(const ModelSafeRoutingInfo& routes);
- ~OrderedCommitSet();
-
- bool HaveCommitItem(const int64 metahandle) const {
- return inserted_metahandles_.count(metahandle) > 0;
- }
-
- void AddCommitItem(const int64 metahandle, ModelType type);
- void AddCommitItems(const std::vector<int64> metahandles, ModelType type);
-
- const std::vector<int64>& GetAllCommitHandles() const {
- return metahandle_order_;
- }
-
- // Return the handle at index |position| in this OrderedCommitSet. Note that
- // the index uniquely identifies the same logical item in each of:
- // 1) this OrderedCommitSet
- // 2) the CommitRequest sent to the server
- // 3) the list of EntryResponse objects in the CommitResponse.
- // These together allow re-association of the pre-commit Id with the
- // actual committed entry.
- int64 GetCommitHandleAt(const size_t position) const {
- return metahandle_order_[position];
- }
-
- // Same as above, but for ModelType of the item.
- ModelType GetModelTypeAt(const size_t position) const {
- return types_[position];
- }
-
- // Get the projection of commit ids onto the space of commit ids
- // belonging to |group|. This is useful when you need to process a commit
- // response one ModelSafeGroup at a time. See GetCommitIdAt for how the
- // indices contained in the returned Projection can be used.
- const Projection& GetCommitIdProjection(
- ModelSafeGroup group) const;
-
- size_t Size() const {
- return metahandle_order_.size();
- }
-
- bool Empty() const {
- return Size() == 0;
- }
-
- // Returns all the types that are included in this list.
- ModelTypeSet Types() const {
- return types_in_list_;
- }
-
- // Returns true iff any of the commit ids added to this set have model type
- // BOOKMARKS.
- bool HasBookmarkCommitId() const;
-
- void Append(const OrderedCommitSet& other);
- void AppendReverse(const OrderedCommitSet& other);
- void Truncate(size_t max_size);
-
- // Removes all entries from this set.
- void Clear();
-
- void operator=(const OrderedCommitSet& other);
- private:
- // A set of CommitIdProjections associated with particular ModelSafeGroups.
- typedef std::map<ModelSafeGroup, Projection> Projections;
-
- // Helper container for return value of GetCommitItemAt.
- struct CommitItem {
- int64 meta;
- ModelType group;
- };
-
- CommitItem GetCommitItemAt(const size_t position) const;
-
- // These lists are different views of the same items; e.g they are
- // isomorphic.
- std::set<int64> inserted_metahandles_;
- std::vector<int64> metahandle_order_;
- Projections projections_;
-
- // We need this because of operations like AppendReverse that take ids from
- // one OrderedCommitSet and insert into another -- we need to know the
- // group for each ID so that the insertion can update the appropriate
- // projection.
- std::vector<ModelType> types_;
-
- // The set of types which are included in this particular list.
- ModelTypeSet types_in_list_;
-
- ModelSafeRoutingInfo routes_;
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
-
diff --git a/chromium/sync/sessions/ordered_commit_set_unittest.cc b/chromium/sync/sessions/ordered_commit_set_unittest.cc
deleted file mode 100644
index 4aca4f406c7..00000000000
--- a/chromium/sync/sessions/ordered_commit_set_unittest.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/ordered_commit_set.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using std::vector;
-
-namespace syncer {
-namespace sessions {
-namespace {
-
-class OrderedCommitSetTest : public testing::Test {
- public:
- OrderedCommitSetTest() {
- routes_[BOOKMARKS] = GROUP_UI;
- routes_[PREFERENCES] = GROUP_UI;
- routes_[AUTOFILL] = GROUP_DB;
- routes_[SESSIONS] = GROUP_PASSIVE;
- }
- protected:
- TestIdFactory ids_;
- ModelSafeRoutingInfo routes_;
-};
-
-TEST_F(OrderedCommitSetTest, Projections) {
- vector<int64> expected;
- for (int64 i = 0; i < 8; i++)
- expected.push_back(i);
-
- OrderedCommitSet commit_set1(routes_), commit_set2(routes_);
- commit_set1.AddCommitItem(expected[0], BOOKMARKS);
- commit_set1.AddCommitItem(expected[1], BOOKMARKS);
- commit_set1.AddCommitItem(expected[2], PREFERENCES);
- // Duplicates should be dropped.
- commit_set1.AddCommitItem(expected[2], PREFERENCES);
- commit_set1.AddCommitItem(expected[3], SESSIONS);
- commit_set1.AddCommitItem(expected[4], SESSIONS);
- commit_set2.AddCommitItem(expected[7], AUTOFILL);
- commit_set2.AddCommitItem(expected[6], AUTOFILL);
- commit_set2.AddCommitItem(expected[5], AUTOFILL);
- // Add something in set1 to set2, which should get dropped by AppendReverse.
- commit_set2.AddCommitItem(expected[0], BOOKMARKS);
- commit_set1.AppendReverse(commit_set2);
-
- EXPECT_EQ(8U, commit_set1.Size());
-
- // First, we should verify the projections are correct. Second, we want to
- // do the same verification after truncating by 1. Next, try truncating
- // the set to a size of 4, so that the DB projection is wiped out and
- // PASSIVE has one element removed. Finally, truncate to 1 so only UI is
- // remaining.
- std::vector<size_t> sizes;
- sizes.push_back(8);
- sizes.push_back(7);
- sizes.push_back(4);
- sizes.push_back(1);
- for (std::vector<size_t>::iterator it = sizes.begin();
- it != sizes.end(); ++it) {
- commit_set1.Truncate(*it);
- size_t expected_size = *it;
-
- SCOPED_TRACE(::testing::Message("Iteration size = ") << *it);
- std::vector<int64> all_ids = commit_set1.GetAllCommitHandles();
- EXPECT_EQ(expected_size, all_ids.size());
- for (size_t i = 0; i < expected_size; i++) {
- EXPECT_TRUE(expected[i] == all_ids[i]);
- EXPECT_TRUE(expected[i] == commit_set1.GetCommitHandleAt(i));
- }
-
- OrderedCommitSet::Projection p1, p2, p3;
- p1 = commit_set1.GetCommitIdProjection(GROUP_UI);
- p2 = commit_set1.GetCommitIdProjection(GROUP_PASSIVE);
- p3 = commit_set1.GetCommitIdProjection(GROUP_DB);
- EXPECT_TRUE(p1.size() + p2.size() + p3.size() == expected_size) << "Sum"
- << "of sizes of projections should equal full expected size!";
-
- for (size_t i = 0; i < p1.size(); i++) {
- SCOPED_TRACE(::testing::Message("UI projection mismatch at i = ") << i);
- EXPECT_TRUE(expected[p1[i]] == commit_set1.GetCommitHandleAt(p1[i]))
- << "expected[p1[i]] = " << expected[p1[i]]
- << ", commit_set1[p1[i]] = " << commit_set1.GetCommitHandleAt(p1[i]);
- }
- for (size_t i = 0; i < p2.size(); i++) {
- SCOPED_TRACE(::testing::Message("PASSIVE projection mismatch at i = ")
- << i);
- EXPECT_TRUE(expected[p2[i]] == commit_set1.GetCommitHandleAt(p2[i]))
- << "expected[p2[i]] = " << expected[p2[i]]
- << ", commit_set1[p2[i]] = " << commit_set1.GetCommitHandleAt(p2[i]);
- }
- for (size_t i = 0; i < p3.size(); i++) {
- SCOPED_TRACE(::testing::Message("DB projection mismatch at i = ") << i);
- EXPECT_TRUE(expected[p3[i]] == commit_set1.GetCommitHandleAt(p3[i]))
- << "expected[p3[i]] = " << expected[p3[i]]
- << ", commit_set1[p3[i]] = " << commit_set1.GetCommitHandleAt(p3[i]);
- }
- }
-}
-
-TEST_F(OrderedCommitSetTest, HasBookmarkCommitId) {
- OrderedCommitSet commit_set(routes_);
-
- commit_set.AddCommitItem(0, AUTOFILL);
- commit_set.AddCommitItem(1, SESSIONS);
- EXPECT_FALSE(commit_set.HasBookmarkCommitId());
-
- commit_set.AddCommitItem(2, PREFERENCES);
- commit_set.AddCommitItem(3, PREFERENCES);
- EXPECT_FALSE(commit_set.HasBookmarkCommitId());
-
- commit_set.AddCommitItem(4, BOOKMARKS);
- EXPECT_TRUE(commit_set.HasBookmarkCommitId());
-
- commit_set.Truncate(4);
- EXPECT_FALSE(commit_set.HasBookmarkCommitId());
-}
-
-TEST_F(OrderedCommitSetTest, AddAndRemoveEntries) {
- OrderedCommitSet commit_set(routes_);
-
- ASSERT_TRUE(commit_set.Empty());
-
- commit_set.AddCommitItem(0, AUTOFILL);
- ASSERT_EQ(static_cast<size_t>(1), commit_set.Size());
-
- commit_set.Clear();
- ASSERT_TRUE(commit_set.Empty());
-}
-
-} // namespace
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/status_controller.cc b/chromium/sync/sessions/status_controller.cc
index abd6f1ecbd6..752b9ab47f6 100644
--- a/chromium/sync/sessions/status_controller.cc
+++ b/chromium/sync/sessions/status_controller.cc
@@ -13,9 +13,7 @@
namespace syncer {
namespace sessions {
-StatusController::StatusController()
- : group_restriction_in_effect_(false),
- group_restriction_(GROUP_PASSIVE) {
+StatusController::StatusController() {
}
StatusController::~StatusController() {}
@@ -105,17 +103,6 @@ SyncerError StatusController::last_get_key_result() const {
return model_neutral_.last_get_key_result;
}
-// Returns the number of updates received from the sync server.
-int64 StatusController::CountUpdates() const {
- const sync_pb::ClientToServerResponse& updates =
- model_neutral_.updates_response;
- if (updates.has_get_updates()) {
- return updates.get_updates().entries().size();
- } else {
- return 0;
- }
-}
-
int StatusController::num_updates_applied() const {
return model_neutral_.num_updates_applied;
}
@@ -129,20 +116,14 @@ int StatusController::num_encryption_conflicts() const {
}
int StatusController::num_hierarchy_conflicts() const {
- DCHECK(!group_restriction_in_effect_)
- << "num_hierarchy_conflicts applies to all ModelSafeGroups";
return model_neutral_.num_hierarchy_conflicts;
}
int StatusController::num_server_conflicts() const {
- DCHECK(!group_restriction_in_effect_)
- << "num_server_conflicts applies to all ModelSafeGroups";
return model_neutral_.num_server_conflicts;
}
int StatusController::TotalNumConflictingItems() const {
- DCHECK(!group_restriction_in_effect_)
- << "TotalNumConflictingItems applies to all ModelSafeGroups";
int sum = 0;
sum += num_encryption_conflicts();
sum += num_hierarchy_conflicts();
@@ -150,26 +131,5 @@ int StatusController::TotalNumConflictingItems() const {
return sum;
}
-bool StatusController::ServerSaysNothingMoreToDownload() const {
- if (!download_updates_succeeded())
- return false;
-
- if (!updates_response().get_updates().has_changes_remaining()) {
- NOTREACHED(); // Server should always send changes remaining.
- return false; // Avoid looping forever.
- }
- // Changes remaining is an estimate, but if it's estimated to be
- // zero, that's firm and we don't have to ask again.
- return updates_response().get_updates().changes_remaining() == 0;
-}
-
-void StatusController::set_debug_info_sent() {
- model_neutral_.debug_info_sent = true;
-}
-
-bool StatusController::debug_info_sent() const {
- return model_neutral_.debug_info_sent;
-}
-
} // namespace sessions
} // namespace syncer
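The hunks above (together with the matching header hunks below) drop the ScopedModelSafeGroupRestriction machinery, so the conflict counters no longer assert about per-group scope. A rough before/after sketch, assuming a StatusController* named status is in scope:

// Before this patch: per-group accessors were only legal inside a
// restriction scope (see the class removed from status_controller.h below).
{
  ScopedModelSafeGroupRestriction restrict(status, GROUP_UI);
  // ... group-scoped reads happened here ...
}

// After this patch: the counters are model-neutral and can be read directly.
int conflicts = status->TotalNumConflictingItems();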
diff --git a/chromium/sync/sessions/status_controller.h b/chromium/sync/sessions/status_controller.h
index a547c1b67da..005f158a81e 100644
--- a/chromium/sync/sessions/status_controller.h
+++ b/chromium/sync/sessions/status_controller.h
@@ -5,25 +5,14 @@
// StatusController handles all counter and status related number crunching and
// state tracking on behalf of a SyncSession.
//
-// The most important feature of StatusController is the
-// ScopedModelSafeGroupRestriction. Some of its functions expose per-thread
-// state, and can be called only when the restriction is in effect. For
-// example, if GROUP_UI is set then the value returned from
-// commit_id_projection() will be useful for iterating over the commit IDs of
-// items that live on the UI thread.
+// This object may be accessed from many different threads. It will be accessed
+// most often from the syncer thread. However, when update application is in
+// progress it may also be accessed from the worker threads. This is safe
+// because only one of them will run at a time, and the syncer thread will be
+// blocked until update application completes.
//
-// Other parts of its state are global, and do not require the restriction.
-//
-// NOTE: There is no concurrent access protection provided by this class. It
-// assumes one single thread is accessing this class for each unique
-// ModelSafeGroup, and also only one single thread (in practice, the
-// SyncerThread) responsible for all "shared" access when no restriction is in
-// place. Thus, every bit of data is to be accessed mutually exclusively with
-// respect to threads.
-//
-// StatusController can also track if changes occur to certain parts of state
-// so that various parts of the sync engine can avoid broadcasting
-// notifications if no changes occurred.
+// This object contains only global state. None of its members are per model
+// type counters.
#ifndef SYNC_SESSIONS_STATUS_CONTROLLER_H_
#define SYNC_SESSIONS_STATUS_CONTROLLER_H_
@@ -35,8 +24,8 @@
#include "base/stl_util.h"
#include "base/time/time.h"
#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/sessions/model_neutral_state.h"
-#include "sync/sessions/ordered_commit_set.h"
namespace syncer {
namespace sessions {
@@ -47,37 +36,18 @@ class SYNC_EXPORT_PRIVATE StatusController {
~StatusController();
// ClientToServer messages.
- const ModelTypeSet updates_request_types() const {
- return model_neutral_.updates_request_types;
- }
- void set_updates_request_types(ModelTypeSet value) {
- model_neutral_.updates_request_types = value;
- }
const ModelTypeSet commit_request_types() const {
return model_neutral_.commit_request_types;
}
void set_commit_request_types(ModelTypeSet value) {
model_neutral_.commit_request_types = value;
}
- const sync_pb::ClientToServerResponse& updates_response() const {
- return model_neutral_.updates_response;
- }
- sync_pb::ClientToServerResponse* mutable_updates_response() {
- return &model_neutral_.updates_response;
- }
// Changelog related state.
int64 num_server_changes_remaining() const {
return model_neutral_.num_server_changes_remaining;
}
- const OrderedCommitSet::Projection& commit_id_projection(
- const sessions::OrderedCommitSet &commit_set) {
- DCHECK(group_restriction_in_effect_)
- << "No group restriction for projection.";
- return commit_set.GetCommitIdProjection(group_restriction_);
- }
-
// Various conflict counters.
int num_encryption_conflicts() const;
int num_hierarchy_conflicts() const;
@@ -91,28 +61,6 @@ class SYNC_EXPORT_PRIVATE StatusController {
int num_server_overwrites() const;
- // Returns the number of updates received from the sync server.
- int64 CountUpdates() const;
-
- // Returns true if the last download_updates_command received a valid
- // server response.
- bool download_updates_succeeded() const {
- return model_neutral_.last_download_updates_result
- == SYNCER_OK;
- }
-
- // Returns true if the last updates response indicated that we were fully
- // up to date. This is subtle: if it's false, it could either mean that
- // the server said there WAS more to download, or it could mean that we
- // were unable to reach the server. If we didn't request every enabled
- // datatype, then we can't say for sure that there's nothing left to
- // download: in that case, this also returns false.
- bool ServerSaysNothingMoreToDownload() const;
-
- ModelSafeGroup group_restriction() const {
- return group_restriction_;
- }
-
base::Time sync_start_time() const {
// The time at which we sent the first GetUpdates command for this sync.
return sync_start_time_;
@@ -154,45 +102,14 @@ class SYNC_EXPORT_PRIVATE StatusController {
void UpdateStartTime();
- void set_debug_info_sent();
-
- bool debug_info_sent() const;
-
private:
- friend class ScopedModelSafeGroupRestriction;
-
ModelNeutralState model_neutral_;
- // Used to fail read/write operations on state that don't obey the current
- // active ModelSafeWorker contract.
- bool group_restriction_in_effect_;
- ModelSafeGroup group_restriction_;
-
base::Time sync_start_time_;
DISALLOW_COPY_AND_ASSIGN(StatusController);
};
-// A utility to restrict access to only those parts of the given
-// StatusController that pertain to the specified ModelSafeGroup.
-class ScopedModelSafeGroupRestriction {
- public:
- ScopedModelSafeGroupRestriction(StatusController* to_restrict,
- ModelSafeGroup restriction)
- : status_(to_restrict) {
- DCHECK(!status_->group_restriction_in_effect_);
- status_->group_restriction_ = restriction;
- status_->group_restriction_in_effect_ = true;
- }
- ~ScopedModelSafeGroupRestriction() {
- DCHECK(status_->group_restriction_in_effect_);
- status_->group_restriction_in_effect_ = false;
- }
- private:
- StatusController* status_;
- DISALLOW_COPY_AND_ASSIGN(ScopedModelSafeGroupRestriction);
-};
-
} // namespace sessions
} // namespace syncer
diff --git a/chromium/sync/sessions/status_controller_unittest.cc b/chromium/sync/sessions/status_controller_unittest.cc
index e6b59e8b30f..c29bc5f717a 100644
--- a/chromium/sync/sessions/status_controller_unittest.cc
+++ b/chromium/sync/sessions/status_controller_unittest.cc
@@ -31,16 +31,6 @@ TEST_F(StatusControllerTest, ReadYourWrites) {
EXPECT_EQ(14, status.model_neutral_state().num_successful_commits);
}
-TEST_F(StatusControllerTest, CountUpdates) {
- StatusController status;
- EXPECT_EQ(0, status.CountUpdates());
- sync_pb::ClientToServerResponse* response(status.mutable_updates_response());
- sync_pb::SyncEntity* entity1 = response->mutable_get_updates()->add_entries();
- sync_pb::SyncEntity* entity2 = response->mutable_get_updates()->add_entries();
- ASSERT_TRUE(entity1 != NULL && entity2 != NULL);
- EXPECT_EQ(2, status.CountUpdates());
-}
-
// Test TotalNumConflictingItems
TEST_F(StatusControllerTest, TotalNumConflictingItems) {
StatusController status;
@@ -52,14 +42,5 @@ TEST_F(StatusControllerTest, TotalNumConflictingItems) {
EXPECT_EQ(6, status.TotalNumConflictingItems());
}
-// Basic test that non group-restricted state accessors don't cause violations.
-TEST_F(StatusControllerTest, Unrestricted) {
- StatusController status;
- status.model_neutral_state();
- status.download_updates_succeeded();
- status.ServerSaysNothingMoreToDownload();
- status.group_restriction();
-}
-
} // namespace sessions
} // namespace syncer
diff --git a/chromium/sync/sessions/sync_session.h b/chromium/sync/sessions/sync_session.h
index cd4a22ccc19..f5767206d2a 100644
--- a/chromium/sync/sessions/sync_session.h
+++ b/chromium/sync/sessions/sync_session.h
@@ -4,12 +4,8 @@
// A class representing an attempt to synchronize the local syncable data
// store with a sync server. A SyncSession instance is passed as a stateful
-// bundle to and from various SyncerCommands with the goal of converging the
-// client view of data with that of the server. The commands twiddle with
-// session status in response to events and hiccups along the way, set and
-// query session progress with regards to conflict resolution and applying
-// server updates, and access the SyncSessionContext for the current session
-// via SyncSession instances.
+// bundle throughout the sync cycle. The SyncSession is not reused across
+// sync cycles; each cycle starts with a new one.
#ifndef SYNC_SESSIONS_SYNC_SESSION_H_
#define SYNC_SESSIONS_SYNC_SESSION_H_
@@ -27,7 +23,6 @@
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-#include "sync/sessions/ordered_commit_set.h"
#include "sync/sessions/status_controller.h"
#include "sync/sessions/sync_session_context.h"
@@ -78,17 +73,6 @@ class SYNC_EXPORT_PRIVATE SyncSession {
virtual void OnReceivedSessionsCommitDelay(
const base::TimeDelta& new_delay) = 0;
- // The client needs to cease and desist syncing at once. This occurs when
- // the Syncer detects that the backend store has fundamentally changed or
- // is a different instance altogether (e.g. swapping from a test instance
- // to production, or a global stop syncing operation has wiped the store).
- // TODO(lipalani) : Replace this function with the one below. This function
- // stops the current sync cycle and purges the client. In the new model
- // the former would be done by the |SyncProtocolError| and
- // the latter(which is an action) would be done in ProfileSyncService
- // along with the rest of the actions.
- virtual void OnShouldStopSyncingPermanently() = 0;
-
// Called for the syncer to respond to the error sent by the server.
virtual void OnSyncProtocolError(
const sessions::SyncSessionSnapshot& snapshot) = 0;
diff --git a/chromium/sync/sessions/sync_session_context.cc b/chromium/sync/sessions/sync_session_context.cc
index 98ab5f01a92..aa5dfa54044 100644
--- a/chromium/sync/sessions/sync_session_context.cc
+++ b/chromium/sync/sessions/sync_session_context.cc
@@ -10,9 +10,6 @@
namespace syncer {
namespace sessions {
-const unsigned int kMaxMessagesToRecord = 10;
-const unsigned int kMaxMessageSizeToRecord = 5 * 1024;
-
SyncSessionContext::SyncSessionContext(
ServerConnectionManager* connection_manager,
syncable::Directory* directory,
@@ -26,6 +23,8 @@ SyncSessionContext::SyncSessionContext(
const std::string& invalidator_client_id)
: connection_manager_(connection_manager),
directory_(directory),
+ update_handler_deleter_(&update_handler_map_),
+ commit_contributor_deleter_(&commit_contributor_map_),
extensions_activity_(extensions_activity),
notifications_enabled_(false),
max_commit_batch_size_(kDefaultMaxCommitBatchSize),
@@ -36,8 +35,10 @@ SyncSessionContext::SyncSessionContext(
server_enabled_pre_commit_update_avoidance_(false),
client_enabled_pre_commit_update_avoidance_(
client_enabled_pre_commit_update_avoidance) {
- for (size_t i = 0u; i < workers.size(); ++i)
- workers_.push_back(workers[i]);
+ for (size_t i = 0u; i < workers.size(); ++i) {
+ workers_.insert(
+ std::make_pair(workers[i]->GetModelSafeGroup(), workers[i]));
+ }
std::vector<SyncEngineEventListener*>::const_iterator it;
for (it = listeners.begin(); it != listeners.end(); ++it)
@@ -47,5 +48,32 @@ SyncSessionContext::SyncSessionContext(
SyncSessionContext::~SyncSessionContext() {
}
+void SyncSessionContext::set_routing_info(
+ const ModelSafeRoutingInfo& routing_info) {
+ enabled_types_ = GetRoutingInfoTypes(routing_info);
+
+ // TODO(rlarocque): This is not a good long-term solution. We must find a
+ // better way to initialize the set of CommitContributors and UpdateHandlers.
+ STLDeleteValues<UpdateHandlerMap>(&update_handler_map_);
+ STLDeleteValues<CommitContributorMap>(&commit_contributor_map_);
+ for (ModelSafeRoutingInfo::const_iterator routing_iter = routing_info.begin();
+ routing_iter != routing_info.end(); ++routing_iter) {
+ ModelType type = routing_iter->first;
+ ModelSafeGroup group = routing_iter->second;
+ std::map<ModelSafeGroup, scoped_refptr<ModelSafeWorker> >::iterator
+ worker_it = workers_.find(group);
+ DCHECK(worker_it != workers_.end());
+ scoped_refptr<ModelSafeWorker> worker = worker_it->second;
+
+ SyncDirectoryUpdateHandler* handler =
+ new SyncDirectoryUpdateHandler(directory(), type, worker);
+ update_handler_map_.insert(std::make_pair(type, handler));
+
+ SyncDirectoryCommitContributor* contributor =
+ new SyncDirectoryCommitContributor(directory(), type);
+ commit_contributor_map_.insert(std::make_pair(type, contributor));
+ }
+}
+
} // namespace sessions
} // namespace syncer
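The new set_routing_info() rebuilds one SyncDirectoryUpdateHandler and one SyncDirectoryCommitContributor per routed type. A caller-side sketch, with the type-to-group assignments chosen purely for illustration:

ModelSafeRoutingInfo routing_info;  // std::map<ModelType, ModelSafeGroup>
routing_info[BOOKMARKS] = GROUP_UI;
routing_info[PREFERENCES] = GROUP_UI;
routing_info[AUTOFILL] = GROUP_DB;
context->set_routing_info(routing_info);
// enabled_types() now reports the routed types, and update_handler_map() /
// commit_contributor_map() hold one heap-allocated entry per type.  The
// STLValueDeleter members declared in sync_session_context.h free those
// entries when the context is destroyed.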
diff --git a/chromium/sync/sessions/sync_session_context.h b/chromium/sync/sessions/sync_session_context.h
index 718cc6caad0..5995ab151db 100644
--- a/chromium/sync/sessions/sync_session_context.h
+++ b/chromium/sync/sessions/sync_session_context.h
@@ -3,15 +3,12 @@
// found in the LICENSE file.
// SyncSessionContext encapsulates the contextual information and engine
-// components specific to a SyncSession. A context is accessible via
-// a SyncSession so that session SyncerCommands and parts of the engine have
-// a convenient way to access other parts. In this way it can be thought of as
-// the surrounding environment for the SyncSession. The components of this
-// environment are either valid or not valid for the entire context lifetime,
-// or they are valid for explicitly scoped periods of time by using Scoped
-// installation utilities found below. This means that the context assumes no
-// ownership whatsoever of any object that was not created by the context
-// itself.
+// components specific to a SyncSession. Unlike the SyncSession, the context
+// can be reused across several sync cycles.
+//
+// The context does not take ownership of its pointer members. It's up to
+// the surrounding classes to ensure those members remain valid while the
+// context is in use.
//
// It can only be used from the SyncerThread.
@@ -22,7 +19,10 @@
#include <string>
#include <vector>
+#include "base/stl_util.h"
#include "sync/base/sync_export.h"
+#include "sync/engine/sync_directory_commit_contributor.h"
+#include "sync/engine/sync_directory_update_handler.h"
#include "sync/engine/sync_engine_event.h"
#include "sync/engine/syncer_types.h"
#include "sync/engine/traffic_recorder.h"
@@ -67,16 +67,18 @@ class SYNC_EXPORT_PRIVATE SyncSessionContext {
return directory_;
}
- const ModelSafeRoutingInfo& routing_info() const {
- return routing_info_;
+ ModelTypeSet enabled_types() const {
+ return enabled_types_;
}
- void set_routing_info(const ModelSafeRoutingInfo& routing_info) {
- routing_info_ = routing_info;
+ void set_routing_info(const ModelSafeRoutingInfo& routing_info);
+
+ UpdateHandlerMap* update_handler_map() {
+ return &update_handler_map_;
}
- const std::vector<scoped_refptr<ModelSafeWorker> >& workers() const {
- return workers_;
+ CommitContributorMap* commit_contributor_map() {
+ return &commit_contributor_map_;
}
ExtensionsActivity* extensions_activity() {
@@ -150,12 +152,28 @@ class SYNC_EXPORT_PRIVATE SyncSessionContext {
ServerConnectionManager* const connection_manager_;
syncable::Directory* const directory_;
- // A cached copy of SyncBackendRegistrar's routing info.
- // Must be updated manually when SBR's state is modified.
- ModelSafeRoutingInfo routing_info_;
+ // The set of enabled types. Derived from the routing info set with
+ // set_routing_info().
+ ModelTypeSet enabled_types_;
+
+ // A map of 'update handlers', one for each enabled type.
+ // This must be kept in sync with the routing info. Our temporary solution to
+ // that problem is to initialize this map in set_routing_info().
+ UpdateHandlerMap update_handler_map_;
+
+ // Deleter for the |update_handler_map_|.
+ STLValueDeleter<UpdateHandlerMap> update_handler_deleter_;
+
+ // A map of 'commit contributors', one for each enabled type.
+ // This must be kept in sync with the routing info. Our temporary solution to
+ // that problem is to initialize this map in set_routing_info().
+ CommitContributorMap commit_contributor_map_;
+
+ // Deleter for the |commit_contributor_map_|.
+ STLValueDeleter<CommitContributorMap> commit_contributor_deleter_;
// The set of ModelSafeWorkers. Used to execute tasks of various threads.
- std::vector<scoped_refptr<ModelSafeWorker> > workers_;
+ std::map<ModelSafeGroup, scoped_refptr<ModelSafeWorker> > workers_;
// We use this to stuff extensions activity into CommitMessages so the server
// can correlate commit traffic with extension-related bookmark mutations.
diff --git a/chromium/sync/sessions/sync_session_unittest.cc b/chromium/sync/sessions/sync_session_unittest.cc
index f751e25cd90..e712552f580 100644
--- a/chromium/sync/sessions/sync_session_unittest.cc
+++ b/chromium/sync/sessions/sync_session_unittest.cc
@@ -104,9 +104,6 @@ class SyncSessionTest : public testing::Test,
FailControllerInvocationIfDisabled(
"OnReceivedClientInvalidationHintBufferSize");
}
- virtual void OnShouldStopSyncingPermanently() OVERRIDE {
- FailControllerInvocationIfDisabled("OnShouldStopSyncingPermanently");
- }
virtual void OnSyncProtocolError(
const sessions::SyncSessionSnapshot& snapshot) OVERRIDE {
FailControllerInvocationIfDisabled("SyncProtocolError");
@@ -148,38 +145,6 @@ class SyncSessionTest : public testing::Test,
scoped_refptr<ExtensionsActivity> extensions_activity_;
};
-TEST_F(SyncSessionTest, MoreToDownloadIfDownloadFailed) {
- status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
-
- status()->set_last_download_updates_result(NETWORK_IO_ERROR);
-
- // When DownloadUpdatesCommand fails, these should be false.
- EXPECT_FALSE(status()->ServerSaysNothingMoreToDownload());
- EXPECT_FALSE(status()->download_updates_succeeded());
-}
-
-TEST_F(SyncSessionTest, MoreToDownloadIfGotChangesRemaining) {
- status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
-
- // When the server returns changes_remaining, that means there's
- // more to download.
- status()->set_last_download_updates_result(SYNCER_OK);
- status()->mutable_updates_response()->mutable_get_updates()
- ->set_changes_remaining(1000L);
- EXPECT_FALSE(status()->ServerSaysNothingMoreToDownload());
- EXPECT_TRUE(status()->download_updates_succeeded());
-}
-
-TEST_F(SyncSessionTest, MoreToDownloadIfGotNoChangesRemaining) {
- status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
-
- status()->set_last_download_updates_result(SYNCER_OK);
- status()->mutable_updates_response()->mutable_get_updates()
- ->set_changes_remaining(0);
- EXPECT_TRUE(status()->ServerSaysNothingMoreToDownload());
- EXPECT_TRUE(status()->download_updates_succeeded());
-}
-
} // namespace
} // namespace sessions
} // namespace syncer
diff --git a/chromium/sync/sync_android.gypi b/chromium/sync/sync_android.gypi
index b951b07ba68..bdeb2e4d787 100644
--- a/chromium/sync/sync_android.gypi
+++ b/chromium/sync/sync_android.gypi
@@ -11,9 +11,11 @@
'type': 'none',
'variables': {
'java_in_dir': '../sync/android/java',
+ 'jni_generator_ptr_type': 'long',
},
'dependencies': [
'../base/base.gyp:base_java',
+ '../base/base.gyp:base_java_test_support',
'../net/net.gyp:net_java',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_javalib',
'../third_party/guava/guava.gyp:guava_javalib',
@@ -21,17 +23,6 @@
],
'includes': [ '../build/java.gypi' ],
},
- {
- 'target_name': 'sync_jni_headers',
- 'type': 'none',
- 'sources': [
- 'android/java/src/org/chromium/sync/notifier/InvalidationController.java',
- ],
- 'variables': {
- 'jni_gen_package': 'sync',
- },
- 'includes': [ '../build/jni_generator.gypi' ],
- },
],
}],
],
diff --git a/chromium/sync/sync_core.gypi b/chromium/sync/sync_core.gypi
index 95b3ff8baec..f4912fe055c 100644
--- a/chromium/sync/sync_core.gypi
+++ b/chromium/sync/sync_core.gypi
@@ -32,14 +32,18 @@
'engine/all_status.h',
'engine/apply_control_data_updates.cc',
'engine/apply_control_data_updates.h',
- 'engine/apply_updates_and_resolve_conflicts_command.cc',
- 'engine/apply_updates_and_resolve_conflicts_command.h',
'engine/backoff_delay_provider.cc',
'engine/backoff_delay_provider.h',
- 'engine/build_commit_command.cc',
- 'engine/build_commit_command.h',
+ 'engine/commit_util.cc',
+ 'engine/commit_util.h',
'engine/commit.cc',
'engine/commit.h',
+ 'engine/sync_directory_update_handler.cc',
+ 'engine/sync_directory_update_handler.h',
+ 'engine/sync_directory_commit_contribution.cc',
+ 'engine/sync_directory_commit_contribution.h',
+ 'engine/sync_directory_commit_contributor.cc',
+ 'engine/sync_directory_commit_contributor.h',
'engine/conflict_resolver.cc',
'engine/conflict_resolver.h',
'engine/conflict_util.cc',
@@ -48,20 +52,14 @@
'engine/download.h',
'engine/get_commit_ids.cc',
'engine/get_commit_ids.h',
- 'engine/model_changing_syncer_command.cc',
- 'engine/model_changing_syncer_command.h',
'engine/net/server_connection_manager.cc',
'engine/net/server_connection_manager.h',
'engine/net/url_translator.cc',
'engine/net/url_translator.h',
'engine/nudge_source.cc',
'engine/nudge_source.h',
- 'engine/process_commit_response_command.cc',
- 'engine/process_commit_response_command.h',
- 'engine/process_updates_command.cc',
- 'engine/process_updates_command.h',
- 'engine/store_timestamps_command.cc',
- 'engine/store_timestamps_command.h',
+ 'engine/process_updates_util.cc',
+ 'engine/process_updates_util.h',
'engine/sync_engine_event.cc',
'engine/sync_engine_event.h',
'engine/sync_scheduler.cc',
@@ -70,8 +68,6 @@
'engine/sync_scheduler_impl.h',
'engine/syncer.cc',
'engine/syncer.h',
- 'engine/syncer_command.cc',
- 'engine/syncer_command.h',
'engine/syncer_proto_util.cc',
'engine/syncer_proto_util.h',
'engine/syncer_types.h',
@@ -104,8 +100,6 @@
'sessions/debug_info_getter.h',
'sessions/nudge_tracker.cc',
'sessions/nudge_tracker.h',
- 'sessions/ordered_commit_set.cc',
- 'sessions/ordered_commit_set.h',
'sessions/status_controller.cc',
'sessions/status_controller.h',
'sessions/sync_session.cc',
@@ -128,6 +122,8 @@
'syncable/invalid_directory_backing_store.cc',
'syncable/invalid_directory_backing_store.h',
'syncable/metahandle_set.h',
+ 'syncable/model_neutral_mutable_entry.cc',
+ 'syncable/model_neutral_mutable_entry.h',
'syncable/model_type.cc',
'syncable/mutable_entry.cc',
'syncable/mutable_entry.h',
@@ -146,6 +142,8 @@
'syncable/syncable-inl.h',
'syncable/syncable_base_transaction.cc',
'syncable/syncable_base_transaction.h',
+ 'syncable/syncable_base_write_transaction.cc',
+ 'syncable/syncable_base_write_transaction.h',
'syncable/syncable_changes_version.h',
'syncable/syncable_columns.h',
'syncable/syncable_delete_journal.cc',
@@ -154,6 +152,8 @@
'syncable/syncable_enum_conversions.h',
'syncable/syncable_id.cc',
'syncable/syncable_id.h',
+ 'syncable/syncable_model_neutral_write_transaction.cc',
+ 'syncable/syncable_model_neutral_write_transaction.h',
'syncable/syncable_proto_util.cc',
'syncable/syncable_proto_util.h',
'syncable/syncable_read_transaction.cc',
diff --git a/chromium/sync/sync_internal_api.gypi b/chromium/sync/sync_internal_api.gypi
index be3ce713b06..abcfa4f1b58 100644
--- a/chromium/sync/sync_internal_api.gypi
+++ b/chromium/sync/sync_internal_api.gypi
@@ -25,6 +25,7 @@
'internal_api/debug_info_event_listener.cc',
'internal_api/debug_info_event_listener.h',
'internal_api/http_bridge.cc',
+ 'internal_api/http_bridge_network_resources.cc',
'internal_api/internal_components_factory_impl.cc',
'internal_api/js_mutation_event_observer.cc',
'internal_api/js_mutation_event_observer.h',
@@ -32,6 +33,9 @@
'internal_api/js_sync_encryption_handler_observer.h',
'internal_api/js_sync_manager_observer.cc',
'internal_api/js_sync_manager_observer.h',
+ 'internal_api/public/base/enum_set.h',
+ 'internal_api/public/base/ack_handle.cc',
+ 'internal_api/public/base/ack_handle.h',
'internal_api/public/base/cancelation_observer.cc',
'internal_api/public/base/cancelation_observer.h',
'internal_api/public/base/cancelation_signal.cc',
@@ -65,10 +69,12 @@
'internal_api/public/engine/sync_status.cc',
'internal_api/public/engine/sync_status.h',
'internal_api/public/http_bridge.h',
+ 'internal_api/public/http_bridge_network_resources.h',
'internal_api/public/http_post_provider_factory.h',
'internal_api/public/http_post_provider_interface.h',
- 'internal_api/public/internal_components_factory_impl.h',
'internal_api/public/internal_components_factory.h',
+ 'internal_api/public/internal_components_factory_impl.h',
+ 'internal_api/public/network_resources.h',
'internal_api/public/read_node.h',
'internal_api/public/read_transaction.h',
'internal_api/public/sessions/model_neutral_state.cc',
diff --git a/chromium/sync/sync_notifier.gypi b/chromium/sync/sync_notifier.gypi
index ba40772b609..7986bd70562 100644
--- a/chromium/sync/sync_notifier.gypi
+++ b/chromium/sync/sync_notifier.gypi
@@ -23,24 +23,31 @@
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation',
],
'sources': [
+ 'notifier/ack_handler.cc',
+ 'notifier/ack_handler.h',
+ 'notifier/dropped_invalidation_tracker.cc',
+ 'notifier/dropped_invalidation_tracker.h',
'notifier/invalidation_handler.h',
- 'notifier/invalidation_state_tracker.cc',
'notifier/invalidation_state_tracker.h',
'notifier/invalidation_util.cc',
'notifier/invalidation_util.h',
+ 'notifier/unacked_invalidation_set.cc',
+ 'notifier/unacked_invalidation_set.h',
'notifier/invalidator.h',
'notifier/invalidator_registrar.cc',
'notifier/invalidator_registrar.h',
'notifier/invalidator_state.cc',
'notifier/invalidator_state.h',
+ 'notifier/mock_ack_handler.cc',
+ 'notifier/mock_ack_handler.h',
'notifier/object_id_invalidation_map.cc',
'notifier/object_id_invalidation_map.h',
+ 'notifier/single_object_invalidation_set.cc',
+ 'notifier/single_object_invalidation_set.h',
],
'conditions': [
['OS != "android"', {
'sources': [
- 'notifier/ack_tracker.cc',
- 'notifier/ack_tracker.h',
'notifier/invalidation_notifier.cc',
'notifier/invalidation_notifier.h',
'notifier/non_blocking_invalidator.cc',
diff --git a/chromium/sync/sync_proto.gypi b/chromium/sync/sync_proto.gypi
index bb79b4b4cc7..968ee9dd486 100644
--- a/chromium/sync/sync_proto.gypi
+++ b/chromium/sync/sync_proto.gypi
@@ -13,6 +13,8 @@
'protocol/app_notification_specifics.proto',
'protocol/app_setting_specifics.proto',
'protocol/app_specifics.proto',
+ 'protocol/app_list_specifics.proto',
+ 'protocol/article_specifics.proto',
'protocol/autofill_specifics.proto',
'protocol/bookmark_specifics.proto',
'protocol/client_commands.proto',
diff --git a/chromium/sync/sync_tests.gypi b/chromium/sync/sync_tests.gypi
index 93d6ed8f753..683ca52204b 100644
--- a/chromium/sync/sync_tests.gypi
+++ b/chromium/sync/sync_tests.gypi
@@ -40,8 +40,6 @@
'test/engine/fake_sync_scheduler.h',
'test/engine/mock_connection_manager.cc',
'test/engine/mock_connection_manager.h',
- 'test/engine/syncer_command_test.cc',
- 'test/engine/syncer_command_test.h',
'test/engine/test_directory_setter_upper.cc',
'test/engine/test_directory_setter_upper.h',
'test/engine/test_id_factory.h',
@@ -56,6 +54,8 @@
'test/null_transaction_observer.cc',
'test/null_transaction_observer.h',
'test/sessions/test_scoped_session_event_listener.h',
+ 'test/sessions/mock_debug_info_getter.h',
+ 'test/sessions/mock_debug_info_getter.cc',
'test/test_directory_backing_store.cc',
'test/test_directory_backing_store.h',
'test/test_transaction_observer.cc',
@@ -111,16 +111,18 @@
'sync',
],
'sources': [
+ 'notifier/fake_invalidation_handler.cc',
+ 'notifier/fake_invalidation_handler.h',
'notifier/fake_invalidation_state_tracker.cc',
'notifier/fake_invalidation_state_tracker.h',
'notifier/fake_invalidator.cc',
'notifier/fake_invalidator.h',
- 'notifier/fake_invalidation_handler.cc',
- 'notifier/fake_invalidation_handler.h',
'notifier/invalidator_test_template.cc',
'notifier/invalidator_test_template.h',
- 'notifier/object_id_invalidation_map_test_util.cc',
- 'notifier/object_id_invalidation_map_test_util.h',
+ 'notifier/unacked_invalidation_set_test_util.cc',
+ 'notifier/unacked_invalidation_set_test_util.h',
+ 'internal_api/public/base/object_id_invalidation_map_test_util.h',
+ 'internal_api/public/base/object_id_invalidation_map_test_util.cc',
],
},
@@ -150,10 +152,14 @@
'internal_api/public/base/invalidation_test_util.cc',
'internal_api/public/base/invalidation_test_util.h',
'internal_api/public/test/fake_sync_manager.h',
+ 'internal_api/public/test/sync_manager_factory_for_profile_sync_test.h',
'internal_api/public/test/test_entry_factory.h',
'internal_api/public/test/test_internal_components_factory.h',
'internal_api/public/test/test_user_share.h',
'internal_api/test/fake_sync_manager.cc',
+ 'internal_api/test/sync_manager_factory_for_profile_sync_test.cc',
+ 'internal_api/test/sync_manager_for_profile_sync_test.cc',
+ 'internal_api/test/sync_manager_for_profile_sync_test.h',
'internal_api/test/test_entry_factory.cc',
'internal_api/test/test_internal_components_factory.cc',
'internal_api/test/test_user_share.cc',
@@ -235,16 +241,13 @@
'internal_api/public/util/immutable_unittest.cc',
'internal_api/public/util/weak_handle_unittest.cc',
'engine/apply_control_data_updates_unittest.cc',
- 'engine/apply_updates_and_resolve_conflicts_command_unittest.cc',
'engine/backoff_delay_provider_unittest.cc',
'engine/download_unittest.cc',
- 'engine/model_changing_syncer_command_unittest.cc',
- 'engine/process_commit_response_command_unittest.cc',
- 'engine/process_updates_command_unittest.cc',
- 'engine/store_timestamps_command_unittest.cc',
'engine/sync_scheduler_unittest.cc',
'engine/syncer_proto_util_unittest.cc',
'engine/syncer_unittest.cc',
+ 'engine/sync_directory_commit_contribution_unittest.cc',
+ 'engine/sync_directory_update_handler_unittest.cc',
'engine/traffic_recorder_unittest.cc',
'js/js_arg_list_unittest.cc',
'js/js_event_details_unittest.cc',
@@ -252,7 +255,6 @@
'protocol/proto_enum_conversions_unittest.cc',
'protocol/proto_value_conversions_unittest.cc',
'sessions/nudge_tracker_unittest.cc',
- 'sessions/ordered_commit_set_unittest.cc',
'sessions/status_controller_unittest.cc',
'sessions/sync_session_unittest.cc',
'syncable/directory_backing_store_unittest.cc',
@@ -269,20 +271,6 @@
'util/nigori_unittest.cc',
'util/protobuf_unittest.cc',
],
- 'conditions': [
- ['OS == "ios" and coverage != 0', {
- 'sources!': [
- # These sources can't be built with coverage due to a toolchain
- # bug: http://openradar.appspot.com/radar?id=1499403
- 'engine/syncer_unittest.cc',
-
- # These tests crash when run with coverage turned on due to an
- # issue with llvm_gcda_increment_indirect_counter:
- # http://crbug.com/156058
- 'syncable/directory_backing_store_unittest.cc',
- ],
- }],
- ],
},
},
@@ -326,16 +314,18 @@
'conditions': [
['OS != "android"', {
'sources': [
- 'notifier/ack_tracker_unittest.cc',
'notifier/fake_invalidator_unittest.cc',
'notifier/invalidation_notifier_unittest.cc',
'notifier/invalidator_registrar_unittest.cc',
'notifier/non_blocking_invalidator_unittest.cc',
+ 'notifier/object_id_invalidation_map_unittest.cc',
'notifier/p2p_invalidator_unittest.cc',
'notifier/push_client_channel_unittest.cc',
'notifier/registration_manager_unittest.cc',
+ 'notifier/single_object_invalidation_set_unittest.cc',
'notifier/sync_invalidation_listener_unittest.cc',
'notifier/sync_system_resources_unittest.cc',
+ 'notifier/unacked_invalidation_set_unittest.cc',
],
}],
],
diff --git a/chromium/sync/syncable/directory.cc b/chromium/sync/syncable/directory.cc
index 9754b885339..33b7e15be6c 100644
--- a/chromium/sync/syncable/directory.cc
+++ b/chromium/sync/syncable/directory.cc
@@ -343,12 +343,12 @@ EntryKernel* Directory::GetRootEntry() {
return GetEntryById(Id());
}
-bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
+bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
ScopedKernelLock lock(this);
return InsertEntry(trans, entry, &lock);
}
-bool Directory::InsertEntry(WriteTransaction* trans,
+bool Directory::InsertEntry(BaseWriteTransaction* trans,
EntryKernel* entry,
ScopedKernelLock* lock) {
DCHECK(NULL != lock);
@@ -394,9 +394,9 @@ bool Directory::InsertEntry(WriteTransaction* trans,
return true;
}
-bool Directory::ReindexId(WriteTransaction* trans,
- EntryKernel* const entry,
- const Id& new_id) {
+bool Directory::ReindexId(BaseWriteTransaction* trans,
+ EntryKernel* const entry,
+ const Id& new_id) {
ScopedKernelLock lock(this);
if (NULL != GetEntryById(new_id, &lock))
return false;
@@ -413,7 +413,7 @@ bool Directory::ReindexId(WriteTransaction* trans,
return true;
}
-bool Directory::ReindexParentId(WriteTransaction* trans,
+bool Directory::ReindexParentId(BaseWriteTransaction* trans,
EntryKernel* const entry,
const Id& new_parent_id) {
ScopedKernelLock lock(this);
@@ -910,17 +910,9 @@ int64 Directory::unsynced_entity_count() const {
return kernel_->unsynced_metahandles.size();
}
-FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
- BaseTransaction* trans) const {
- FullModelTypeSet server_types;
+bool Directory::TypeHasUnappliedUpdates(ModelType type) {
ScopedKernelLock lock(this);
- for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
- const ModelType type = ModelTypeFromInt(i);
- if (!kernel_->unapplied_update_metahandles[type].empty()) {
- server_types.Put(type);
- }
- }
- return server_types;
+ return !kernel_->unapplied_update_metahandles[type].empty();
}
void Directory::GetUnappliedUpdateMetaHandles(
@@ -957,26 +949,24 @@ void Directory::CollectMetaHandleCounts(
bool Directory::CheckInvariantsOnTransactionClose(
syncable::BaseTransaction* trans,
- const EntryKernelMutationMap& mutations) {
+ const MetahandleSet& modified_handles) {
// NOTE: The trans may be in the process of being destructed. Be careful if
// you wish to call any of its virtual methods.
- MetahandleSet handles;
-
switch (invariant_check_level_) {
- case FULL_DB_VERIFICATION:
- GetAllMetaHandles(trans, &handles);
- break;
- case VERIFY_CHANGES:
- for (EntryKernelMutationMap::const_iterator i = mutations.begin();
- i != mutations.end(); ++i) {
- handles.insert(i->first);
+ case FULL_DB_VERIFICATION: {
+ MetahandleSet all_handles;
+ GetAllMetaHandles(trans, &all_handles);
+ return CheckTreeInvariants(trans, all_handles);
+ }
+ case VERIFY_CHANGES: {
+ return CheckTreeInvariants(trans, modified_handles);
+ }
+ case OFF: {
+ return true;
}
- break;
- case OFF:
- break;
}
-
- return CheckTreeInvariants(trans, handles);
+ NOTREACHED();
+ return false;
}
bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
diff --git a/chromium/sync/syncable/directory.h b/chromium/sync/syncable/directory.h
index 2212a8c6502..0206dbb458b 100644
--- a/chromium/sync/syncable/directory.h
+++ b/chromium/sync/syncable/directory.h
@@ -32,6 +32,7 @@ class UnrecoverableErrorHandler;
namespace syncable {
class BaseTransaction;
+class BaseWriteTransaction;
class DirectoryChangeDelegate;
class DirectoryBackingStore;
class NigoriHandler;
@@ -48,6 +49,7 @@ enum InvariantCheckLevel {
class SYNC_EXPORT Directory {
friend class BaseTransaction;
friend class Entry;
+ friend class ModelNeutralMutableEntry;
friend class MutableEntry;
friend class ReadTransaction;
friend class ScopedKernelLock;
@@ -313,11 +315,8 @@ class SYNC_EXPORT Directory {
void GetUnsyncedMetaHandles(BaseTransaction* trans,
Metahandles* result);
- // Returns all server types with unapplied updates. A subset of
- // those types can then be passed into
- // GetUnappliedUpdateMetaHandles() below.
- FullModelTypeSet GetServerTypesWithUnappliedUpdates(
- BaseTransaction* trans) const;
+ // Returns whether or not this |type| has unapplied updates.
+ bool TypeHasUnappliedUpdates(ModelType type);
// Get all the metahandles for unapplied updates for a given set of
// server types.
@@ -340,7 +339,7 @@ class SYNC_EXPORT Directory {
// and may be used in release code.
bool CheckInvariantsOnTransactionClose(
syncable::BaseTransaction* trans,
- const EntryKernelMutationMap& mutations);
+ const MetahandleSet& modified_handles);
// Forces a full check of the directory. This operation may be slow and
// should not be invoked outside of tests.
@@ -376,9 +375,9 @@ class SYNC_EXPORT Directory {
EntryKernel* GetEntryByServerTag(const std::string& tag);
virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
EntryKernel* GetRootEntry();
- bool ReindexId(WriteTransaction* trans, EntryKernel* const entry,
+ bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_id);
- bool ReindexParentId(WriteTransaction* trans, EntryKernel* const entry,
+ bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_parent_id);
void ClearDirtyMetahandles();
@@ -505,9 +504,9 @@ class SYNC_EXPORT Directory {
void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
// For new entry creation only
- bool InsertEntry(WriteTransaction* trans,
+ bool InsertEntry(BaseWriteTransaction* trans,
EntryKernel* entry, ScopedKernelLock* lock);
- bool InsertEntry(WriteTransaction* trans, EntryKernel* entry);
+ bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);
// Used by CheckTreeInvariants
void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
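(Illustrative sketch, not part of the patch.) With GetServerTypesWithUnappliedUpdates() removed, callers now ask the directory about one ModelType at a time via TypeHasUnappliedUpdates(). A minimal sketch of the new query pattern, assuming |directory| points at an open syncer::syncable::Directory; the surrounding function name is hypothetical.

    void MaybeScheduleApplyUpdates(syncer::syncable::Directory* directory) {
      // Per-type query: no FullModelTypeSet is assembled up front.
      if (directory->TypeHasUnappliedUpdates(syncer::BOOKMARKS)) {
        // A per-type update handler would kick off an apply-updates pass here.
      }
    }
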
diff --git a/chromium/sync/syncable/directory_backing_store.cc b/chromium/sync/syncable/directory_backing_store.cc
index 0dd22a7881f..417a4d10e33 100644
--- a/chromium/sync/syncable/directory_backing_store.cc
+++ b/chromium/sync/syncable/directory_backing_store.cc
@@ -156,6 +156,8 @@ DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
dir_name_(dir_name),
needs_column_refresh_(false) {
db_->set_histogram_tag("SyncDirectory");
+ db_->set_page_size(4096);
+ db_->set_cache_size(32);
}
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
diff --git a/chromium/sync/syncable/entry.cc b/chromium/sync/syncable/entry.cc
index 3891c554024..852c33e32ed 100644
--- a/chromium/sync/syncable/entry.cc
+++ b/chromium/sync/syncable/entry.cc
@@ -147,11 +147,9 @@ std::ostream& operator<<(std::ostream& os, const Entry& entry) {
os << g_metas_columns[i].name << ": " << field << ", ";
}
for ( ; i < PROTO_FIELDS_END; ++i) {
- std::string escaped_str;
- base::JsonDoubleQuote(
+ std::string escaped_str = base::EscapeBytesAsInvalidJSONString(
kernel->ref(static_cast<ProtoField>(i)).SerializeAsString(),
- false,
- &escaped_str);
+ false);
os << g_metas_columns[i].name << ": " << escaped_str << ", ";
}
for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
diff --git a/chromium/sync/syncable/metahandle_set.h b/chromium/sync/syncable/metahandle_set.h
index 05228254089..5b4e4251798 100644
--- a/chromium/sync/syncable/metahandle_set.h
+++ b/chromium/sync/syncable/metahandle_set.h
@@ -5,6 +5,8 @@
#ifndef SYNC_SYNCABLE_METAHANDLE_SET_
#define SYNC_SYNCABLE_METAHANDLE_SET_
+#include <set>
+
#include "base/basictypes.h"
namespace syncer {
diff --git a/chromium/sync/syncable/model_neutral_mutable_entry.cc b/chromium/sync/syncable/model_neutral_mutable_entry.cc
new file mode 100644
index 00000000000..d778abacef7
--- /dev/null
+++ b/chromium/sync/syncable/model_neutral_mutable_entry.cc
@@ -0,0 +1,381 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/model_neutral_mutable_entry.h"
+
+#include <string>
+
+#include "sync/internal_api/public/base/unique_position.h"
+#include "sync/syncable/directory.h"
+#include "sync/syncable/scoped_kernel_lock.h"
+#include "sync/syncable/syncable_changes_version.h"
+#include "sync/syncable/syncable_util.h"
+#include "sync/syncable/syncable_write_transaction.h"
+
+using std::string;
+
+namespace syncer {
+
+namespace syncable {
+
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans,
+ CreateNewUpdateItem,
+ const Id& id)
+ : Entry(trans), base_write_transaction_(trans) {
+ Entry same_id(trans, GET_BY_ID, id);
+ kernel_ = NULL;
+ if (same_id.good()) {
+ return; // already have an item with this ID.
+ }
+ scoped_ptr<EntryKernel> kernel(new EntryKernel());
+
+ kernel->put(ID, id);
+ kernel->put(META_HANDLE, trans->directory()->NextMetahandle());
+ kernel->mark_dirty(&trans->directory()->kernel_->dirty_metahandles);
+ kernel->put(IS_DEL, true);
+ // We match the database defaults here
+ kernel->put(BASE_VERSION, CHANGES_VERSION);
+ if (!trans->directory()->InsertEntry(trans, kernel.get())) {
+ return; // Failed inserting.
+ }
+ trans->TrackChangesTo(kernel.get());
+
+ kernel_ = kernel.release();
+}
+
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(
+ BaseWriteTransaction* trans, GetById, const Id& id)
+ : Entry(trans, GET_BY_ID, id), base_write_transaction_(trans) {
+}
+
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(
+ BaseWriteTransaction* trans, GetByHandle, int64 metahandle)
+ : Entry(trans, GET_BY_HANDLE, metahandle), base_write_transaction_(trans) {
+}
+
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(
+ BaseWriteTransaction* trans, GetByClientTag, const std::string& tag)
+ : Entry(trans, GET_BY_CLIENT_TAG, tag), base_write_transaction_(trans) {
+}
+
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(
+ BaseWriteTransaction* trans, GetByServerTag, const string& tag)
+ : Entry(trans, GET_BY_SERVER_TAG, tag), base_write_transaction_(trans) {
+}
+
+void ModelNeutralMutableEntry::PutBaseVersion(int64 value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(BASE_VERSION) != value) {
+ kernel_->put(BASE_VERSION, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+void ModelNeutralMutableEntry::PutServerVersion(int64 value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(SERVER_VERSION) != value) {
+ ScopedKernelLock lock(dir());
+ kernel_->put(SERVER_VERSION, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+void ModelNeutralMutableEntry::PutServerMtime(base::Time value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(SERVER_MTIME) != value) {
+ kernel_->put(SERVER_MTIME, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+void ModelNeutralMutableEntry::PutServerCtime(base::Time value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(SERVER_CTIME) != value) {
+ kernel_->put(SERVER_CTIME, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+bool ModelNeutralMutableEntry::PutId(const Id& value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(ID) != value) {
+ if (!dir()->ReindexId(base_write_transaction(), kernel_, value))
+ return false;
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+ return true;
+}
+
+void ModelNeutralMutableEntry::PutServerParentId(const Id& value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+
+ if (kernel_->ref(SERVER_PARENT_ID) != value) {
+ kernel_->put(SERVER_PARENT_ID, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(IS_UNSYNCED) != value) {
+ MetahandleSet* index = &dir()->kernel_->unsynced_metahandles;
+
+ ScopedKernelLock lock(dir());
+ if (value) {
+ if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
+ FROM_HERE,
+ "Could not insert",
+ base_write_transaction())) {
+ return false;
+ }
+ } else {
+ if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
+ FROM_HERE,
+                      "Entry not successfully erased",
+ base_write_transaction())) {
+ return false;
+ }
+ }
+ kernel_->put(IS_UNSYNCED, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+ return true;
+}
+
+bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ if (kernel_->ref(IS_UNAPPLIED_UPDATE) != value) {
+ // Use kernel_->GetServerModelType() instead of
+ // GetServerModelType() as we may trigger some DCHECKs in the
+ // latter.
+ MetahandleSet* index = &dir()->kernel_->unapplied_update_metahandles[
+ kernel_->GetServerModelType()];
+
+ ScopedKernelLock lock(dir());
+ if (value) {
+ if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
+ FROM_HERE,
+ "Could not insert",
+ base_write_transaction())) {
+ return false;
+ }
+ } else {
+ if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
+ FROM_HERE,
+                      "Entry not successfully erased",
+ base_write_transaction())) {
+ return false;
+ }
+ }
+ kernel_->put(IS_UNAPPLIED_UPDATE, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+ return true;
+}
+
+void ModelNeutralMutableEntry::PutServerIsDir(bool value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ bool old_value = kernel_->ref(SERVER_IS_DIR);
+ if (old_value != value) {
+ kernel_->put(SERVER_IS_DIR, value);
+ kernel_->mark_dirty(GetDirtyIndexHelper());
+ }
+}
+
+void ModelNeutralMutableEntry::PutServerIsDel(bool value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ bool old_value = kernel_->ref(SERVER_IS_DEL);
+ if (old_value != value) {
+ kernel_->put(SERVER_IS_DEL, value);
+ kernel_->mark_dirty(GetDirtyIndexHelper());
+ }
+
+ // Update delete journal for existence status change on server side here
+ // instead of in PutIsDel() because IS_DEL may not be updated due to
+ // early returns when processing updates. And because
+ // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has
+  // to be called on the sync thread.
+ dir()->delete_journal()->UpdateDeleteJournalForServerDelete(
+ base_write_transaction(), old_value, *kernel_);
+}
+
+void ModelNeutralMutableEntry::PutServerNonUniqueName(
+ const std::string& value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+
+ if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) {
+ kernel_->put(SERVER_NON_UNIQUE_NAME, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+bool ModelNeutralMutableEntry::PutUniqueServerTag(const string& new_tag) {
+ if (new_tag == kernel_->ref(UNIQUE_SERVER_TAG)) {
+ return true;
+ }
+
+ base_write_transaction_->TrackChangesTo(kernel_);
+ ScopedKernelLock lock(dir());
+ // Make sure your new value is not in there already.
+ if (dir()->kernel_->server_tags_map.find(new_tag) !=
+ dir()->kernel_->server_tags_map.end()) {
+ DVLOG(1) << "Detected duplicate server tag";
+ return false;
+ }
+ dir()->kernel_->server_tags_map.erase(
+ kernel_->ref(UNIQUE_SERVER_TAG));
+ kernel_->put(UNIQUE_SERVER_TAG, new_tag);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ if (!new_tag.empty()) {
+ dir()->kernel_->server_tags_map[new_tag] = kernel_;
+ }
+
+ return true;
+}
+
+bool ModelNeutralMutableEntry::PutUniqueClientTag(const string& new_tag) {
+ if (new_tag == kernel_->ref(UNIQUE_CLIENT_TAG)) {
+ return true;
+ }
+
+ base_write_transaction_->TrackChangesTo(kernel_);
+ ScopedKernelLock lock(dir());
+ // Make sure your new value is not in there already.
+ if (dir()->kernel_->client_tags_map.find(new_tag) !=
+ dir()->kernel_->client_tags_map.end()) {
+ DVLOG(1) << "Detected duplicate client tag";
+ return false;
+ }
+ dir()->kernel_->client_tags_map.erase(
+ kernel_->ref(UNIQUE_CLIENT_TAG));
+ kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ if (!new_tag.empty()) {
+ dir()->kernel_->client_tags_map[new_tag] = kernel_;
+ }
+
+ return true;
+}
+
+void ModelNeutralMutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
+ // This unique tag will eventually be used as the unique suffix when adjusting
+ // this bookmark's position. Let's make sure it's a valid suffix.
+ if (!UniquePosition::IsValidSuffix(tag)) {
+ NOTREACHED();
+ return;
+ }
+
+ if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() &&
+ tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
+ // There is only one scenario where our tag is expected to change. That
+    // scenario occurs when our current tag is an incorrect tag assigned during
+ // the UniquePosition migration.
+ std::string migration_generated_tag =
+ GenerateSyncableBookmarkHash(std::string(),
+ kernel_->ref(ID).GetServerId());
+ DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG));
+ }
+
+ kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+}
+
+void ModelNeutralMutableEntry::PutServerSpecifics(
+ const sync_pb::EntitySpecifics& value) {
+ DCHECK(kernel_);
+ CHECK(!value.password().has_client_only_encrypted_data());
+ base_write_transaction_->TrackChangesTo(kernel_);
+ // TODO(ncarter): This is unfortunately heavyweight. Can we do
+ // better?
+ if (kernel_->ref(SERVER_SPECIFICS).SerializeAsString() !=
+ value.SerializeAsString()) {
+ if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
+ // Remove ourselves from unapplied_update_metahandles with our
+ // old server type.
+ const ModelType old_server_type = kernel_->GetServerModelType();
+ const int64 metahandle = kernel_->ref(META_HANDLE);
+ size_t erase_count =
+ dir()->kernel_->unapplied_update_metahandles[old_server_type]
+ .erase(metahandle);
+ DCHECK_EQ(erase_count, 1u);
+ }
+
+ kernel_->put(SERVER_SPECIFICS, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+
+ if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
+ // Add ourselves back into unapplied_update_metahandles with our
+ // new server type.
+ const ModelType new_server_type = kernel_->GetServerModelType();
+ const int64 metahandle = kernel_->ref(META_HANDLE);
+ dir()->kernel_->unapplied_update_metahandles[new_server_type]
+ .insert(metahandle);
+ }
+ }
+}
+
+void ModelNeutralMutableEntry::PutBaseServerSpecifics(
+ const sync_pb::EntitySpecifics& value) {
+ DCHECK(kernel_);
+ CHECK(!value.password().has_client_only_encrypted_data());
+ base_write_transaction_->TrackChangesTo(kernel_);
+ // TODO(ncarter): This is unfortunately heavyweight. Can we do
+ // better?
+ if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()
+ != value.SerializeAsString()) {
+ kernel_->put(BASE_SERVER_SPECIFICS, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+void ModelNeutralMutableEntry::PutServerUniquePosition(
+ const UniquePosition& value) {
+ DCHECK(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
+  if (!kernel_->ref(SERVER_UNIQUE_POSITION).Equals(value)) {
+ // We should never overwrite a valid position with an invalid one.
+ DCHECK(value.IsValid());
+ ScopedKernelLock lock(dir());
+ kernel_->put(SERVER_UNIQUE_POSITION, value);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ }
+}
+
+void ModelNeutralMutableEntry::PutSyncing(bool value) {
+ kernel_->put(SYNCING, value);
+}
+
+void ModelNeutralMutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
+ base_write_transaction_->TrackChangesTo(kernel_);
+ dir()->ReindexParentId(base_write_transaction(), kernel_, parent_id);
+ kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+}
+
+void ModelNeutralMutableEntry::UpdateTransactionVersion(int64 value) {
+ ScopedKernelLock lock(dir());
+ kernel_->put(TRANSACTION_VERSION, value);
+ kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles));
+}
+
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans)
+ : Entry(trans), base_write_transaction_(trans) {}
+
+MetahandleSet* ModelNeutralMutableEntry::GetDirtyIndexHelper() {
+ return &dir()->kernel_->dirty_metahandles;
+}
+
+} // namespace syncable
+
+} // namespace syncer
diff --git a/chromium/sync/syncable/model_neutral_mutable_entry.h b/chromium/sync/syncable/model_neutral_mutable_entry.h
new file mode 100644
index 00000000000..e2292e7045d
--- /dev/null
+++ b/chromium/sync/syncable/model_neutral_mutable_entry.h
@@ -0,0 +1,116 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_MODEL_NEUTRAL_MUTABLE_ENTRY_H_
+#define SYNC_SYNCABLE_MODEL_NEUTRAL_MUTABLE_ENTRY_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/syncable/entry.h"
+
+namespace syncer {
+class WriteNode;
+
+namespace syncable {
+
+class BaseWriteTransaction;
+
+enum CreateNewUpdateItem {
+ CREATE_NEW_UPDATE_ITEM
+};
+
+// This Entry includes all the operations one can safely perform on the sync
+// thread. In particular, it does not expose setters to make changes that need
+// to be communicated to the model (and the model's thread). It is not possible
+// to change an entry's SPECIFICS or UNIQUE_POSITION fields with this kind of
+// entry.
+class SYNC_EXPORT_PRIVATE ModelNeutralMutableEntry : public Entry {
+ public:
+ ModelNeutralMutableEntry(BaseWriteTransaction* trans,
+ CreateNewUpdateItem,
+ const Id& id);
+ ModelNeutralMutableEntry(BaseWriteTransaction* trans, GetByHandle, int64);
+ ModelNeutralMutableEntry(BaseWriteTransaction* trans, GetById, const Id&);
+ ModelNeutralMutableEntry(
+ BaseWriteTransaction* trans,
+ GetByClientTag,
+ const std::string& tag);
+ ModelNeutralMutableEntry(
+ BaseWriteTransaction* trans,
+ GetByServerTag,
+ const std::string& tag);
+
+ inline BaseWriteTransaction* base_write_transaction() const {
+ return base_write_transaction_;
+ }
+
+ // Non-model-changing setters. These setters will change properties internal
+ // to the node. These fields are important for bookkeeping in the sync
+ // internals, but it is not necessary to communicate changes in these fields
+ // to the local models.
+ //
+ // Some of them trigger the re-indexing of the entry. They return true on
+ // success and false on failure, which occurs when putting the value would
+ // have caused a duplicate in the index. The setters that never fail return
+ // void.
+ void PutBaseVersion(int64 value);
+ void PutServerVersion(int64 value);
+ void PutServerMtime(base::Time value);
+ void PutServerCtime(base::Time value);
+ bool PutId(const Id& value);
+ void PutServerParentId(const Id& value);
+ bool PutIsUnsynced(bool value);
+ bool PutIsUnappliedUpdate(bool value);
+ void PutServerIsDir(bool value);
+ void PutServerIsDel(bool value);
+ void PutServerNonUniqueName(const std::string& value);
+ bool PutUniqueServerTag(const std::string& value);
+ bool PutUniqueClientTag(const std::string& value);
+ void PutUniqueBookmarkTag(const std::string& tag);
+ void PutServerSpecifics(const sync_pb::EntitySpecifics& value);
+ void PutBaseServerSpecifics(const sync_pb::EntitySpecifics& value);
+ void PutServerUniquePosition(const UniquePosition& value);
+ void PutSyncing(bool value);
+
+ // Do a simple property-only update of the PARENT_ID field. Use with caution.
+ //
+ // The normal Put(IS_PARENT) call will move the item to the front of the
+ // sibling order to maintain the linked list invariants when the parent
+ // changes. That's usually what you want to do, but it's inappropriate
+  // when the caller is trying to change the parent ID of the whole set
+ // of children (e.g. because the ID changed during a commit). For those
+ // cases, there's this function. It will corrupt the sibling ordering
+ // if you're not careful.
+ void PutParentIdPropertyOnly(const Id& parent_id);
+
+ // This is similar to what one would expect from Put(TRANSACTION_VERSION),
+ // except that it doesn't bother to invoke 'SaveOriginals'. Calling that
+ // function is at best unnecessary, since the transaction will have already
+ // used its list of mutations by the time this function is called.
+ void UpdateTransactionVersion(int64 version);
+
+ protected:
+ explicit ModelNeutralMutableEntry(BaseWriteTransaction* trans);
+
+ syncable::MetahandleSet* GetDirtyIndexHelper();
+
+ private:
+ friend class syncer::WriteNode;
+ friend class Directory;
+
+ // Don't allow creation on heap, except by sync API wrappers.
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ // Kind of redundant. We should reduce the number of pointers
+ // floating around if at all possible. Could we store this in Directory?
+ // Scope: Set on construction, never changed after that.
+ BaseWriteTransaction* const base_write_transaction_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelNeutralMutableEntry);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_MODEL_NEUTRAL_MUTABLE_ENTRY_H_
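(Illustrative sketch, not part of the patch.) The new ModelNeutralMutableEntry exposes only the server-side, bookkeeping setters, so sync-thread code can mutate those fields through a BaseWriteTransaction without a model-level WriteTransaction. A minimal sketch, assuming |trans| is a live syncable::BaseWriteTransaction and |id| names an existing entry; the function name is hypothetical.

    void StampServerVersion(syncer::syncable::BaseWriteTransaction* trans,
                            const syncer::syncable::Id& id,
                            int64 version) {
      syncer::syncable::ModelNeutralMutableEntry entry(
          trans, syncer::syncable::GET_BY_ID, id);
      if (!entry.good())
        return;  // No entry with this id.
      // Server-side bookkeeping only; nothing here reaches the local model.
      entry.PutServerVersion(version);
      entry.PutIsUnappliedUpdate(true);
    }
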
diff --git a/chromium/sync/syncable/model_type.cc b/chromium/sync/syncable/model_type.cc
index b9dacbecf15..fa331187323 100644
--- a/chromium/sync/syncable/model_type.cc
+++ b/chromium/sync/syncable/model_type.cc
@@ -68,6 +68,9 @@ void AddDefaultFieldValue(ModelType datatype,
case APPS:
specifics->mutable_app();
break;
+ case APP_LIST:
+ specifics->mutable_app_list();
+ break;
case APP_SETTINGS:
specifics->mutable_app_setting();
break;
@@ -107,6 +110,9 @@ void AddDefaultFieldValue(ModelType datatype,
case MANAGED_USERS:
specifics->mutable_managed_user();
break;
+ case ARTICLES:
+ specifics->mutable_article();
+ break;
default:
NOTREACHED() << "No known extension for model type.";
}
@@ -164,6 +170,9 @@ int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
case APPS:
return sync_pb::EntitySpecifics::kAppFieldNumber;
break;
+ case APP_LIST:
+ return sync_pb::EntitySpecifics::kAppListFieldNumber;
+ break;
case APP_SETTINGS:
return sync_pb::EntitySpecifics::kAppSettingFieldNumber;
break;
@@ -197,6 +206,8 @@ int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
return sync_pb::EntitySpecifics::kManagedUserSettingFieldNumber;
case MANAGED_USERS:
return sync_pb::EntitySpecifics::kManagedUserFieldNumber;
+ case ARTICLES:
+ return sync_pb::EntitySpecifics::kArticleFieldNumber;
default:
NOTREACHED() << "No known extension for model type.";
return 0;
@@ -218,9 +229,6 @@ FullModelTypeSet ToFullModelTypeSet(ModelTypeSet in) {
ModelType GetModelType(const sync_pb::SyncEntity& sync_entity) {
DCHECK(!IsRoot(sync_entity)); // Root shouldn't ever go over the wire.
- if (sync_entity.deleted())
- return UNSPECIFIED;
-
// Backwards compatibility with old (pre-specifics) protocol.
if (sync_entity.has_bookmarkdata())
return BOOKMARKS;
@@ -274,6 +282,9 @@ ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
if (specifics.has_app())
return APPS;
+ if (specifics.has_app_list())
+ return APP_LIST;
+
if (specifics.has_search_engine())
return SEARCH_ENGINES;
@@ -319,6 +330,9 @@ ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
if (specifics.has_managed_user())
return MANAGED_USERS;
+ if (specifics.has_article())
+ return ARTICLES;
+
return UNSPECIFIED;
}
@@ -456,6 +470,8 @@ const char* ModelTypeToString(ModelType model_type) {
return "Sessions";
case APPS:
return "Apps";
+ case APP_LIST:
+ return "App List";
case AUTOFILL_PROFILE:
return "Autofill Profiles";
case APP_SETTINGS:
@@ -484,6 +500,8 @@ const char* ModelTypeToString(ModelType model_type) {
return "Managed User Settings";
case MANAGED_USERS:
return "Managed Users";
+ case ARTICLES:
+ return "Articles";
case PROXY_TABS:
return "Tabs";
default:
@@ -555,6 +573,10 @@ int ModelTypeToHistogramInt(ModelType model_type) {
return 26;
case MANAGED_USERS:
return 27;
+ case ARTICLES:
+ return 28;
+ case APP_LIST:
+ return 29;
// Silence a compiler warning.
case MODEL_TYPE_COUNT:
return 0;
@@ -614,6 +636,8 @@ ModelType ModelTypeFromString(const std::string& model_type_string) {
return SESSIONS;
else if (model_type_string == "Apps")
return APPS;
+ else if (model_type_string == "App List")
+ return APP_LIST;
else if (model_type_string == "App settings")
return APP_SETTINGS;
else if (model_type_string == "Extension settings")
@@ -640,6 +664,8 @@ ModelType ModelTypeFromString(const std::string& model_type_string) {
return MANAGED_USER_SETTINGS;
else if (model_type_string == "Managed Users")
return MANAGED_USERS;
+ else if (model_type_string == "Articles")
+ return ARTICLES;
else if (model_type_string == "Tabs")
return PROXY_TABS;
else
@@ -704,6 +730,8 @@ std::string ModelTypeToRootTag(ModelType type) {
return "google_chrome_sessions";
case APPS:
return "google_chrome_apps";
+ case APP_LIST:
+ return "google_chrome_app_list";
case AUTOFILL_PROFILE:
return "google_chrome_autofill_profiles";
case APP_SETTINGS:
@@ -732,6 +760,8 @@ std::string ModelTypeToRootTag(ModelType type) {
return "google_chrome_managed_user_settings";
case MANAGED_USERS:
return "google_chrome_managed_users";
+ case ARTICLES:
+ return "google_chrome_articles";
case PROXY_TABS:
return std::string();
default:
@@ -756,6 +786,7 @@ const char kExtensionSettingNotificationType[] = "EXTENSION_SETTING";
const char kNigoriNotificationType[] = "NIGORI";
const char kAppSettingNotificationType[] = "APP_SETTING";
const char kAppNotificationType[] = "APP";
+const char kAppListNotificationType[] = "APP_LIST";
const char kSearchEngineNotificationType[] = "SEARCH_ENGINE";
const char kSessionNotificationType[] = "SESSION";
const char kAutofillProfileNotificationType[] = "AUTOFILL_PROFILE";
@@ -771,6 +802,7 @@ const char kFaviconImageNotificationType[] = "FAVICON_IMAGE";
const char kFaviconTrackingNotificationType[] = "FAVICON_TRACKING";
const char kManagedUserSettingNotificationType[] = "MANAGED_USER_SETTING";
const char kManagedUserNotificationType[] = "MANAGED_USER";
+const char kArticleNotificationType[] = "ARTICLE";
} // namespace
bool RealModelTypeToNotificationType(ModelType model_type,
@@ -806,6 +838,9 @@ bool RealModelTypeToNotificationType(ModelType model_type,
case APPS:
*notification_type = kAppNotificationType;
return true;
+ case APP_LIST:
+ *notification_type = kAppListNotificationType;
+ return true;
case SEARCH_ENGINES:
*notification_type = kSearchEngineNotificationType;
return true;
@@ -851,6 +886,9 @@ bool RealModelTypeToNotificationType(ModelType model_type,
case MANAGED_USERS:
*notification_type = kManagedUserNotificationType;
return true;
+ case ARTICLES:
+ *notification_type = kArticleNotificationType;
+ return true;
default:
break;
}
@@ -887,6 +925,9 @@ bool NotificationTypeToRealModelType(const std::string& notification_type,
} else if (notification_type == kAppNotificationType) {
*model_type = APPS;
return true;
+ } else if (notification_type == kAppListNotificationType) {
+ *model_type = APP_LIST;
+ return true;
} else if (notification_type == kSearchEngineNotificationType) {
*model_type = SEARCH_ENGINES;
return true;
@@ -935,6 +976,9 @@ bool NotificationTypeToRealModelType(const std::string& notification_type,
} else if (notification_type == kManagedUserNotificationType) {
*model_type = MANAGED_USERS;
return true;
+ } else if (notification_type == kArticleNotificationType) {
+ *model_type = ARTICLES;
+ return true;
}
*model_type = UNSPECIFIED;
return false;
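(Illustrative round trip, not part of the patch.) The new APP_LIST cases in RealModelTypeToNotificationType() and NotificationTypeToRealModelType() mirror each other, so the mapping is reversible:

    std::string notification_type;
    if (syncer::RealModelTypeToNotificationType(syncer::APP_LIST,
                                                &notification_type)) {
      // notification_type is now "APP_LIST".
      syncer::ModelType round_tripped = syncer::UNSPECIFIED;
      syncer::NotificationTypeToRealModelType(notification_type,
                                              &round_tripped);
      // round_tripped is syncer::APP_LIST again.
    }
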
diff --git a/chromium/sync/syncable/mutable_entry.cc b/chromium/sync/syncable/mutable_entry.cc
index 0c160c27dd7..863e65b8b32 100644
--- a/chromium/sync/syncable/mutable_entry.cc
+++ b/chromium/sync/syncable/mutable_entry.cc
@@ -47,7 +47,7 @@ void MutableEntry::Init(WriteTransaction* trans,
// Because this entry is new, it was originally deleted.
kernel->put(IS_DEL, true);
- trans->SaveOriginal(kernel.get());
+ trans->TrackChangesTo(kernel.get());
kernel->put(IS_DEL, false);
// Now swap the pointers.
@@ -59,8 +59,7 @@ MutableEntry::MutableEntry(WriteTransaction* trans,
ModelType model_type,
const Id& parent_id,
const string& name)
- : Entry(trans),
- write_transaction_(trans) {
+ : ModelNeutralMutableEntry(trans), write_transaction_(trans) {
Init(trans, model_type, parent_id, name);
// We need to have a valid position ready before we can index the item.
if (model_type == BOOKMARKS) {
@@ -79,69 +78,35 @@ MutableEntry::MutableEntry(WriteTransaction* trans,
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
const Id& id)
- : Entry(trans), write_transaction_(trans) {
- Entry same_id(trans, GET_BY_ID, id);
- kernel_ = NULL;
- if (same_id.good()) {
- return; // already have an item with this ID.
- }
- scoped_ptr<EntryKernel> kernel(new EntryKernel());
-
- kernel->put(ID, id);
- kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
- kernel->mark_dirty(&trans->directory_->kernel_->dirty_metahandles);
- kernel->put(IS_DEL, true);
- // We match the database defaults here
- kernel->put(BASE_VERSION, CHANGES_VERSION);
- if (!trans->directory()->InsertEntry(trans, kernel.get())) {
- return; // Failed inserting.
- }
- trans->SaveOriginal(kernel.get());
-
- kernel_ = kernel.release();
-}
+ : ModelNeutralMutableEntry(trans, CREATE_NEW_UPDATE_ITEM, id),
+ write_transaction_(trans) {}
MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
- : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
+ : ModelNeutralMutableEntry(trans, GET_BY_ID, id),
+ write_transaction_(trans) {
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
int64 metahandle)
- : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
+ : ModelNeutralMutableEntry(trans, GET_BY_HANDLE, metahandle),
+ write_transaction_(trans) {
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
const std::string& tag)
- : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
+ : ModelNeutralMutableEntry(trans, GET_BY_CLIENT_TAG, tag),
+ write_transaction_(trans) {
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
const string& tag)
- : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
-}
-
-void MutableEntry::PutBaseVersion(int64 value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(BASE_VERSION) != value) {
- kernel_->put(BASE_VERSION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutServerVersion(int64 value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(SERVER_VERSION) != value) {
- ScopedKernelLock lock(dir());
- kernel_->put(SERVER_VERSION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
+ : ModelNeutralMutableEntry(trans, GET_BY_SERVER_TAG, tag),
+ write_transaction_(trans) {
}
void MutableEntry::PutLocalExternalId(int64 value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(LOCAL_EXTERNAL_ID) != value) {
ScopedKernelLock lock(dir());
kernel_->put(LOCAL_EXTERNAL_ID, value);
@@ -151,54 +116,25 @@ void MutableEntry::PutLocalExternalId(int64 value) {
void MutableEntry::PutMtime(base::Time value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(MTIME) != value) {
kernel_->put(MTIME, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
}
}
-void MutableEntry::PutServerMtime(base::Time value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(SERVER_MTIME) != value) {
- kernel_->put(SERVER_MTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
void MutableEntry::PutCtime(base::Time value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(CTIME) != value) {
kernel_->put(CTIME, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
}
}
-void MutableEntry::PutServerCtime(base::Time value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(SERVER_CTIME) != value) {
- kernel_->put(SERVER_CTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool MutableEntry::PutId(const Id& value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(ID) != value) {
- if (!dir()->ReindexId(write_transaction(), kernel_, value))
- return false;
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
void MutableEntry::PutParentId(const Id& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(PARENT_ID) != value) {
PutParentIdPropertyOnly(value);
if (!GetIsDel()) {
@@ -210,79 +146,9 @@ void MutableEntry::PutParentId(const Id& value) {
}
}
-void MutableEntry::PutServerParentId(const Id& value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
-
- if (kernel_->ref(SERVER_PARENT_ID) != value) {
- kernel_->put(SERVER_PARENT_ID, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool MutableEntry::PutIsUnsynced(bool value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(IS_UNSYNCED) != value) {
- MetahandleSet* index = &dir()->kernel_->unsynced_metahandles;
-
- ScopedKernelLock lock(dir());
- if (value) {
- if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
- FROM_HERE,
- "Could not insert",
- write_transaction())) {
- return false;
- }
- } else {
- if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
- FROM_HERE,
- "Entry Not succesfully erased",
- write_transaction())) {
- return false;
- }
- }
- kernel_->put(IS_UNSYNCED, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::PutIsUnappliedUpdate(bool value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(IS_UNAPPLIED_UPDATE) != value) {
- // Use kernel_->GetServerModelType() instead of
- // GetServerModelType() as we may trigger some DCHECKs in the
- // latter.
- MetahandleSet* index = &dir()->kernel_->unapplied_update_metahandles[
- kernel_->GetServerModelType()];
-
- ScopedKernelLock lock(dir());
- if (value) {
- if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
- FROM_HERE,
- "Could not insert",
- write_transaction())) {
- return false;
- }
- } else {
- if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
- FROM_HERE,
- "Entry Not succesfully erased",
- write_transaction())) {
- return false;
- }
- }
- kernel_->put(IS_UNAPPLIED_UPDATE, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
void MutableEntry::PutIsDir(bool value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
bool old_value = kernel_->ref(IS_DIR);
if (old_value != value) {
kernel_->put(IS_DIR, value);
@@ -290,19 +156,9 @@ void MutableEntry::PutIsDir(bool value) {
}
}
-void MutableEntry::PutServerIsDir(bool value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- bool old_value = kernel_->ref(SERVER_IS_DIR);
- if (old_value != value) {
- kernel_->put(SERVER_IS_DIR, value);
- kernel_->mark_dirty(GetDirtyIndexHelper());
- }
-}
-
void MutableEntry::PutIsDel(bool value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (value == kernel_->ref(IS_DEL)) {
return;
}
@@ -332,27 +188,9 @@ void MutableEntry::PutIsDel(bool value) {
}
}
-void MutableEntry::PutServerIsDel(bool value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- bool old_value = kernel_->ref(SERVER_IS_DEL);
- if (old_value != value) {
- kernel_->put(SERVER_IS_DEL, value);
- kernel_->mark_dirty(GetDirtyIndexHelper());
- }
-
- // Update delete journal for existence status change on server side here
- // instead of in PutIsDel() because IS_DEL may not be updated due to
- // early returns when processing updates. And because
- // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has
- // to be called on sync thread.
- dir()->delete_journal()->UpdateDeleteJournalForServerDelete(
- write_transaction(), old_value, *kernel_);
-}
-
void MutableEntry::PutNonUniqueName(const std::string& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(NON_UNIQUE_NAME) != value) {
kernel_->put(NON_UNIQUE_NAME, value);
@@ -360,91 +198,10 @@ void MutableEntry::PutNonUniqueName(const std::string& value) {
}
}
-void MutableEntry::PutServerNonUniqueName(const std::string& value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
-
- if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) {
- kernel_->put(SERVER_NON_UNIQUE_NAME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool MutableEntry::PutUniqueServerTag(const string& new_tag) {
- if (new_tag == kernel_->ref(UNIQUE_SERVER_TAG)) {
- return true;
- }
-
- write_transaction_->SaveOriginal(kernel_);
- ScopedKernelLock lock(dir());
- // Make sure your new value is not in there already.
- if (dir()->kernel_->server_tags_map.find(new_tag) !=
- dir()->kernel_->server_tags_map.end()) {
- DVLOG(1) << "Detected duplicate server tag";
- return false;
- }
- dir()->kernel_->server_tags_map.erase(
- kernel_->ref(UNIQUE_SERVER_TAG));
- kernel_->put(UNIQUE_SERVER_TAG, new_tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- if (!new_tag.empty()) {
- dir()->kernel_->server_tags_map[new_tag] = kernel_;
- }
-
- return true;
-}
-
-bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
- if (new_tag == kernel_->ref(UNIQUE_CLIENT_TAG)) {
- return true;
- }
-
- write_transaction_->SaveOriginal(kernel_);
- ScopedKernelLock lock(dir());
- // Make sure your new value is not in there already.
- if (dir()->kernel_->client_tags_map.find(new_tag) !=
- dir()->kernel_->client_tags_map.end()) {
- DVLOG(1) << "Detected duplicate client tag";
- return false;
- }
- dir()->kernel_->client_tags_map.erase(
- kernel_->ref(UNIQUE_CLIENT_TAG));
- kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- if (!new_tag.empty()) {
- dir()->kernel_->client_tags_map[new_tag] = kernel_;
- }
-
- return true;
-}
-
-void MutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
- // This unique tag will eventually be used as the unique suffix when adjusting
- // this bookmark's position. Let's make sure it's a valid suffix.
- if (!UniquePosition::IsValidSuffix(tag)) {
- NOTREACHED();
- return;
- }
-
- if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() &&
- tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
- // There is only one scenario where our tag is expected to change. That
- // scenario occurs when our current tag is a non-correct tag assigned during
- // the UniquePosition migration.
- std::string migration_generated_tag =
- GenerateSyncableBookmarkHash(std::string(),
- kernel_->ref(ID).GetServerId());
- DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG));
- }
-
- kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
-}
-
void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
DCHECK(kernel_);
CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
// TODO(ncarter): This is unfortunately heavyweight. Can we do
// better?
if (kernel_->ref(SPECIFICS).SerializeAsString() !=
@@ -454,56 +211,9 @@ void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
}
}
-void MutableEntry::PutServerSpecifics(const sync_pb::EntitySpecifics& value) {
- DCHECK(kernel_);
- CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction_->SaveOriginal(kernel_);
- // TODO(ncarter): This is unfortunately heavyweight. Can we do
- // better?
- if (kernel_->ref(SERVER_SPECIFICS).SerializeAsString() !=
- value.SerializeAsString()) {
- if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
- // Remove ourselves from unapplied_update_metahandles with our
- // old server type.
- const ModelType old_server_type = kernel_->GetServerModelType();
- const int64 metahandle = kernel_->ref(META_HANDLE);
- size_t erase_count =
- dir()->kernel_->unapplied_update_metahandles[old_server_type]
- .erase(metahandle);
- DCHECK_EQ(erase_count, 1u);
- }
-
- kernel_->put(SERVER_SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
-
- if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
- // Add ourselves back into unapplied_update_metahandles with our
- // new server type.
- const ModelType new_server_type = kernel_->GetServerModelType();
- const int64 metahandle = kernel_->ref(META_HANDLE);
- dir()->kernel_->unapplied_update_metahandles[new_server_type]
- .insert(metahandle);
- }
- }
-}
-
-void MutableEntry::PutBaseServerSpecifics(
- const sync_pb::EntitySpecifics& value) {
- DCHECK(kernel_);
- CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction_->SaveOriginal(kernel_);
- // TODO(ncarter): This is unfortunately heavyweight. Can we do
- // better?
- if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()
- != value.SerializeAsString()) {
- kernel_->put(BASE_SERVER_SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
void MutableEntry::PutUniquePosition(const UniquePosition& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if(!kernel_->ref(UNIQUE_POSITION).Equals(value)) {
// We should never overwrite a valid position with an invalid one.
DCHECK(value.IsValid());
@@ -515,46 +225,14 @@ void MutableEntry::PutUniquePosition(const UniquePosition& value) {
}
}
-void MutableEntry::PutServerUniquePosition(const UniquePosition& value) {
- DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
- if(!kernel_->ref(SERVER_UNIQUE_POSITION).Equals(value)) {
- // We should never overwrite a valid position with an invalid one.
- DCHECK(value.IsValid());
- ScopedKernelLock lock(dir());
- kernel_->put(SERVER_UNIQUE_POSITION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutSyncing(bool value) {
- kernel_->put(SYNCING, value);
-}
-
-void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
- write_transaction_->SaveOriginal(kernel_);
- dir()->ReindexParentId(write_transaction(), kernel_, parent_id);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
-}
-
-MetahandleSet* MutableEntry::GetDirtyIndexHelper() {
- return &dir()->kernel_->dirty_metahandles;
-}
-
bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
- MutableEntry predecessor(write_transaction_, GET_BY_ID, predecessor_id);
+ MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
if (!predecessor.good())
return false;
dir()->PutPredecessor(kernel_, predecessor.kernel_);
return true;
}
-void MutableEntry::UpdateTransactionVersion(int64 value) {
- ScopedKernelLock lock(dir());
- kernel_->put(TRANSACTION_VERSION, value);
- kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles));
-}
-
// This function sets only the flags needed to get this entry to sync.
bool MarkForSyncing(MutableEntry* e) {
DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
diff --git a/chromium/sync/syncable/mutable_entry.h b/chromium/sync/syncable/mutable_entry.h
index 40079e17ccb..8c2f2ab5492 100644
--- a/chromium/sync/syncable/mutable_entry.h
+++ b/chromium/sync/syncable/mutable_entry.h
@@ -9,32 +9,29 @@
#include "sync/internal_api/public/base/model_type.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
namespace syncer {
class WriteNode;
namespace syncable {
-class WriteTransaction;
-
enum Create {
CREATE
};
-enum CreateNewUpdateItem {
- CREATE_NEW_UPDATE_ITEM
-};
+class WriteTransaction;
// A mutable meta entry. Changes get committed to the database when the
// WriteTransaction is destroyed.
-class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
+class SYNC_EXPORT_PRIVATE MutableEntry : public ModelNeutralMutableEntry {
void Init(WriteTransaction* trans, ModelType model_type,
const Id& parent_id, const std::string& name);
public:
+ MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
MutableEntry(WriteTransaction* trans, Create, ModelType model_type,
const Id& parent_id, const std::string& name);
- MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
MutableEntry(WriteTransaction* trans, GetByHandle, int64);
MutableEntry(WriteTransaction* trans, GetById, const Id&);
MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
@@ -44,48 +41,17 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
return write_transaction_;
}
- // Field Accessors. Some of them trigger the re-indexing of the entry.
- // Return true on success, return false on failure, which means that putting
- // the value would have caused a duplicate in the index. The setters that
- // never fail return void.
- void PutBaseVersion(int64 value);
- void PutServerVersion(int64 value);
+ // Model-changing setters. These setters make user-visible changes that will
+ // need to be communicated either to the local model or the sync server.
void PutLocalExternalId(int64 value);
void PutMtime(base::Time value);
- void PutServerMtime(base::Time value);
void PutCtime(base::Time value);
- void PutServerCtime(base::Time value);
- bool PutId(const Id& value);
void PutParentId(const Id& value);
- void PutServerParentId(const Id& value);
- bool PutIsUnsynced(bool value);
- bool PutIsUnappliedUpdate(bool value);
void PutIsDir(bool value);
- void PutServerIsDir(bool value);
void PutIsDel(bool value);
- void PutServerIsDel(bool value);
void PutNonUniqueName(const std::string& value);
- void PutServerNonUniqueName(const std::string& value);
- bool PutUniqueServerTag(const std::string& value);
- bool PutUniqueClientTag(const std::string& value);
- void PutUniqueBookmarkTag(const std::string& tag);
void PutSpecifics(const sync_pb::EntitySpecifics& value);
- void PutServerSpecifics(const sync_pb::EntitySpecifics& value);
- void PutBaseServerSpecifics(const sync_pb::EntitySpecifics& value);
void PutUniquePosition(const UniquePosition& value);
- void PutServerUniquePosition(const UniquePosition& value);
- void PutSyncing(bool value);
-
- // Do a simple property-only update if the PARENT_ID field. Use with caution.
- //
- // The normal Put(IS_PARENT) call will move the item to the front of the
- // sibling order to maintain the linked list invariants when the parent
- // changes. That's usually what you want to do, but it's inappropriate
- // when the caller is trying to change the parent ID of a the whole set
- // of children (e.g. because the ID changed during a commit). For those
- // cases, there's this function. It will corrupt the sibling ordering
- // if you're not careful.
- void PutParentIdPropertyOnly(const Id& parent_id);
// Sets the position of this item, and updates the entry kernels of the
// adjacent siblings so that list invariants are maintained. Returns false
@@ -93,35 +59,12 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
// ID to put the node in first position.
bool PutPredecessor(const Id& predecessor_id);
- // This is similar to what one would expect from Put(TRANSACTION_VERSION),
- // except that it doesn't bother to invoke 'SaveOriginals'. Calling that
- // function is at best unnecessary, since the transaction will have already
- // used its list of mutations by the time this function is called.
- void UpdateTransactionVersion(int64 version);
-
- protected:
- syncable::MetahandleSet* GetDirtyIndexHelper();
-
private:
- friend class Directory;
- friend class WriteTransaction;
- friend class syncer::WriteNode;
-
- // Don't allow creation on heap, except by sync API wrappers.
- void* operator new(size_t size) { return (::operator new)(size); }
-
- // Adjusts the successor and predecessor entries so that they no longer
- // refer to this entry.
- bool UnlinkFromOrder();
-
// Kind of redundant. We should reduce the number of pointers
// floating around if at all possible. Could we store this in Directory?
// Scope: Set on construction, never changed after that.
WriteTransaction* const write_transaction_;
- protected:
- MutableEntry();
-
DISALLOW_COPY_AND_ASSIGN(MutableEntry);
};
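
With the setters split as above, MutableEntry keeps only the model-changing path. A minimal sketch of that path for reference; the RenameEntry helper, the header paths and the GET_BY_HANDLE lookup are illustrative assumptions, not part of this patch:

#include <string>

#include "base/location.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_write_transaction.h"

namespace syncer {
namespace syncable {

// Hypothetical helper: rename an entry through the model-changing setters.
void RenameEntry(Directory* dir, int64 metahandle, const std::string& name) {
  WriteTransaction trans(FROM_HERE, SYNCAPI, dir);
  MutableEntry entry(&trans, GET_BY_HANDLE, metahandle);
  if (entry.good())
    entry.PutNonUniqueName(name);  // TrackChangesTo() records the old state.
}

}  // namespace syncable
}  // namespace syncer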
diff --git a/chromium/sync/syncable/nigori_util.cc b/chromium/sync/syncable/nigori_util.cc
index 9100e9d4fad..107a68f233b 100644
--- a/chromium/sync/syncable/nigori_util.cc
+++ b/chromium/sync/syncable/nigori_util.cc
@@ -242,7 +242,7 @@ void UpdateNigoriFromEncryptedTypes(ModelTypeSet encrypted_types,
bool encrypt_everything,
sync_pb::NigoriSpecifics* nigori) {
nigori->set_encrypt_everything(encrypt_everything);
- COMPILE_ASSERT(28 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
+ COMPILE_ASSERT(30 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
nigori->set_encrypt_bookmarks(
encrypted_types.Has(BOOKMARKS));
nigori->set_encrypt_preferences(
@@ -268,6 +268,8 @@ void UpdateNigoriFromEncryptedTypes(ModelTypeSet encrypted_types,
nigori->set_encrypt_dictionary(encrypted_types.Has(DICTIONARY));
nigori->set_encrypt_favicon_images(encrypted_types.Has(FAVICON_IMAGES));
nigori->set_encrypt_favicon_tracking(encrypted_types.Has(FAVICON_TRACKING));
+ nigori->set_encrypt_articles(encrypted_types.Has(ARTICLES));
+ nigori->set_encrypt_app_list(encrypted_types.Has(APP_LIST));
}
ModelTypeSet GetEncryptedTypesFromNigori(
@@ -276,7 +278,7 @@ ModelTypeSet GetEncryptedTypesFromNigori(
return ModelTypeSet::All();
ModelTypeSet encrypted_types;
- COMPILE_ASSERT(28 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
+ COMPILE_ASSERT(30 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
if (nigori.encrypt_bookmarks())
encrypted_types.Put(BOOKMARKS);
if (nigori.encrypt_preferences())
@@ -309,6 +311,10 @@ ModelTypeSet GetEncryptedTypesFromNigori(
encrypted_types.Put(FAVICON_IMAGES);
if (nigori.encrypt_favicon_tracking())
encrypted_types.Put(FAVICON_TRACKING);
+ if (nigori.encrypt_articles())
+ encrypted_types.Put(ARTICLES);
+ if (nigori.encrypt_app_list())
+ encrypted_types.Put(APP_LIST);
return encrypted_types;
}
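
A small sketch of how the two helpers above round-trip the encrypted-types set once ARTICLES and APP_LIST are included; the wrapper function and the exact parameter type of GetEncryptedTypesFromNigori() are assumptions for illustration:

#include "sync/internal_api/public/base/model_type.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/nigori_util.h"

namespace syncer {
namespace syncable {

// Hypothetical check: write a ModelTypeSet into NigoriSpecifics, read it back.
bool EncryptedTypesRoundTrip() {
  ModelTypeSet types;
  types.Put(BOOKMARKS);
  types.Put(ARTICLES);
  types.Put(APP_LIST);

  sync_pb::NigoriSpecifics nigori;
  UpdateNigoriFromEncryptedTypes(types, false /* encrypt_everything */,
                                 &nigori);
  return GetEncryptedTypesFromNigori(nigori).Equals(types);
}

}  // namespace syncable
}  // namespace syncer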
diff --git a/chromium/sync/syncable/syncable_base_write_transaction.cc b/chromium/sync/syncable/syncable_base_write_transaction.cc
new file mode 100644
index 00000000000..a575c699fb5
--- /dev/null
+++ b/chromium/sync/syncable/syncable_base_write_transaction.cc
@@ -0,0 +1,22 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_base_write_transaction.h"
+
+namespace syncer {
+namespace syncable {
+
+BaseWriteTransaction::BaseWriteTransaction(
+ const tracked_objects::Location location,
+ const char* name,
+ WriterTag writer,
+ Directory* directory)
+ : BaseTransaction(location, name, writer, directory) {
+}
+
+BaseWriteTransaction::~BaseWriteTransaction() {}
+
+} // namespace syncable
+} // namespace syncer
+
diff --git a/chromium/sync/syncable/syncable_base_write_transaction.h b/chromium/sync/syncable/syncable_base_write_transaction.h
new file mode 100644
index 00000000000..8ea91a1b106
--- /dev/null
+++ b/chromium/sync/syncable/syncable_base_write_transaction.h
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
+#define SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/syncable/syncable_base_transaction.h"
+
+namespace syncer {
+namespace syncable {
+
+// A base class shared by both ModelNeutralWriteTransaction and
+// WriteTransaction.
+class SYNC_EXPORT BaseWriteTransaction : public BaseTransaction {
+ public:
+ virtual void TrackChangesTo(const EntryKernel* entry) = 0;
+
+ protected:
+ BaseWriteTransaction(
+ const tracked_objects::Location location,
+ const char* name,
+ WriterTag writer,
+ Directory* directory);
+ virtual ~BaseWriteTransaction();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BaseWriteTransaction);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
diff --git a/chromium/sync/syncable/syncable_model_neutral_write_transaction.cc b/chromium/sync/syncable/syncable_model_neutral_write_transaction.cc
new file mode 100644
index 00000000000..9aaf7400726
--- /dev/null
+++ b/chromium/sync/syncable/syncable_model_neutral_write_transaction.cc
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
+
+#include "sync/syncable/directory.h"
+
+namespace syncer {
+namespace syncable {
+
+ModelNeutralWriteTransaction::ModelNeutralWriteTransaction(
+ const tracked_objects::Location& location,
+ WriterTag writer, Directory* directory)
+ : BaseWriteTransaction(location,
+ "ModelNeutralWriteTransaction",
+ writer,
+ directory) {
+ Lock();
+}
+
+ModelNeutralWriteTransaction::~ModelNeutralWriteTransaction() {
+ directory()->CheckInvariantsOnTransactionClose(this, modified_handles_);
+ HandleUnrecoverableErrorIfSet();
+ Unlock();
+}
+
+void ModelNeutralWriteTransaction::TrackChangesTo(const EntryKernel* entry) {
+ modified_handles_.insert(entry->ref(META_HANDLE));
+}
+
+} // namespace syncable
+} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_model_neutral_write_transaction.h b/chromium/sync/syncable/syncable_model_neutral_write_transaction.h
new file mode 100644
index 00000000000..f96725ed69f
--- /dev/null
+++ b/chromium/sync/syncable/syncable_model_neutral_write_transaction.h
@@ -0,0 +1,44 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
+#define SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/syncable_base_write_transaction.h"
+
+namespace syncer {
+namespace syncable {
+
+// A transaction used to instantiate Entries or ModelNeutralMutableEntries.
+//
+// This allows it to be used when making changes to sync entity properties that
+// do not need to be kept in sync with the associated native model.
+//
+// This class differs internally from WriteTransaction in that it does less
+// tracking and reporting of changes to the entries modified within its
+// scope. This is because its changes do not need to be reported to the
+// DirectoryChangeDelegate.
+class SYNC_EXPORT_PRIVATE ModelNeutralWriteTransaction
+ : public BaseWriteTransaction {
+ public:
+ ModelNeutralWriteTransaction(
+ const tracked_objects::Location& location,
+ WriterTag writer,
+ Directory* directory);
+ virtual ~ModelNeutralWriteTransaction();
+
+ virtual void TrackChangesTo(const EntryKernel* entry) OVERRIDE;
+
+ private:
+ MetahandleSet modified_handles_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelNeutralWriteTransaction);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
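
A minimal usage sketch of the new transaction type, assuming the server-side setters (e.g. PutServerVersion) now live on ModelNeutralMutableEntry, as implied by their removal from MutableEntry above; the SetServerVersion helper and includes are illustrative only:

#include "base/location.h"
#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/syncable_model_neutral_write_transaction.h"

namespace syncer {
namespace syncable {

// Hypothetical helper: update a server-side field without notifying the
// DirectoryChangeDelegate.
void SetServerVersion(Directory* dir, int64 metahandle, int64 version) {
  ModelNeutralWriteTransaction trans(FROM_HERE, SYNCAPI, dir);
  ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, metahandle);
  if (entry.good())
    entry.PutServerVersion(version);  // Tracked via TrackChangesTo().
}

}  // namespace syncable
}  // namespace syncer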
diff --git a/chromium/sync/syncable/syncable_util.cc b/chromium/sync/syncable/syncable_util.cc
index 05cc2a96385..d92aa47ed46 100644
--- a/chromium/sync/syncable/syncable_util.cc
+++ b/chromium/sync/syncable/syncable_util.cc
@@ -47,8 +47,8 @@ bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
}
void ChangeEntryIDAndUpdateChildren(
- WriteTransaction* trans,
- MutableEntry* entry,
+ BaseWriteTransaction* trans,
+ ModelNeutralMutableEntry* entry,
const Id& new_id) {
Id old_id = entry->GetId();
if (!entry->PutId(new_id)) {
@@ -64,7 +64,7 @@ void ChangeEntryIDAndUpdateChildren(
trans->directory()->GetChildHandlesById(trans, old_id, &children);
Directory::Metahandles::iterator i = children.begin();
while (i != children.end()) {
- MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
+ ModelNeutralMutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
CHECK(child_entry.good());
// Use the unchecked setter here to avoid touching the child's
// UNIQUE_POSITION field. In this case, UNIQUE_POSITION among the
@@ -101,7 +101,7 @@ std::string GenerateSyncableHash(
hash_input.append(client_tag);
std::string encode_output;
- CHECK(base::Base64Encode(base::SHA1HashString(hash_input), &encode_output));
+ base::Base64Encode(base::SHA1HashString(hash_input), &encode_output);
return encode_output;
}
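
base::Base64Encode() no longer returns a bool, so the CHECK() wrapper is dropped here and in null_invalidation_state_tracker.cc, cryptographer.cc and nigori.cc below. A standalone sketch of the new call pattern (the HashAndEncode helper is illustrative, not part of the patch):

#include <string>

#include "base/base64.h"
#include "base/sha1.h"

// SHA-1 then base64, mirroring GenerateSyncableHash() above.
std::string HashAndEncode(const std::string& input) {
  std::string encoded;
  // Base64Encode() can no longer fail, so there is nothing left to CHECK.
  base::Base64Encode(base::SHA1HashString(input), &encoded);
  return encoded;
}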
diff --git a/chromium/sync/syncable/syncable_util.h b/chromium/sync/syncable/syncable_util.h
index f7d351e0e27..be903fd5954 100644
--- a/chromium/sync/syncable/syncable_util.h
+++ b/chromium/sync/syncable/syncable_util.h
@@ -20,13 +20,14 @@ namespace syncer {
namespace syncable {
class BaseTransaction;
-class WriteTransaction;
-class MutableEntry;
+class BaseWriteTransaction;
+class ModelNeutralMutableEntry;
class Id;
-SYNC_EXPORT_PRIVATE void ChangeEntryIDAndUpdateChildren(WriteTransaction* trans,
- MutableEntry* entry,
- const Id& new_id);
+SYNC_EXPORT_PRIVATE void ChangeEntryIDAndUpdateChildren(
+ BaseWriteTransaction* trans,
+ ModelNeutralMutableEntry* entry,
+ const Id& new_id);
SYNC_EXPORT_PRIVATE bool IsLegalNewParent(BaseTransaction* trans,
const Id& id,
diff --git a/chromium/sync/syncable/syncable_write_transaction.cc b/chromium/sync/syncable/syncable_write_transaction.cc
index 057b258718f..d97ff6728aa 100644
--- a/chromium/sync/syncable/syncable_write_transaction.cc
+++ b/chromium/sync/syncable/syncable_write_transaction.cc
@@ -17,7 +17,7 @@ const int64 kInvalidTransactionVersion = -1;
WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
WriterTag writer, Directory* directory)
- : BaseTransaction(location, "WriteTransaction", writer, directory),
+ : BaseWriteTransaction(location, "WriteTransaction", writer, directory),
transaction_version_(NULL) {
Lock();
}
@@ -25,14 +25,14 @@ WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
Directory* directory,
int64* transaction_version)
- : BaseTransaction(location, "WriteTransaction", SYNCAPI, directory),
+ : BaseWriteTransaction(location, "WriteTransaction", SYNCAPI, directory),
transaction_version_(transaction_version) {
Lock();
if (transaction_version_)
*transaction_version_ = kInvalidTransactionVersion;
}
-void WriteTransaction::SaveOriginal(const EntryKernel* entry) {
+void WriteTransaction::TrackChangesTo(const EntryKernel* entry) {
if (!entry) {
return;
}
@@ -147,7 +147,13 @@ void WriteTransaction::UpdateTransactionVersion(
WriteTransaction::~WriteTransaction() {
const ImmutableEntryKernelMutationMap& mutations = RecordMutations();
- directory()->CheckInvariantsOnTransactionClose(this, mutations.Get());
+
+ MetahandleSet modified_handles;
+ for (EntryKernelMutationMap::const_iterator i = mutations.Get().begin();
+ i != mutations.Get().end(); ++i) {
+ modified_handles.insert(i->first);
+ }
+ directory()->CheckInvariantsOnTransactionClose(this, modified_handles);
// |CheckTreeInvariants| could have thrown an unrecoverable error.
if (unrecoverable_error_set_) {
diff --git a/chromium/sync/syncable/syncable_write_transaction.h b/chromium/sync/syncable/syncable_write_transaction.h
index 0debaa5a366..4d16aca33ce 100644
--- a/chromium/sync/syncable/syncable_write_transaction.h
+++ b/chromium/sync/syncable/syncable_write_transaction.h
@@ -7,7 +7,7 @@
#include "sync/base/sync_export.h"
#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_base_transaction.h"
+#include "sync/syncable/syncable_base_write_transaction.h"
namespace syncer {
namespace syncable {
@@ -15,7 +15,7 @@ namespace syncable {
SYNC_EXPORT extern const int64 kInvalidTransactionVersion;
// Locks db in constructor, unlocks in destructor.
-class SYNC_EXPORT WriteTransaction : public BaseTransaction {
+class SYNC_EXPORT WriteTransaction : public BaseWriteTransaction {
public:
WriteTransaction(const tracked_objects::Location& from_here,
WriterTag writer, Directory* directory);
@@ -30,7 +30,7 @@ class SYNC_EXPORT WriteTransaction : public BaseTransaction {
virtual ~WriteTransaction();
- void SaveOriginal(const EntryKernel* entry);
+ virtual void TrackChangesTo(const EntryKernel* entry) OVERRIDE;
protected:
// Overridden by tests.
diff --git a/chromium/sync/tools/null_invalidation_state_tracker.cc b/chromium/sync/tools/null_invalidation_state_tracker.cc
index 192060bf97f..68237595f49 100644
--- a/chromium/sync/tools/null_invalidation_state_tracker.cc
+++ b/chromium/sync/tools/null_invalidation_state_tracker.cc
@@ -17,26 +17,6 @@ namespace syncer {
NullInvalidationStateTracker::NullInvalidationStateTracker() {}
NullInvalidationStateTracker::~NullInvalidationStateTracker() {}
-InvalidationStateMap
-NullInvalidationStateTracker::GetAllInvalidationStates() const {
- return InvalidationStateMap();
-}
-
-void NullInvalidationStateTracker::SetMaxVersionAndPayload(
- const invalidation::ObjectId& id,
- int64 max_invalidation_version,
- const std::string& payload) {
- LOG(INFO) << "Setting max invalidation version for "
- << ObjectIdToString(id) << " to " << max_invalidation_version
- << " with payload " << payload;
-}
-
-void NullInvalidationStateTracker::Forget(const ObjectIdSet& ids) {
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- LOG(INFO) << "Forgetting invalidation state for " << ObjectIdToString(*it);
- }
-}
-
void NullInvalidationStateTracker::SetInvalidatorClientId(
const std::string& data) {
LOG(INFO) << "Setting invalidator client ID to: " << data;
@@ -58,7 +38,7 @@ std::string NullInvalidationStateTracker::GetBootstrapData() const {
void NullInvalidationStateTracker::SetBootstrapData(const std::string& data) {
std::string base64_data;
- CHECK(base::Base64Encode(data, &base64_data));
+ base::Base64Encode(data, &base64_data);
LOG(INFO) << "Setting bootstrap data to: " << base64_data;
}
@@ -66,20 +46,14 @@ void NullInvalidationStateTracker::Clear() {
// We have no members to clear.
}
-void NullInvalidationStateTracker::GenerateAckHandles(
- const ObjectIdSet& ids,
- const scoped_refptr<base::TaskRunner>& task_runner,
- base::Callback<void(const AckHandleMap&)> callback) {
- AckHandleMap ack_handles;
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- ack_handles.insert(std::make_pair(*it, AckHandle::InvalidAckHandle()));
- }
- CHECK(task_runner->PostTask(FROM_HERE, base::Bind(callback, ack_handles)));
+void NullInvalidationStateTracker::SetSavedInvalidations(
+ const UnackedInvalidationsMap& states) {
+ // Do nothing.
}
-void NullInvalidationStateTracker::Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) {
- LOG(INFO) << "Received ack for " << ObjectIdToString(id);
+UnackedInvalidationsMap
+NullInvalidationStateTracker::GetSavedInvalidations() const {
+ return UnackedInvalidationsMap();
}
} // namespace syncer
diff --git a/chromium/sync/tools/null_invalidation_state_tracker.h b/chromium/sync/tools/null_invalidation_state_tracker.h
index ce05c3327a3..a12844c3d06 100644
--- a/chromium/sync/tools/null_invalidation_state_tracker.h
+++ b/chromium/sync/tools/null_invalidation_state_tracker.h
@@ -18,26 +18,17 @@ class NullInvalidationStateTracker
NullInvalidationStateTracker();
virtual ~NullInvalidationStateTracker();
- virtual InvalidationStateMap GetAllInvalidationStates() const OVERRIDE;
- virtual void SetMaxVersionAndPayload(const invalidation::ObjectId& id,
- int64 max_invalidation_version,
- const std::string& payload) OVERRIDE;
- virtual void Forget(const ObjectIdSet& ids) OVERRIDE;
-
virtual void SetInvalidatorClientId(const std::string& data) OVERRIDE;
virtual std::string GetInvalidatorClientId() const OVERRIDE;
virtual std::string GetBootstrapData() const OVERRIDE;
virtual void SetBootstrapData(const std::string& data) OVERRIDE;
- virtual void Clear() OVERRIDE;
+ virtual void SetSavedInvalidations(
+ const UnackedInvalidationsMap& states) OVERRIDE;
+ virtual UnackedInvalidationsMap GetSavedInvalidations() const OVERRIDE;
- virtual void GenerateAckHandles(
- const ObjectIdSet& ids,
- const scoped_refptr<base::TaskRunner>& task_runner,
- base::Callback<void(const AckHandleMap&)> callback) OVERRIDE;
- virtual void Acknowledge(const invalidation::ObjectId& id,
- const AckHandle& ack_handle) OVERRIDE;
+ virtual void Clear() OVERRIDE;
};
} // namespace syncer
diff --git a/chromium/sync/tools/sync_client.cc b/chromium/sync/tools/sync_client.cc
index 95c4211a43a..e5051040539 100644
--- a/chromium/sync/tools/sync_client.cc
+++ b/chromium/sync/tools/sync_client.cc
@@ -64,7 +64,6 @@ const char kXmppHostPortSwitch[] = "xmpp-host-port";
const char kXmppTrySslTcpFirstSwitch[] = "xmpp-try-ssltcp-first";
const char kXmppAllowInsecureConnectionSwitch[] =
"xmpp-allow-insecure-connection";
-const char kNotificationMethodSwitch[] = "notification-method";
// Needed to use a real host resolver.
class MyTestURLRequestContext : public net::TestURLRequestContext {
@@ -196,6 +195,7 @@ notifier::NotifierOptions ParseNotifierOptions(
request_context_getter) {
notifier::NotifierOptions notifier_options;
notifier_options.request_context_getter = request_context_getter;
+ notifier_options.auth_mechanism = "X-OAUTH2";
if (command_line.HasSwitch(kXmppHostPortSwitch)) {
notifier_options.xmpp_host_port =
@@ -275,7 +275,7 @@ int SyncClientMain(int argc, char* argv[]) {
scoped_ptr<Invalidator> invalidator(new NonBlockingInvalidator(
notifier_options,
invalidator_id,
- null_invalidation_state_tracker.GetAllInvalidationStates(),
+ null_invalidation_state_tracker.GetSavedInvalidations(),
null_invalidation_state_tracker.GetBootstrapData(),
WeakHandle<InvalidationStateTracker>(
null_invalidation_state_tracker.AsWeakPtr()),
@@ -372,7 +372,7 @@ int SyncClientMain(int argc, char* argv[]) {
&null_encryptor,
scoped_ptr<UnrecoverableErrorHandler>(
new LoggingUnrecoverableErrorHandler).Pass(),
- &LogUnrecoverableErrorContext, false,
+ &LogUnrecoverableErrorContext,
&scm_cancelation_signal);
// TODO(akalin): Avoid passing in model parameters multiple times by
// organizing handling of model types.
diff --git a/chromium/sync/tools/sync_listen_notifications.cc b/chromium/sync/tools/sync_listen_notifications.cc
index 9cebcee65bc..5d212f3b8e2 100644
--- a/chromium/sync/tools/sync_listen_notifications.cc
+++ b/chromium/sync/tools/sync_listen_notifications.cc
@@ -28,6 +28,7 @@
#include "sync/notifier/invalidation_util.h"
#include "sync/notifier/invalidator.h"
#include "sync/notifier/non_blocking_invalidator.h"
+#include "sync/notifier/object_id_invalidation_map.h"
#include "sync/tools/null_invalidation_state_tracker.h"
#if defined(OS_MACOSX)
@@ -45,7 +46,6 @@ const char kTokenSwitch[] = "token";
const char kHostPortSwitch[] = "host-port";
const char kTrySslTcpFirstSwitch[] = "try-ssltcp-first";
const char kAllowInsecureConnectionSwitch[] = "allow-insecure-connection";
-const char kNotificationMethodSwitch[] = "notification-method";
// Class to print received notifications events.
class NotificationPrinter : public InvalidationHandler {
@@ -60,12 +60,10 @@ class NotificationPrinter : public InvalidationHandler {
virtual void OnIncomingInvalidation(
const ObjectIdInvalidationMap& invalidation_map) OVERRIDE {
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- LOG(INFO) << "Remote invalidation: id = "
- << ObjectIdToString(it->first)
- << ", version = " << it->second.version
- << ", payload = " << it->second.payload;
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
+ LOG(INFO) << "Remote invalidation: "
+ << invalidation_map.ToString();
}
}
@@ -185,7 +183,7 @@ int SyncListenNotificationsMain(int argc, char* argv[]) {
new NonBlockingInvalidator(
notifier_options,
base::RandBytesAsString(8),
- null_invalidation_state_tracker.GetAllInvalidationStates(),
+ null_invalidation_state_tracker.GetSavedInvalidations(),
null_invalidation_state_tracker.GetBootstrapData(),
WeakHandle<InvalidationStateTracker>(
null_invalidation_state_tracker.AsWeakPtr()),
diff --git a/chromium/sync/tools/testserver/chromiumsync.py b/chromium/sync/tools/testserver/chromiumsync.py
index b95c6be6e4b..496cb6ae7e8 100644
--- a/chromium/sync/tools/testserver/chromiumsync.py
+++ b/chromium/sync/tools/testserver/chromiumsync.py
@@ -8,8 +8,11 @@ The details of the protocol are described mostly by comments in the protocol
buffer definition at chrome/browser/sync/protocol/sync.proto.
"""
+import base64
import cgi
import copy
+import google.protobuf.text_format
+import hashlib
import operator
import pickle
import random
@@ -18,12 +21,16 @@ import sys
import threading
import time
import urlparse
+import uuid
+import app_list_specifics_pb2
import app_notification_specifics_pb2
import app_setting_specifics_pb2
import app_specifics_pb2
+import article_specifics_pb2
import autofill_specifics_pb2
import bookmark_specifics_pb2
+import client_commands_pb2
import dictionary_specifics_pb2
import get_updates_caller_info_pb2
import extension_setting_specifics_pb2
@@ -41,6 +48,8 @@ import search_engine_specifics_pb2
import session_specifics_pb2
import sync_pb2
import sync_enums_pb2
+import synced_notification_data_pb2
+import synced_notification_render_pb2
import synced_notification_specifics_pb2
import theme_specifics_pb2
import typed_url_specifics_pb2
@@ -52,8 +61,10 @@ import typed_url_specifics_pb2
ALL_TYPES = (
TOP_LEVEL, # The type of the 'Google Chrome' folder.
APPS,
+ APP_LIST,
APP_NOTIFICATION,
APP_SETTINGS,
+ ARTICLE,
AUTOFILL,
AUTOFILL_PROFILE,
BOOKMARK,
@@ -75,7 +86,7 @@ ALL_TYPES = (
TYPED_URL,
EXTENSION_SETTINGS,
FAVICON_IMAGES,
- FAVICON_TRACKING) = range(26)
+ FAVICON_TRACKING) = range(28)
# An enumeration on the frequency at which the server should send errors
# to the client. This would be specified by the url that triggers the error.
@@ -92,9 +103,11 @@ TOP_LEVEL_FOLDER_TAG = 'google_chrome'
# to that datatype. Note that TOP_LEVEL has no such token.
SYNC_TYPE_FIELDS = sync_pb2.EntitySpecifics.DESCRIPTOR.fields_by_name
SYNC_TYPE_TO_DESCRIPTOR = {
+ APP_LIST: SYNC_TYPE_FIELDS['app_list'],
APP_NOTIFICATION: SYNC_TYPE_FIELDS['app_notification'],
APP_SETTINGS: SYNC_TYPE_FIELDS['app_setting'],
APPS: SYNC_TYPE_FIELDS['app'],
+ ARTICLE: SYNC_TYPE_FIELDS['article'],
AUTOFILL: SYNC_TYPE_FIELDS['autofill'],
AUTOFILL_PROFILE: SYNC_TYPE_FIELDS['autofill_profile'],
BOOKMARK: SYNC_TYPE_FIELDS['bookmark'],
@@ -176,6 +189,10 @@ class InducedErrorFrequencyNotDefined(Error):
"""The error frequency defined is not handled."""
+class ClientNotConnectedError(Error):
+ """The client is not connected to the server."""
+
+
def GetEntryType(entry):
"""Extract the sync type from a SyncEntry.
@@ -448,8 +465,7 @@ class UpdateSieve(object):
final_stamp = max(old_timestamp, new_timestamp)
final_migration = self._migration_history.GetLatestVersion(data_type)
new_marker.token = pickle.dumps((final_stamp, final_migration))
- if new_marker not in self._original_request.from_progress_marker:
- get_updates_response.new_progress_marker.add().MergeFrom(new_marker)
+ get_updates_response.new_progress_marker.add().MergeFrom(new_marker)
elif self._original_request.HasField('from_timestamp'):
if self._original_request.from_timestamp < new_timestamp:
get_updates_response.new_timestamp = new_timestamp
@@ -463,6 +479,8 @@ class SyncDataModel(object):
_PERMANENT_ITEM_SPECS = [
PermanentItem('google_chrome_apps', name='Apps',
parent_tag=ROOT_ID, sync_type=APPS),
+ PermanentItem('google_chrome_app_list', name='App List',
+ parent_tag=ROOT_ID, sync_type=APP_LIST),
PermanentItem('google_chrome_app_notifications', name='App Notifications',
parent_tag=ROOT_ID, sync_type=APP_NOTIFICATION),
PermanentItem('google_chrome_app_settings',
@@ -530,6 +548,8 @@ class SyncDataModel(object):
parent_tag=ROOT_ID, sync_type=TYPED_URL),
PermanentItem('google_chrome_dictionary', name='Dictionary',
parent_tag=ROOT_ID, sync_type=DICTIONARY),
+ PermanentItem('google_chrome_articles', name='Articles',
+ parent_tag=ROOT_ID, sync_type=ARTICLE),
]
def __init__(self):
@@ -925,7 +945,7 @@ class SyncDataModel(object):
# tombstone. A sync server must track deleted IDs forever, since it does
# not keep track of client knowledge (there's no deletion ACK event).
if entry.deleted:
- def MakeTombstone(id_string):
+ def MakeTombstone(id_string, datatype):
"""Make a tombstone entry that will replace the entry being deleted.
Args:
@@ -934,13 +954,11 @@ class SyncDataModel(object):
A new SyncEntity reflecting the fact that the entry is deleted.
"""
# Only the ID, version and deletion state are preserved on a tombstone.
- # TODO(nick): Does the production server not preserve the type? Not
- # doing so means that tombstones cannot be filtered based on
- # requested_types at GetUpdates time.
tombstone = sync_pb2.SyncEntity()
tombstone.id_string = id_string
tombstone.deleted = True
tombstone.name = ''
+ tombstone.specifics.CopyFrom(GetDefaultEntitySpecifics(datatype))
return tombstone
def IsChild(child_id):
@@ -963,10 +981,12 @@ class SyncDataModel(object):
# Mark all children that were identified as deleted.
for child_id in child_ids:
- self._SaveEntry(MakeTombstone(child_id))
+ datatype = GetEntryType(self._entries[child_id])
+ self._SaveEntry(MakeTombstone(child_id, datatype))
# Delete entry itself.
- entry = MakeTombstone(entry.id_string)
+ datatype = GetEntryType(self._entries[entry.id_string])
+ entry = MakeTombstone(entry.id_string, datatype)
else:
# Comments in sync.proto detail how the representation of positional
# ordering works.
@@ -1134,6 +1154,85 @@ class SyncDataModel(object):
def GetInducedError(self):
return self.induced_error
+ def AddSyncedNotification(self, serialized_notification):
+ """Adds a synced notification to the server data.
+
+ The notification will be delivered to the client on the next GetUpdates
+ call.
+
+ Args:
+ serialized_notification: A serialized CoalescedSyncedNotification.
+
+ Returns:
+ The string representation of the added SyncEntity.
+
+ Raises:
+ ClientNotConnectedError: if the client has not yet connected to this
+ server
+ """
+ # A unique string used wherever a unique ID for this notification is
+ # required.
+ unique_notification_id = str(uuid.uuid4())
+
+ specifics = self._CreateSyncedNotificationEntitySpecifics(
+ unique_notification_id, serialized_notification)
+
+ # Create the root SyncEntity representing a single notification.
+ entity = sync_pb2.SyncEntity()
+ entity.specifics.CopyFrom(specifics)
+ entity.parent_id_string = self._ServerTagToId(
+ 'google_chrome_synced_notifications')
+ entity.name = 'Synced notification added for testing'
+ entity.server_defined_unique_tag = unique_notification_id
+
+ # Set the version to one more than the greatest version number already seen.
+ entries = sorted(self._entries.values(), key=operator.attrgetter('version'))
+ if len(entries) < 1:
+ raise ClientNotConnectedError
+ entity.version = entries[-1].version + 1
+
+ entity.client_defined_unique_tag = self._CreateSyncedNotificationClientTag(
+ specifics.synced_notification.coalesced_notification.key)
+ entity.id_string = self._ClientTagToId(GetEntryType(entity),
+ entity.client_defined_unique_tag)
+
+ self._entries[entity.id_string] = copy.deepcopy(entity)
+
+ return google.protobuf.text_format.MessageToString(entity)
+
+ def _CreateSyncedNotificationEntitySpecifics(self, unique_id,
+ serialized_notification):
+ """Create the EntitySpecifics proto for a synced notification."""
+ coalesced = synced_notification_data_pb2.CoalescedSyncedNotification()
+ google.protobuf.text_format.Merge(serialized_notification, coalesced)
+
+ # Override the provided key so that we have a unique one.
+ coalesced.key = unique_id
+
+ specifics = sync_pb2.EntitySpecifics()
+ notification_specifics = \
+ synced_notification_specifics_pb2.SyncedNotificationSpecifics()
+ notification_specifics.coalesced_notification.CopyFrom(coalesced)
+ specifics.synced_notification.CopyFrom(notification_specifics)
+
+ return specifics
+
+
+ def _CreateSyncedNotificationClientTag(self, key):
+ """Create the client_defined_unique_tag value for a SyncedNotification.
+
+ Args:
+ key: The notification key used to create the client tag.
+
+ Returns:
+ The string value to be used as the client_defined_unique_tag.
+ """
+ serialized_type = sync_pb2.EntitySpecifics()
+ specifics = synced_notification_specifics_pb2.SyncedNotificationSpecifics()
+ serialized_type.synced_notification.CopyFrom(specifics)
+ hash_input = serialized_type.SerializeToString() + key
+ return base64.b64encode(hashlib.sha1(hash_input).digest())
+
class TestServer(object):
"""An object to handle requests for one (and only one) Chrome Sync account.
@@ -1154,6 +1253,16 @@ class TestServer(object):
for times in xrange(0, sys.maxint) for c in xrange(ord('A'), ord('Z')))
self.transient_error = False
self.sync_count = 0
+ # Gaia OAuth2 Token fields and their default values.
+ self.response_code = 200
+ self.request_token = 'rt1'
+ self.access_token = 'at1'
+ self.expires_in = 3600
+ self.token_type = 'Bearer'
+ # The ClientCommand to send back on each ServerToClientResponse. If set to
+ # None, no ClientCommand should be sent.
+ self._client_command = None
+
def GetShortClientName(self, query):
parsed = cgi.parse_qs(query[query.find('?')+1:])
@@ -1340,6 +1449,10 @@ class TestServer(object):
response = sync_pb2.ClientToServerResponse()
response.error_code = sync_enums_pb2.SyncEnums.SUCCESS
+
+ if self._client_command:
+ response.client_command.CopyFrom(self._client_command)
+
self.CheckStoreBirthday(request)
response.store_birthday = self.account.store_birthday
self.CheckTransientError()
@@ -1478,3 +1591,53 @@ class TestServer(object):
if update_request.need_encryption_key or sending_nigori_node:
update_response.encryption_keys.extend(self.account.GetKeystoreKeys())
+
+ def HandleGetOauth2Token(self):
+ return (int(self.response_code),
+ '{\n'
+ ' \"refresh_token\": \"' + self.request_token + '\",\n'
+ ' \"access_token\": \"' + self.access_token + '\",\n'
+ ' \"expires_in\": ' + str(self.expires_in) + ',\n'
+ ' \"token_type\": \"' + self.token_type +'\"\n'
+ '}')
+
+ def HandleSetOauth2Token(self, response_code, request_token, access_token,
+ expires_in, token_type):
+ if response_code != 0:
+ self.response_code = response_code
+ if request_token != '':
+ self.request_token = request_token
+ if access_token != '':
+ self.access_token = access_token
+ if expires_in != 0:
+ self.expires_in = expires_in
+ if token_type != '':
+ self.token_type = token_type
+
+ return (200,
+ '<html><title>Set OAuth2 Token</title>'
+ '<H1>This server will now return the OAuth2 Token:</H1>'
+ '<p>response_code: ' + str(self.response_code) + '</p>'
+ '<p>request_token: ' + self.request_token + '</p>'
+ '<p>access_token: ' + self.access_token + '</p>'
+ '<p>expires_in: ' + str(self.expires_in) + '</p>'
+ '<p>token_type: ' + self.token_type + '</p>'
+ '</html>')
+
+ def CustomizeClientCommand(self, sessions_commit_delay_seconds):
+ """Customizes the value of the ClientCommand of ServerToClientResponse.
+
+ Currently, this only allows for changing the sessions_commit_delay_seconds
+ field. This is useful for testing in conjunction with
+ AddSyncedNotification so that synced notifications are seen immediately
+ after triggering them with an HTTP call to the test server.
+
+ Args:
+ sessions_commit_delay_seconds: The desired sync delay time for sessions.
+ """
+ if not self._client_command:
+ self._client_command = client_commands_pb2.ClientCommand()
+
+ self._client_command.sessions_commit_delay_seconds = \
+ sessions_commit_delay_seconds
+ return self._client_command
diff --git a/chromium/sync/tools/testserver/sync_testserver.py b/chromium/sync/tools/testserver/sync_testserver.py
index 53532cb5b7d..5954e012ca0 100755
--- a/chromium/sync/tools/testserver/sync_testserver.py
+++ b/chromium/sync/tools/testserver/sync_testserver.py
@@ -151,10 +151,17 @@ class SyncPageHandler(testserver_base.BasePageHandler):
self.ChromiumSyncEnableKeystoreEncryptionOpHandler,
self.ChromiumSyncRotateKeystoreKeysOpHandler,
self.ChromiumSyncEnableManagedUserAcknowledgementHandler,
- self.ChromiumSyncEnablePreCommitGetUpdateAvoidanceHandler]
+ self.ChromiumSyncEnablePreCommitGetUpdateAvoidanceHandler,
+ self.GaiaOAuth2TokenHandler,
+ self.GaiaSetOAuth2TokenResponseHandler,
+ self.TriggerSyncedNotificationHandler,
+ self.SyncedNotificationsPageHandler,
+ self.CustomizeClientCommandHandler]
post_handlers = [self.ChromiumSyncCommandHandler,
- self.ChromiumSyncTimeHandler]
+ self.ChromiumSyncTimeHandler,
+ self.GaiaOAuth2TokenHandler,
+ self.GaiaSetOAuth2TokenResponseHandler]
testserver_base.BasePageHandler.__init__(self, request, client_address,
sync_http_server, [], get_handlers,
[], post_handlers, [])
@@ -441,6 +448,135 @@ class SyncPageHandler(testserver_base.BasePageHandler):
self.wfile.write(raw_reply)
return True
+ def GaiaOAuth2TokenHandler(self):
+ test_name = "/o/oauth2/token"
+ if not self._ShouldHandleRequest(test_name):
+ return False
+ if self.headers.getheader('content-length'):
+ length = int(self.headers.getheader('content-length'))
+ _raw_request = self.rfile.read(length)
+ result, raw_reply = (
+ self.server._sync_handler.HandleGetOauth2Token())
+ self.send_response(result)
+ self.send_header('Content-Type', 'application/json')
+ self.send_header('Content-Length', len(raw_reply))
+ self.end_headers()
+ self.wfile.write(raw_reply)
+ return True
+
+ def GaiaSetOAuth2TokenResponseHandler(self):
+ test_name = "/setfakeoauth2token"
+ if not self._ShouldHandleRequest(test_name):
+ return False
+
+ # The index of 'query' is 4.
+ # See http://docs.python.org/2/library/urlparse.html
+ query = urlparse.urlparse(self.path)[4]
+ query_params = urlparse.parse_qs(query)
+
+ response_code = 0
+ request_token = ''
+ access_token = ''
+ expires_in = 0
+ token_type = ''
+
+ if 'response_code' in query_params:
+ response_code = query_params['response_code'][0]
+ if 'request_token' in query_params:
+ request_token = query_params['request_token'][0]
+ if 'access_token' in query_params:
+ access_token = query_params['access_token'][0]
+ if 'expires_in' in query_params:
+ expires_in = query_params['expires_in'][0]
+ if 'token_type' in query_params:
+ token_type = query_params['token_type'][0]
+
+ result, raw_reply = (
+ self.server._sync_handler.HandleSetOauth2Token(
+ response_code, request_token, access_token, expires_in, token_type))
+ self.send_response(result)
+ self.send_header('Content-Type', 'text/html')
+ self.send_header('Content-Length', len(raw_reply))
+ self.end_headers()
+ self.wfile.write(raw_reply)
+ return True
+
+ def TriggerSyncedNotificationHandler(self):
+ test_name = "/triggersyncednotification"
+ if not self._ShouldHandleRequest(test_name):
+ return False
+
+ query = urlparse.urlparse(self.path)[4]
+ query_params = urlparse.parse_qs(query)
+
+ serialized_notification = ''
+
+ if 'serialized_notification' in query_params:
+ serialized_notification = query_params['serialized_notification'][0]
+
+ try:
+ notification_string = self.server._sync_handler.account \
+ .AddSyncedNotification(serialized_notification)
+ reply = "A synced notification was triggered:\n\n"
+ reply += "<code>{}</code>.".format(notification_string)
+ response_code = 200
+ except chromiumsync.ClientNotConnectedError:
+ reply = ('The client is not connected to the server, so the notification'
+ ' could not be created.')
+ response_code = 400
+
+ self.send_response(response_code)
+ self.send_header('Content-Type', 'text/html')
+ self.send_header('Content-Length', len(reply))
+ self.end_headers()
+ self.wfile.write(reply)
+ return True
+
+ def CustomizeClientCommandHandler(self):
+ test_name = "/customizeclientcommand"
+ if not self._ShouldHandleRequest(test_name):
+ return False
+
+ query = urlparse.urlparse(self.path)[4]
+ query_params = urlparse.parse_qs(query)
+
+ if 'sessions_commit_delay_seconds' in query_params:
+ sessions_commit_delay = query_params['sessions_commit_delay_seconds'][0]
+ try:
+ command_string = self.server._sync_handler.CustomizeClientCommand(
+ int(sessions_commit_delay))
+ response_code = 200
+ reply = "The ClientCommand was customized:\n\n"
+ reply += "<code>{}</code>.".format(command_string)
+ except ValueError:
+ response_code = 400
+ reply = "sessions_commit_delay_seconds was not an int"
+ else:
+ response_code = 400
+ reply = "sessions_commit_delay_seconds is required"
+
+ self.send_response(response_code)
+ self.send_header('Content-Type', 'text/html')
+ self.send_header('Content-Length', len(reply))
+ self.end_headers()
+ self.wfile.write(reply)
+ return True
+
+ def SyncedNotificationsPageHandler(self):
+ test_name = "/syncednotifications"
+ if not self._ShouldHandleRequest(test_name):
+ return False
+
+ html = open('sync/tools/testserver/synced_notifications.html', 'r').read()
+
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/html')
+ self.send_header('Content-Length', len(html))
+ self.end_headers()
+ self.wfile.write(html)
+ return True
+
+
class SyncServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for the net test servers."""
@@ -452,8 +588,12 @@ class SyncServerRunner(testserver_base.TestServerRunner):
host = self.options.host
xmpp_port = self.options.xmpp_port
server = SyncHTTPServer((host, port), xmpp_port, SyncPageHandler)
- print 'Sync HTTP server started on port %d...' % server.server_port
- print 'Sync XMPP server started on port %d...' % server.xmpp_port
+ print ('Sync HTTP server started at %s:%d/chromiumsync...' %
+ (host, server.server_port))
+ print ('Fake OAuth2 Token server started at %s:%d/o/oauth2/token...' %
+ (host, server.server_port))
+ print ('Sync XMPP server started at %s:%d...' %
+ (host, server.xmpp_port))
server_data['port'] = server.server_port
server_data['xmpp_port'] = server.xmpp_port
return server
diff --git a/chromium/sync/tools/testserver/synced_notifications.html b/chromium/sync/tools/testserver/synced_notifications.html
new file mode 100644
index 00000000000..c06f80b2036
--- /dev/null
+++ b/chromium/sync/tools/testserver/synced_notifications.html
@@ -0,0 +1,51 @@
+<html>
+ <head>
+ <title>Synced notifications</title>
+
+ <script type="text/javascript">
+ // Creates link (appended to the bottom of the page body) to trigger a
+ // synced notifications. The link's title will be |title| and
+ // |serialized_notification| is the ASCII-serialized version of the
+ // CoalescedSyncedNotification to be triggered.
+ function appendNotificationLink(title, serialized_notification) {
+ var link = document.createElement('a');
+ link.innerHTML = title;
+ link.setAttribute('target', '_blank');
+ link.setAttribute('href', 'triggersyncednotification?' +
+ 'serialized_notification=' +
+ encodeURIComponent(serialized_notification));
+ document.body.appendChild(link);
+ }
+ </script>
+ </head>
+
+ <body>
+ <h1>Synced notifications</h1>
+
+ <h2>Step 0: Sign in to the browser and set up Sync</h2>
+
+ <h2>Step 1: Click this link (only required once per server lifetime)</h2>
+
+ <a href="/customizeclientcommand?sessions_commit_delay_seconds=0">
+ Make notification triggering instant</a>
+
+ <h2>Step 2: Ctrl-Click the links below to trigger synced notifications</h2>
+
+ <script type="text/javascript">
+ appendNotificationLink('Simple notification',
+ 'key: \"foo\"\n' +
+ 'priority: 2\n' +
+ 'read_state: 1\n' +
+ 'render_info {\n' +
+ ' collapsed_info {\n' +
+ ' creation_timestamp_usec: 42\n' +
+ ' simple_collapsed_layout {\n' +
+ ' annotation: \"Space Needle, 12:00 pm\"\n' +
+ ' description: \"Space Needle, 12:00 pm\"\n' +
+ ' heading: \"New appointment\"\n' +
+ ' }\n' +
+ ' }\n' +
+ '}');
+ </script>
+ </body>
+</html>
diff --git a/chromium/sync/util/DEPS b/chromium/sync/util/DEPS
index 13d7bf1ac44..d311654f50c 100644
--- a/chromium/sync/util/DEPS
+++ b/chromium/sync/util/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+chromeos",
"+crypto",
"+sync/base",
"+sync/internal_api/public/base",
@@ -6,9 +7,6 @@ include_rules = [
"+sync/protocol",
"+sync/test/fake_encryptor.h",
- # TODO(rsimha): Remove this after http://crbug.com/126732 is fixed.
- "+chromeos/chromeos_switches.h",
-
# TODO(zea): remove this once we don't need the cryptographer to get the set
# of encrypted types.
"+sync/syncable/nigori_handler.h"
diff --git a/chromium/sync/util/cryptographer.cc b/chromium/sync/util/cryptographer.cc
index 0fed51e1674..29f378125a7 100644
--- a/chromium/sync/util/cryptographer.cc
+++ b/chromium/sync/util/cryptographer.cc
@@ -261,10 +261,8 @@ bool Cryptographer::GetBootstrapToken(std::string* token) const {
return false;
}
- if (!base::Base64Encode(encrypted_token, token)) {
- NOTREACHED();
- return false;
- }
+ base::Base64Encode(encrypted_token, token);
+
return true;
}
diff --git a/chromium/sync/util/data_type_histogram.h b/chromium/sync/util/data_type_histogram.h
index 5c8e840a74e..e3a8d6ff795 100644
--- a/chromium/sync/util/data_type_histogram.h
+++ b/chromium/sync/util/data_type_histogram.h
@@ -72,6 +72,9 @@
case ::syncer::APPS: \
PER_DATA_TYPE_MACRO("Apps"); \
break; \
+ case ::syncer::APP_LIST: \
+ PER_DATA_TYPE_MACRO("AppList"); \
+ break; \
case ::syncer::APP_SETTINGS: \
PER_DATA_TYPE_MACRO("AppSettings"); \
break; \
@@ -111,6 +114,9 @@
case ::syncer::MANAGED_USERS: \
PER_DATA_TYPE_MACRO("ManagedUser"); \
break; \
+ case ::syncer::ARTICLES: \
+ PER_DATA_TYPE_MACRO("Article"); \
+ break; \
case ::syncer::PROXY_TABS: \
PER_DATA_TYPE_MACRO("Tabs"); \
break; \
diff --git a/chromium/sync/util/get_session_name.cc b/chromium/sync/util/get_session_name.cc
index 73041aebddc..3a09c51379d 100644
--- a/chromium/sync/util/get_session_name.cc
+++ b/chromium/sync/util/get_session_name.cc
@@ -11,10 +11,7 @@
#include "base/sys_info.h"
#include "base/task_runner.h"
-#if defined(OS_CHROMEOS)
-#include "base/command_line.h"
-#include "chromeos/chromeos_switches.h"
-#elif defined(OS_LINUX)
+#if defined(OS_LINUX)
#include "sync/util/get_session_name_linux.h"
#elif defined(OS_IOS)
#include "sync/util/get_session_name_ios.h"
@@ -33,27 +30,7 @@ namespace {
std::string GetSessionNameSynchronously() {
std::string session_name;
#if defined(OS_CHROMEOS)
- // The approach below is similar to that used by the CrOs implementation of
- // StatisticsProvider::GetMachineStatistic(CHROMEOS_RELEASE_BOARD).
- // See chrome/browser/chromeos/system/statistics_provider.{h|cc}.
- //
- // We cannot use StatisticsProvider here because of the mutual dependency
- // it creates between sync.gyp:sync and chrome.gyp:browser.
- //
- // Even though this code is ad hoc and fragile, it remains the only means of
- // determining the Chrome OS hardware platform so we can display the right
- // device name in the "Other devices" section of the new tab page.
- // TODO(rsimha): Change this once a better alternative is available.
- // See http://crbug.com/126732.
- std::string board;
- const CommandLine* command_line = CommandLine::ForCurrentProcess();
- if (command_line->HasSwitch(chromeos::switches::kChromeOSReleaseBoard)) {
- board = command_line->
- GetSwitchValueASCII(chromeos::switches::kChromeOSReleaseBoard);
- } else {
- LOG(ERROR) << "Failed to get board information";
- }
-
+ std::string board = base::SysInfo::GetLsbReleaseBoard();
// Currently, only "stumpy" type of board is considered Chromebox, and
// anything else is Chromebook. On these devices, session_name should look
// like "stumpy-signed-mp-v2keys" etc. The information can be checked on
diff --git a/chromium/sync/util/get_session_name_unittest.cc b/chromium/sync/util/get_session_name_unittest.cc
index e9739456939..724cd8b7f69 100644
--- a/chromium/sync/util/get_session_name_unittest.cc
+++ b/chromium/sync/util/get_session_name_unittest.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "base/sys_info.h"
#include "sync/util/get_session_name.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -42,15 +43,8 @@ TEST_F(GetSessionNameTest, GetSessionNameSynchronously) {
// Call GetSessionNameSynchronouslyForTesting on ChromeOS where the board type
// is "lumpy-signed-mp-v2keys" and make sure the return value is "Chromebook".
TEST_F(GetSessionNameTest, GetSessionNameSynchronouslyChromebook) {
- // This test cannot be run on a real CrOs device, since it will already have a
- // board type, and we cannot override it.
- // TODO(rsimha): Rewrite this test once http://crbug.com/126732 is fixed.
- CommandLine* command_line = CommandLine::ForCurrentProcess();
- if (command_line->HasSwitch(chromeos::switches::kChromeOSReleaseBoard))
- return;
-
- command_line->AppendSwitchASCII(chromeos::switches::kChromeOSReleaseBoard,
- "lumpy-signed-mp-v2keys");
+ const char* kLsbRelease = "CHROMEOS_RELEASE_BOARD=lumpy-signed-mp-v2keys\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
const std::string& session_name = GetSessionNameSynchronouslyForTesting();
EXPECT_EQ("Chromebook", session_name);
}
@@ -58,15 +52,8 @@ TEST_F(GetSessionNameTest, GetSessionNameSynchronouslyChromebook) {
// Call GetSessionNameSynchronouslyForTesting on ChromeOS where the board type
// is "stumpy-signed-mp-v2keys" and make sure the return value is "Chromebox".
TEST_F(GetSessionNameTest, GetSessionNameSynchronouslyChromebox) {
- // This test cannot be run on a real CrOs device, since it will already have a
- // board type, and we cannot override it.
- // TODO(rsimha): Rewrite this test once http://crbug.com/126732 is fixed.
- CommandLine* command_line = CommandLine::ForCurrentProcess();
- if (command_line->HasSwitch(chromeos::switches::kChromeOSReleaseBoard))
- return;
-
- command_line->AppendSwitchASCII(chromeos::switches::kChromeOSReleaseBoard,
- "stumpy-signed-mp-v2keys");
+ const char* kLsbRelease = "CHROMEOS_RELEASE_BOARD=stumpy-signed-mp-v2keys\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
const std::string& session_name = GetSessionNameSynchronouslyForTesting();
EXPECT_EQ("Chromebox", session_name);
}
diff --git a/chromium/sync/util/nigori.cc b/chromium/sync/util/nigori.cc
index b0158f36738..e74d81a9185 100644
--- a/chromium/sync/util/nigori.cc
+++ b/chromium/sync/util/nigori.cc
@@ -150,7 +150,8 @@ bool Nigori::Permute(Type type, const std::string& name,
output.assign(ciphertext);
output.append(hash.begin(), hash.end());
- return Base64Encode(output, permuted);
+ Base64Encode(output, permuted);
+ return true;
}
// Enc[Kenc,Kmac](value)
@@ -186,7 +187,8 @@ bool Nigori::Encrypt(const std::string& value, std::string* encrypted) const {
output.append(ciphertext);
output.append(hash.begin(), hash.end());
- return Base64Encode(output, encrypted);
+ Base64Encode(output, encrypted);
+ return true;
}
bool Nigori::Decrypt(const std::string& encrypted, std::string* value) const {