Diffstat (limited to 'chromium/net/reporting')
-rw-r--r-- chromium/net/reporting/reporting_browsing_data_remover.cc | 7
-rw-r--r-- chromium/net/reporting/reporting_browsing_data_remover.h | 5
-rw-r--r-- chromium/net/reporting/reporting_browsing_data_remover_unittest.cc | 22
-rw-r--r-- chromium/net/reporting/reporting_cache_impl.cc | 4
-rw-r--r-- chromium/net/reporting/reporting_delivery_agent.cc | 290
-rw-r--r-- chromium/net/reporting/reporting_delivery_agent.h | 41
-rw-r--r-- chromium/net/reporting/reporting_delivery_agent_unittest.cc | 315
-rw-r--r-- chromium/net/reporting/reporting_header_parser.cc | 147
-rw-r--r-- chromium/net/reporting/reporting_header_parser.h | 58
-rw-r--r-- chromium/net/reporting/reporting_report.cc | 12
-rw-r--r-- chromium/net/reporting/reporting_report.h | 12
-rw-r--r-- chromium/net/reporting/reporting_service.cc | 17
-rw-r--r-- chromium/net/reporting/reporting_service.h | 4
-rw-r--r-- chromium/net/reporting/reporting_service_unittest.cc | 6
-rw-r--r-- chromium/net/reporting/reporting_test_util.cc | 4
-rw-r--r-- chromium/net/reporting/reporting_test_util.h | 4
-rw-r--r-- chromium/net/reporting/reporting_uploader.cc | 34
-rw-r--r-- chromium/net/reporting/reporting_uploader_unittest.cc | 4
18 files changed, 465 insertions, 521 deletions
diff --git a/chromium/net/reporting/reporting_browsing_data_remover.cc b/chromium/net/reporting/reporting_browsing_data_remover.cc
index 310242aa14a..084b8d6c96f 100644
--- a/chromium/net/reporting/reporting_browsing_data_remover.cc
+++ b/chromium/net/reporting/reporting_browsing_data_remover.cc
@@ -15,7 +15,7 @@ namespace net {
// static
void ReportingBrowsingDataRemover::RemoveBrowsingData(
ReportingCache* cache,
- int data_type_mask,
+ uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>& origin_filter) {
if ((data_type_mask & DATA_TYPE_REPORTS) != 0) {
std::vector<const ReportingReport*> all_reports;
@@ -42,8 +42,9 @@ void ReportingBrowsingDataRemover::RemoveBrowsingData(
}
// static
-void ReportingBrowsingDataRemover::RemoveAllBrowsingData(ReportingCache* cache,
- int data_type_mask) {
+void ReportingBrowsingDataRemover::RemoveAllBrowsingData(
+ ReportingCache* cache,
+ uint64_t data_type_mask) {
if ((data_type_mask & DATA_TYPE_REPORTS) != 0) {
cache->RemoveAllReports(
ReportingReport::Outcome::ERASED_BROWSING_DATA_REMOVED);
diff --git a/chromium/net/reporting/reporting_browsing_data_remover.h b/chromium/net/reporting/reporting_browsing_data_remover.h
index ce3df31ffc0..f4a9e2b68b9 100644
--- a/chromium/net/reporting/reporting_browsing_data_remover.h
+++ b/chromium/net/reporting/reporting_browsing_data_remover.h
@@ -32,13 +32,14 @@ class NET_EXPORT ReportingBrowsingDataRemover {
// persisted, it will need to be cleared as well.
static void RemoveBrowsingData(
ReportingCache* cache,
- int data_type_mask,
+ uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>& origin_filter);
// Like RemoveBrowsingData except removes data for all origins without a
// filter. Allows slight optimization over passing an always-true filter to
// RemoveBrowsingData.
- static void RemoveAllBrowsingData(ReportingCache* cache, int data_type_mask);
+ static void RemoveAllBrowsingData(ReportingCache* cache,
+ uint64_t data_type_mask);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ReportingBrowsingDataRemover);
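Call sites compose the widened mask by ORing the DATA_TYPE_* flags together; nothing else about the API changes. A minimal usage sketch, assuming |cache| is an existing ReportingCache* and the host filter is purely illustrative:

uint64_t data_type_mask = ReportingBrowsingDataRemover::DATA_TYPE_REPORTS |
                          ReportingBrowsingDataRemover::DATA_TYPE_CLIENTS;

// Remove reports and clients only for origins the filter accepts.
ReportingBrowsingDataRemover::RemoveBrowsingData(
    cache, data_type_mask,
    base::BindRepeating(
        [](const GURL& url) { return url.host() == "example.test"; }));

// Remove them for all origins, skipping the filter entirely.
ReportingBrowsingDataRemover::RemoveAllBrowsingData(cache, data_type_mask);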
diff --git a/chromium/net/reporting/reporting_browsing_data_remover_unittest.cc b/chromium/net/reporting/reporting_browsing_data_remover_unittest.cc
index 1ddb5c266ad..687bf5a4e95 100644
--- a/chromium/net/reporting/reporting_browsing_data_remover_unittest.cc
+++ b/chromium/net/reporting/reporting_browsing_data_remover_unittest.cc
@@ -24,7 +24,7 @@ class ReportingBrowsingDataRemoverTest : public ReportingTestBase {
void RemoveBrowsingData(bool remove_reports,
bool remove_clients,
std::string host) {
- int data_type_mask = 0;
+ uint64_t data_type_mask = 0;
if (remove_reports)
data_type_mask |= ReportingBrowsingDataRemover::DATA_TYPE_REPORTS;
if (remove_clients)
@@ -43,15 +43,15 @@ class ReportingBrowsingDataRemoverTest : public ReportingTestBase {
// TODO(chlily): Take NIK.
void AddReport(const GURL& url) {
- cache()->AddReport(NetworkIsolationKey::Todo(), url, kUserAgent_, kGroup_,
- kType_, std::make_unique<base::DictionaryValue>(), 0,
+ cache()->AddReport(NetworkIsolationKey(), url, kUserAgent_, kGroup_, kType_,
+ std::make_unique<base::DictionaryValue>(), 0,
tick_clock()->NowTicks(), 0);
}
// TODO(chlily): Take NIK.
void SetEndpoint(const url::Origin& origin) {
SetEndpointInCache(
- ReportingEndpointGroupKey(NetworkIsolationKey::Todo(), origin, kGroup_),
+ ReportingEndpointGroupKey(NetworkIsolationKey(), origin, kGroup_),
kEndpoint_, base::Time::Now() + base::TimeDelta::FromDays(7));
}
@@ -154,14 +154,12 @@ TEST_F(ReportingBrowsingDataRemoverTest, RemoveSomeClients) {
RemoveBrowsingData(/* remove_reports= */ false, /* remove_clients= */ true,
/* host= */ kUrl1_.host());
EXPECT_EQ(2u, report_count());
- EXPECT_FALSE(
- FindEndpointInCache(ReportingEndpointGroupKey(NetworkIsolationKey::Todo(),
- kOrigin1_, kGroup_),
- kEndpoint_));
- EXPECT_TRUE(
- FindEndpointInCache(ReportingEndpointGroupKey(NetworkIsolationKey::Todo(),
- kOrigin2_, kGroup_),
- kEndpoint_));
+ EXPECT_FALSE(FindEndpointInCache(
+ ReportingEndpointGroupKey(NetworkIsolationKey(), kOrigin1_, kGroup_),
+ kEndpoint_));
+ EXPECT_TRUE(FindEndpointInCache(
+ ReportingEndpointGroupKey(NetworkIsolationKey(), kOrigin2_, kGroup_),
+ kEndpoint_));
}
} // namespace
diff --git a/chromium/net/reporting/reporting_cache_impl.cc b/chromium/net/reporting/reporting_cache_impl.cc
index 14fc211c02d..7b4e0345b27 100644
--- a/chromium/net/reporting/reporting_cache_impl.cc
+++ b/chromium/net/reporting/reporting_cache_impl.cc
@@ -176,15 +176,11 @@ void ReportingCacheImpl::IncrementEndpointDeliveries(
void ReportingCacheImpl::RemoveReports(
const std::vector<const ReportingReport*>& reports,
ReportingReport::Outcome outcome) {
- base::Optional<base::TimeTicks> delivered = base::nullopt;
- if (outcome == ReportingReport::Outcome::DELIVERED)
- delivered = tick_clock().NowTicks();
for (const ReportingReport* report : reports) {
auto it = reports_.find(report);
DCHECK(it != reports_.end());
it->get()->outcome = outcome;
- it->get()->delivered = delivered;
if (it->get()->IsUploadPending()) {
it->get()->status = ReportingReport::Status::DOOMED;
diff --git a/chromium/net/reporting/reporting_delivery_agent.cc b/chromium/net/reporting/reporting_delivery_agent.cc
index fca2421b239..80a7a8c9e75 100644
--- a/chromium/net/reporting/reporting_delivery_agent.cc
+++ b/chromium/net/reporting/reporting_delivery_agent.cc
@@ -4,9 +4,10 @@
#include "net/reporting/reporting_delivery_agent.h"
+#include <algorithm>
#include <map>
+#include <set>
#include <string>
-#include <unordered_set>
#include <utility>
#include <vector>
@@ -17,6 +18,7 @@
#include "base/timer/timer.h"
#include "base/values.h"
#include "net/base/network_isolation_key.h"
+#include "net/base/url_util.h"
#include "net/reporting/reporting_cache.h"
#include "net/reporting/reporting_cache_observer.h"
#include "net/reporting/reporting_context.h"
@@ -31,9 +33,9 @@ namespace net {
namespace {
-void SerializeReports(const std::vector<const ReportingReport*>& reports,
- base::TimeTicks now,
- std::string* json_out) {
+using ReportList = std::vector<const ReportingReport*>;
+
+std::string SerializeReports(const ReportList& reports, base::TimeTicks now) {
base::ListValue reports_value;
for (const ReportingReport* report : reports) {
@@ -49,10 +51,112 @@ void SerializeReports(const std::vector<const ReportingReport*>& reports,
reports_value.Append(std::move(report_value));
}
- bool json_written = base::JSONWriter::Write(reports_value, json_out);
+ std::string json_out;
+ bool json_written = base::JSONWriter::Write(reports_value, &json_out);
DCHECK(json_written);
+ return json_out;
+}
+
+bool CompareReportGroupKeys(const ReportingReport* lhs,
+ const ReportingReport* rhs) {
+ return lhs->GetGroupKey() < rhs->GetGroupKey();
}
+// Each Delivery corresponds to one upload URLRequest.
+class Delivery {
+ public:
+ // The target of a delivery. All reports uploaded together must share the
+ // same values for these parameters.
+ // Note that |origin| here (which matches the report's |origin|) is not
+ // necessarily the same as the |origin| of the ReportingEndpoint's group key
+ // (if the endpoint is configured to include subdomains). Reports with
+ // different group keys can be in the same delivery, as long as the NIK and
+ // report origin are the same, and they all get assigned to the same endpoint
+ // URL.
+ struct Target {
+ Target(const NetworkIsolationKey& network_isolation_key,
+ const url::Origin& origin,
+ const GURL& endpoint_url)
+ : network_isolation_key(network_isolation_key),
+ origin(origin),
+ endpoint_url(endpoint_url) {}
+
+ ~Target() = default;
+
+ bool operator<(const Target& other) const {
+ return std::tie(network_isolation_key, origin, endpoint_url) <
+ std::tie(other.network_isolation_key, other.origin,
+ other.endpoint_url);
+ }
+
+ NetworkIsolationKey network_isolation_key;
+ url::Origin origin;
+ GURL endpoint_url;
+ };
+
+ explicit Delivery(const Target& target) : target_(target) {}
+
+ ~Delivery() = default;
+
+ // Add the reports in [reports_begin, reports_end) into this delivery.
+ // Modify the report counter for the |endpoint| to which this delivery is
+ // destined.
+ void AddReports(const ReportingEndpoint& endpoint,
+ const ReportList::const_iterator reports_begin,
+ const ReportList::const_iterator reports_end) {
+ DCHECK(reports_begin != reports_end);
+ DCHECK_EQ(endpoint.group_key.network_isolation_key,
+ network_isolation_key());
+ DCHECK(IsSubdomainOf(target_.origin.host() /* subdomain */,
+ endpoint.group_key.origin.host() /* superdomain */));
+ for (auto it = reports_begin; it != reports_end; ++it) {
+ DCHECK_EQ((*reports_begin)->GetGroupKey(), (*it)->GetGroupKey());
+ DCHECK_EQ((*it)->network_isolation_key, network_isolation_key());
+ DCHECK_EQ(url::Origin::Create((*it)->url), target_.origin);
+ DCHECK_EQ((*it)->group, endpoint.group_key.group_name);
+ // Report origin is equal to, or a subdomain of, the endpoint
+ // configuration's origin.
+ DCHECK(IsSubdomainOf((*it)->url.host_piece() /* subdomain */,
+ endpoint.group_key.origin.host() /* superdomain */));
+ }
+
+ reports_per_group_[endpoint.group_key] +=
+ std::distance(reports_begin, reports_end);
+ reports_.insert(reports_.end(), reports_begin, reports_end);
+ }
+
+ // Records statistics for reports after an upload has completed.
+ // Either removes successfully delivered reports, or increments the failure
+ // counter if delivery was unsuccessful.
+ void ProcessOutcome(ReportingCache* cache, bool success) {
+ for (const auto& group_name_and_count : reports_per_group_) {
+ cache->IncrementEndpointDeliveries(group_name_and_count.first,
+ target_.endpoint_url,
+ group_name_and_count.second, success);
+ }
+ if (success) {
+ cache->RemoveReports(reports_, ReportingReport::Outcome::DELIVERED);
+ } else {
+ cache->IncrementReportsAttempts(reports_);
+ }
+ }
+
+ const NetworkIsolationKey& network_isolation_key() const {
+ return target_.network_isolation_key;
+ }
+ const GURL& endpoint_url() const { return target_.endpoint_url; }
+ const ReportList& reports() const { return reports_; }
+
+ private:
+ const Target target_;
+ ReportList reports_;
+
+ // Used to track statistics for each ReportingEndpoint.
+ // The endpoint is uniquely identified by the key in conjunction with
+ // |target_.endpoint_url|. See ProcessOutcome().
+ std::map<ReportingEndpointGroupKey, int> reports_per_group_;
+};
+
class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
public ReportingCacheObserver {
public:
@@ -89,33 +193,8 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
}
private:
- // TODO(chlily): Add NIK.
- using OriginEndpoint = std::pair<url::Origin, GURL>;
- using GroupEndpoint = std::pair<ReportingEndpointGroupKey, GURL>;
-
- class Delivery {
- public:
- explicit Delivery(const OriginEndpoint& report_origin_endpoint)
- : report_origin(report_origin_endpoint.first),
- endpoint(report_origin_endpoint.second) {}
-
- ~Delivery() = default;
-
- void AddReports(const ReportingEndpoint& endpoint,
- const std::vector<const ReportingReport*>& to_add) {
- GroupEndpoint key = std::make_pair(endpoint.group_key, endpoint.info.url);
- reports_per_endpoint[key] += to_add.size();
- reports.insert(reports.end(), to_add.begin(), to_add.end());
- }
-
- const url::Origin report_origin;
- const GURL endpoint;
- std::vector<const ReportingReport*> reports;
- std::map<GroupEndpoint, int> reports_per_endpoint;
- };
-
bool CacheHasReports() {
- std::vector<const ReportingReport*> reports;
+ ReportList reports;
context_->cache()->GetReports(&reports);
return !reports.empty();
}
@@ -134,8 +213,9 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
}
void SendReports() {
- std::vector<const ReportingReport*> reports =
- cache()->GetReportsToDeliver();
+ ReportList reports = cache()->GetReportsToDeliver();
+ if (reports.empty())
+ return;
// First determine which origins we're allowed to upload reports about.
std::set<url::Origin> report_origins;
@@ -148,84 +228,79 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
weak_factory_.GetWeakPtr(), std::move(reports)));
}
- void OnSendPermissionsChecked(std::vector<const ReportingReport*> reports,
+ void OnSendPermissionsChecked(ReportList reports,
std::set<url::Origin> allowed_report_origins) {
- // Sort reports into buckets by endpoint group.
- std::map<ReportingEndpointGroupKey, std::vector<const ReportingReport*>>
- origin_group_reports;
- for (const ReportingReport* report : reports) {
- url::Origin report_origin = url::Origin::Create(report->url);
- if (allowed_report_origins.find(report_origin) ==
- allowed_report_origins.end()) {
+ DCHECK(!reports.empty());
+ std::map<Delivery::Target, std::unique_ptr<Delivery>> deliveries;
+
+ // Sort reports by group key.
+ std::sort(reports.begin(), reports.end(), &CompareReportGroupKeys);
+
+ // Iterate over "buckets" of reports with the same group key.
+ for (auto bucket_it = reports.begin(); bucket_it != reports.end();) {
+ auto bucket_start = bucket_it;
+ // Set the iterator to the beginning of the next group bucket.
+ bucket_it = std::upper_bound(bucket_it, reports.end(), *bucket_it,
+ &CompareReportGroupKeys);
+
+ // Skip this group if we don't have upload permission for its origin.
+ const ReportingEndpointGroupKey& report_group_key =
+ (*bucket_start)->GetGroupKey();
+ if (!base::Contains(allowed_report_origins, report_group_key.origin))
continue;
- }
- // TODO(chlily): Use proper NIK once reports are double-keyed.
- ReportingEndpointGroupKey group_key(NetworkIsolationKey::Todo(),
- report_origin, report->group);
- origin_group_reports[group_key].push_back(report);
- }
- // Find an endpoint for each (origin, group) bucket and sort reports into
- // endpoint buckets. Don't allow concurrent deliveries to the same (origin,
- // group) bucket.
- std::map<OriginEndpoint, std::unique_ptr<Delivery>> deliveries;
- for (auto& it : origin_group_reports) {
- const ReportingEndpointGroupKey& group_key = it.first;
-
- if (base::Contains(pending_groups_, group_key))
+ // Skip this group if there is already a pending upload for it.
+ // We don't allow multiple concurrent uploads for the same group.
+ if (base::Contains(pending_groups_, report_group_key))
continue;
+ // Find an endpoint to deliver these reports to.
const ReportingEndpoint endpoint =
- endpoint_manager_->FindEndpointForDelivery(group_key);
-
- if (!endpoint) {
- // TODO(chlily): Remove reports for which there are no valid
- // delivery endpoints.
+ endpoint_manager_->FindEndpointForDelivery(report_group_key);
+ // TODO(chlily): Remove reports for which there are no valid delivery
+ // endpoints.
+ if (!endpoint)
continue;
- }
- OriginEndpoint report_origin_endpoint(group_key.origin,
- endpoint.info.url);
- Delivery* delivery;
- auto delivery_it = deliveries.find(report_origin_endpoint);
+ pending_groups_.insert(report_group_key);
+
+ // Add the reports to the appropriate delivery.
+ Delivery::Target target(report_group_key.network_isolation_key,
+ report_group_key.origin, endpoint.info.url);
+ auto delivery_it = deliveries.find(target);
if (delivery_it == deliveries.end()) {
- auto new_delivery = std::make_unique<Delivery>(report_origin_endpoint);
- delivery = new_delivery.get();
- deliveries[report_origin_endpoint] = std::move(new_delivery);
- } else {
- delivery = delivery_it->second.get();
+ bool inserted;
+ auto new_delivery = std::make_unique<Delivery>(target);
+ std::tie(delivery_it, inserted) = deliveries.insert(
+ std::make_pair(std::move(target), std::move(new_delivery)));
+ DCHECK(inserted);
}
-
- delivery->AddReports(endpoint, it.second);
- pending_groups_.insert(group_key);
+ delivery_it->second->AddReports(endpoint, bucket_start, bucket_it);
}
// Keep track of which of these reports we don't queue for delivery; we'll
// need to mark them as not-pending.
- std::unordered_set<const ReportingReport*> undelivered_reports(
- reports.begin(), reports.end());
+ std::set<const ReportingReport*> undelivered_reports(reports.begin(),
+ reports.end());
// Start an upload for each delivery.
- for (auto& it : deliveries) {
- const OriginEndpoint& report_origin_endpoint = it.first;
- const url::Origin& report_origin = report_origin_endpoint.first;
- const GURL& endpoint = report_origin_endpoint.second;
- std::unique_ptr<Delivery>& delivery = it.second;
-
- std::string json;
- SerializeReports(delivery->reports, tick_clock().NowTicks(), &json);
+ for (auto& target_and_delivery : deliveries) {
+ const Delivery::Target& target = target_and_delivery.first;
+ std::unique_ptr<Delivery>& delivery = target_and_delivery.second;
int max_depth = 0;
- for (const ReportingReport* report : delivery->reports) {
+ for (const ReportingReport* report : delivery->reports()) {
undelivered_reports.erase(report);
- if (report->depth > max_depth)
- max_depth = report->depth;
+ max_depth = std::max(report->depth, max_depth);
}
+ std::string upload_data =
+ SerializeReports(delivery->reports(), tick_clock().NowTicks());
+
// TODO: Calculate actual max depth.
- // TODO(mmenke): Populate NetworkIsolationKey.
uploader()->StartUpload(
- report_origin, endpoint, NetworkIsolationKey::Todo(), json, max_depth,
+ target.origin, target.endpoint_url, target.network_isolation_key,
+ upload_data, max_depth,
base::BindOnce(&ReportingDeliveryAgentImpl::OnUploadComplete,
weak_factory_.GetWeakPtr(), std::move(delivery)));
}
@@ -236,39 +311,24 @@ class ReportingDeliveryAgentImpl : public ReportingDeliveryAgent,
void OnUploadComplete(std::unique_ptr<Delivery> delivery,
ReportingUploader::Outcome outcome) {
- for (const auto& endpoint_and_count : delivery->reports_per_endpoint) {
- const ReportingEndpointGroupKey& group_key =
- endpoint_and_count.first.first;
- const GURL& endpoint = endpoint_and_count.first.second;
- int report_count = endpoint_and_count.second;
- cache()->IncrementEndpointDeliveries(
- group_key, endpoint, report_count,
- outcome == ReportingUploader::Outcome::SUCCESS);
- }
+ bool success = outcome == ReportingUploader::Outcome::SUCCESS;
+ delivery->ProcessOutcome(cache(), success);
- if (outcome == ReportingUploader::Outcome::SUCCESS) {
- cache()->RemoveReports(delivery->reports,
- ReportingReport::Outcome::DELIVERED);
- // TODO(mmenke): Populate NetworkIsolationKey argument.
- endpoint_manager_->InformOfEndpointRequest(NetworkIsolationKey::Todo(),
- delivery->endpoint, true);
- } else {
- cache()->IncrementReportsAttempts(delivery->reports);
- // TODO(mmenke): Populate NetworkIsolationKey argument.
- endpoint_manager_->InformOfEndpointRequest(NetworkIsolationKey::Todo(),
- delivery->endpoint, false);
- }
+ endpoint_manager_->InformOfEndpointRequest(
+ delivery->network_isolation_key(), delivery->endpoint_url(), success);
+ // TODO(chlily): This leaks information across NIKs. If the endpoint URL is
+ // configured for both NIK1 and NIK2, and it responds with a 410 on a NIK1
+ // connection, then the change in configuration will be detectable on a NIK2
+ // connection.
if (outcome == ReportingUploader::Outcome::REMOVE_ENDPOINT)
- cache()->RemoveEndpointsForUrl(delivery->endpoint);
+ cache()->RemoveEndpointsForUrl(delivery->endpoint_url());
- for (const ReportingReport* report : delivery->reports) {
- ReportingEndpointGroupKey group_key(
- NetworkIsolationKey::Todo(), delivery->report_origin, report->group);
- pending_groups_.erase(group_key);
+ for (const ReportingReport* report : delivery->reports()) {
+ pending_groups_.erase(report->GetGroupKey());
}
- cache()->ClearReportsPending(delivery->reports);
+ cache()->ClearReportsPending(delivery->reports());
}
const ReportingPolicy& policy() const { return context_->policy(); }
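The bucket loop in OnSendPermissionsChecked above leans on a sort-then-upper_bound idiom: once the reports are sorted, equal group keys are adjacent, and std::upper_bound jumps to the end of the current run. A self-contained sketch of the same pattern on plain strings (illustrative only, not Chromium code):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

bool CompareKeys(const std::string& lhs, const std::string& rhs) {
  return lhs < rhs;
}

int main() {
  std::vector<std::string> keys = {"b", "a", "c", "b", "a"};
  std::sort(keys.begin(), keys.end(), &CompareKeys);
  for (auto bucket_it = keys.begin(); bucket_it != keys.end();) {
    auto bucket_start = bucket_it;
    // Advance past the run of keys equal to *bucket_start; the half-open
    // range [bucket_start, bucket_it) is one bucket.
    bucket_it =
        std::upper_bound(bucket_it, keys.end(), *bucket_it, &CompareKeys);
    std::cout << *bucket_start << " x" << (bucket_it - bucket_start) << "\n";
  }
  return 0;  // Prints "a x2", "b x2", "c x1" (one bucket per line).
}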
diff --git a/chromium/net/reporting/reporting_delivery_agent.h b/chromium/net/reporting/reporting_delivery_agent.h
index b857fd16b98..08cb97f4c91 100644
--- a/chromium/net/reporting/reporting_delivery_agent.h
+++ b/chromium/net/reporting/reporting_delivery_agent.h
@@ -19,32 +19,27 @@ namespace net {
class ReportingContext;
-// Takes reports from the ReportingCache, assembles reports into deliveries to
-// endpoints, and sends those deliveries using ReportingUploader.
+// Batches reports fetched from the ReportingCache and uploads them using the
+// ReportingUploader.
//
-// Since the Reporting spec is completely silent on issues of concurrency, the
-// delivery agent handles it as so:
+// Reports are only considered for delivery if all of the following are true:
+// - The report is not already part of a pending upload request.
+// - Uploads are allowed for the report's origin (i.e. the origin of the URL
+// associated with the reported event).
+// - There is not already a pending upload for any reports sharing the same
+// (NIK, origin, group) key.
//
-// 1. An individual report can only be included in one delivery at once -- if
-// SendReports is called again while a report is being delivered, it won't
-// be included in another delivery during that call to SendReports. (This is,
-// in fact, made redundant by rule 3, but it's included anyway in case rule 3
-// changes.)
+// Reports are batched for upload to an endpoint URL such that:
+// - The available reports with the same (NIK, origin, group) are always
+// uploaded together.
+// - All reports uploaded together must share a NIK and origin.
+// - Reports for the same (NIK, origin) can be uploaded separately if they are
+// for different groups.
+// - Reports for different groups can be batched together, if they are assigned
+// to ReportingEndpoints sharing a URL (that is, the upload URL).
//
-// 2. An endpoint can only be the target of one delivery at once -- if
-// SendReports is called again with reports that could be delivered to that
-// endpoint, they won't be delivered to that endpoint.
-//
-// 3. Reports for an (origin, group) tuple can only be included in one delivery
-// at once -- if SendReports is called again with reports in that (origin,
-// group), they won't be included in any delivery during that call to
-// SendReports. (This prevents the agent from getting around rule 2 by using
-// other endpoints in the same group.)
-//
-// 4. Reports for the same origin *can* be included in multiple parallel
-// deliveries if they are in different groups within that origin.
-//
-// (Note that a single delivery can contain an infinite number of reports.)
+// There is no limit to the number of reports that can be uploaded together,
+// aside from the global cap on total reports.
//
// TODO(juliatuttle): Consider capping the maximum number of reports per
// delivery attempt.
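A worked example of these rules, with hypothetical names: suppose groups "g1" and "g2" for https://origin both resolve to endpoints at https://collector.example/upload, and three reports are queued: A = (NIK1, https://origin, "g1"), B = (NIK1, https://origin, "g2"), and C = (NIK1, https://other, "g1"). A and B share a NIK and origin and are assigned endpoints with the same URL, so they batch into one upload; C differs in origin, so it goes in a separate upload even though its endpoint URL may be identical.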
diff --git a/chromium/net/reporting/reporting_delivery_agent_unittest.cc b/chromium/net/reporting/reporting_delivery_agent_unittest.cc
index 893cdd61eb2..7812de93c4d 100644
--- a/chromium/net/reporting/reporting_delivery_agent_unittest.cc
+++ b/chromium/net/reporting/reporting_delivery_agent_unittest.cc
@@ -37,12 +37,47 @@ class ReportingDeliveryAgentTest : public ReportingTestBase {
policy.endpoint_backoff_policy.entry_lifetime_ms = 0;
policy.endpoint_backoff_policy.always_use_initial_delay = false;
UsePolicy(policy);
+ report_body_.SetStringKey("key", "value");
}
- const NetworkIsolationKey kNik_ = NetworkIsolationKey::Todo();
+ void AddReport(const NetworkIsolationKey& network_isolation_key,
+ const GURL& url,
+ const std::string& group) {
+ cache()->AddReport(network_isolation_key, url, kUserAgent_, group, kType_,
+ std::make_unique<base::Value>(report_body_.Clone()),
+ 0 /* depth */, tick_clock()->NowTicks() /* queued */,
+ 0 /* attempts */);
+ }
+
+ // The first report added to the cache is uploaded immediately, and a timer is
+ // started for all subsequent reports (which may then be batched). To test
+ // behavior involving batching multiple reports, we need to add, upload, and
+ // immediately resolve a dummy report to prime the delivery timer.
+ void UploadFirstReportAndStartTimer() {
+ ReportingEndpointGroupKey dummy_group(
+ NetworkIsolationKey(), url::Origin::Create(GURL("https://dummy.test")),
+ "dummy");
+ ASSERT_TRUE(SetEndpointInCache(
+ dummy_group, GURL("https://dummy.test/upload"), kExpires_));
+ AddReport(dummy_group.network_isolation_key, dummy_group.origin.GetURL(),
+ dummy_group.group_name);
+
+ ASSERT_EQ(1u, pending_uploads().size());
+ pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
+ EXPECT_EQ(0u, pending_uploads().size());
+ EXPECT_TRUE(delivery_timer()->IsRunning());
+ }
+
+ base::Value report_body_{base::Value::Type::DICTIONARY};
const GURL kUrl_ = GURL("https://origin/path");
+ const GURL kOtherUrl_ = GURL("https://other-origin/path");
const GURL kSubdomainUrl_ = GURL("https://sub.origin/path");
const url::Origin kOrigin_ = url::Origin::Create(GURL("https://origin/"));
+ const url::Origin kOtherOrigin_ =
+ url::Origin::Create(GURL("https://other-origin/"));
+ const NetworkIsolationKey kNik_;
+ const NetworkIsolationKey kOtherNik_ =
+ NetworkIsolationKey(kOrigin_, kOtherOrigin_);
const GURL kEndpoint_ = GURL("https://endpoint/");
const std::string kUserAgent_ = "Mozilla/1.0";
const std::string kGroup_ = "group";
@@ -53,12 +88,8 @@ class ReportingDeliveryAgentTest : public ReportingTestBase {
};
TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateUpload) {
- base::DictionaryValue body;
- body.SetString("key", "value");
-
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- body.CreateDeepCopy(), 0, tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
// Upload is automatically started when cache is modified.
@@ -79,7 +110,8 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateUpload) {
ExpectDictStringValue(kType_, *report, "type");
ExpectDictStringValue(kUrl_.spec(), *report, "url");
ExpectDictStringValue(kUserAgent_, *report, "user_agent");
- ExpectDictDictionaryValue(body, *report, "body");
+ base::Value* body = report->FindDictKey("body");
+ EXPECT_EQ("value", *body->FindStringKey("key"));
}
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
@@ -89,7 +121,7 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateUpload) {
EXPECT_TRUE(reports.empty());
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(1, stats.attempted_uploads);
EXPECT_EQ(1, stats.successful_uploads);
@@ -101,13 +133,9 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateUpload) {
}
TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateSubdomainUpload) {
- base::DictionaryValue body;
- body.SetString("key", "value");
-
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_,
OriginSubdomains::INCLUDE));
- cache()->AddReport(kNik_, kSubdomainUrl_, kUserAgent_, kGroup_, kType_,
- body.CreateDeepCopy(), 0, tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kSubdomainUrl_, kGroup_);
// Upload is automatically started when cache is modified.
@@ -128,7 +156,8 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateSubdomainUpload) {
ExpectDictStringValue(kType_, *report, "type");
ExpectDictStringValue(kSubdomainUrl_.spec(), *report, "url");
ExpectDictStringValue(kUserAgent_, *report, "user_agent");
- ExpectDictDictionaryValue(body, *report, "body");
+ base::Value* body = report->FindDictKey("body");
+ EXPECT_EQ("value", *body->FindStringKey("key"));
}
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
@@ -138,7 +167,7 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateSubdomainUpload) {
EXPECT_TRUE(reports.empty());
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(1, stats.attempted_uploads);
EXPECT_EQ(1, stats.successful_uploads);
@@ -151,13 +180,9 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulImmediateSubdomainUpload) {
TEST_F(ReportingDeliveryAgentTest,
SuccessfulImmediateSubdomainUploadWithOverwrittenEndpoint) {
- base::DictionaryValue body;
- body.SetString("key", "value");
-
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_,
OriginSubdomains::INCLUDE));
- cache()->AddReport(kNik_, kSubdomainUrl_, kUserAgent_, kGroup_, kType_,
- body.CreateDeepCopy(), 0, tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kSubdomainUrl_, kGroup_);
// Upload is automatically started when cache is modified.
@@ -168,7 +193,7 @@ TEST_F(ReportingDeliveryAgentTest,
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(1, stats.attempted_uploads);
EXPECT_EQ(1, stats.successful_uploads);
@@ -183,18 +208,13 @@ TEST_F(ReportingDeliveryAgentTest,
}
TEST_F(ReportingDeliveryAgentTest, SuccessfulDelayedUpload) {
- base::DictionaryValue body;
- body.SetString("key", "value");
-
// Trigger and complete an upload to start the delivery timer.
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- body.CreateDeepCopy(), 0, tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
// Add another report to upload after a delay.
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- body.CreateDeepCopy(), 0, tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
@@ -216,12 +236,13 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulDelayedUpload) {
ExpectDictStringValue(kType_, *report, "type");
ExpectDictStringValue(kUrl_.spec(), *report, "url");
ExpectDictStringValue(kUserAgent_, *report, "user_agent");
- ExpectDictDictionaryValue(body, *report, "body");
+ base::Value* body = report->FindDictKey("body");
+ EXPECT_EQ("value", *body->FindStringKey("key"));
}
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(2, stats.attempted_uploads);
EXPECT_EQ(2, stats.successful_uploads);
@@ -239,9 +260,7 @@ TEST_F(ReportingDeliveryAgentTest, SuccessfulDelayedUpload) {
TEST_F(ReportingDeliveryAgentTest, FailedUpload) {
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
@@ -250,7 +269,7 @@ TEST_F(ReportingDeliveryAgentTest, FailedUpload) {
pending_uploads()[0]->Complete(ReportingUploader::Outcome::FAILURE);
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(1, stats.attempted_uploads);
EXPECT_EQ(0, stats.successful_uploads);
@@ -272,7 +291,7 @@ TEST_F(ReportingDeliveryAgentTest, FailedUpload) {
EXPECT_TRUE(pending_uploads().empty());
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(1, stats.attempted_uploads);
EXPECT_EQ(0, stats.successful_uploads);
@@ -292,8 +311,7 @@ TEST_F(ReportingDeliveryAgentTest, DisallowedUpload) {
body.SetString("key", "value");
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- body.CreateDeepCopy(), 0, tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
tick_clock()->Advance(base::TimeDelta::FromMilliseconds(kAgeMillis));
@@ -305,7 +323,7 @@ TEST_F(ReportingDeliveryAgentTest, DisallowedUpload) {
EXPECT_TRUE(pending_uploads().empty());
{
- const ReportingEndpoint::Statistics stats =
+ ReportingEndpoint::Statistics stats =
GetEndpointStatistics(kGroupKey_, kEndpoint_);
EXPECT_EQ(0, stats.attempted_uploads);
EXPECT_EQ(0, stats.successful_uploads);
@@ -320,17 +338,13 @@ TEST_F(ReportingDeliveryAgentTest, DisallowedUpload) {
}
TEST_F(ReportingDeliveryAgentTest, RemoveEndpointUpload) {
- static const url::Origin kDifferentOrigin =
- url::Origin::Create(GURL("https://origin2/"));
- static const ReportingEndpointGroupKey kOtherGroupKey(
- NetworkIsolationKey(), kDifferentOrigin, kGroup_);
+ static const ReportingEndpointGroupKey kOtherGroupKey(kNik_, kOtherOrigin_,
+ kGroup_);
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
ASSERT_TRUE(SetEndpointInCache(kOtherGroupKey, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
@@ -357,9 +371,7 @@ TEST_F(ReportingDeliveryAgentTest, RemoveEndpointUpload) {
TEST_F(ReportingDeliveryAgentTest, ConcurrentRemove) {
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
@@ -393,9 +405,7 @@ TEST_F(ReportingDeliveryAgentTest, ConcurrentRemoveDuringPermissionsCheck) {
context()->test_delegate()->set_pause_permissions_check(true);
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
ASSERT_TRUE(context()->test_delegate()->PermissionsCheckPaused());
@@ -421,134 +431,126 @@ TEST_F(ReportingDeliveryAgentTest, ConcurrentRemoveDuringPermissionsCheck) {
EXPECT_TRUE(reports.empty());
}
-// Test that the agent will combine reports destined for the same endpoint, even
-// if the reports are from different origins.
-TEST_F(ReportingDeliveryAgentTest,
- BatchReportsFromDifferentOriginsToSameEndpoint) {
- static const GURL kDifferentUrl("https://origin2/path");
- static const url::Origin kDifferentOrigin =
- url::Origin::Create(kDifferentUrl);
- const ReportingEndpointGroupKey kDifferentGroupKey(NetworkIsolationKey(),
- kDifferentOrigin, kGroup_);
-
- ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
- ASSERT_TRUE(SetEndpointInCache(kDifferentGroupKey, kEndpoint_, kExpires_));
+// Reports uploaded together must share a NIK and origin.
+// Test that the agent will not combine reports destined for the same endpoint
+// if the reports are from different origins or NIKs, but does combine all
+// reports for the same (NIK, origin).
+TEST_F(ReportingDeliveryAgentTest, OnlyBatchSameNikAndOrigin) {
+ const ReportingEndpointGroupKey kGroupKeys[] = {
+ ReportingEndpointGroupKey(kNik_, kOrigin_, kGroup_),
+ ReportingEndpointGroupKey(kNik_, kOtherOrigin_, kGroup_),
+ ReportingEndpointGroupKey(kOtherNik_, kOrigin_, kGroup_),
+ ReportingEndpointGroupKey(kOtherNik_, kOtherOrigin_, kGroup_),
+ };
+ for (const ReportingEndpointGroupKey& group_key : kGroupKeys) {
+ ASSERT_TRUE(SetEndpointInCache(group_key, kEndpoint_, kExpires_));
+ }
// Trigger and complete an upload to start the delivery timer.
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
- pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
+ UploadFirstReportAndStartTimer();
// Now that the delivery timer is running, these reports won't be immediately
// uploaded.
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
- cache()->AddReport(kNik_, kDifferentUrl, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ AddReport(kNik_, kUrl_, kGroup_);
+ AddReport(kNik_, kOtherUrl_, kGroup_);
+ AddReport(kNik_, kOtherUrl_, kGroup_);
+ AddReport(kOtherNik_, kUrl_, kGroup_);
+ AddReport(kOtherNik_, kUrl_, kGroup_);
+ AddReport(kOtherNik_, kUrl_, kGroup_);
+ AddReport(kOtherNik_, kOtherUrl_, kGroup_);
+ AddReport(kOtherNik_, kOtherUrl_, kGroup_);
+ AddReport(kOtherNik_, kOtherUrl_, kGroup_);
+ AddReport(kOtherNik_, kOtherUrl_, kGroup_);
EXPECT_EQ(0u, pending_uploads().size());
- // When we fire the delivery timer, we should NOT batch these two reports into
- // a single upload, since each upload must only contain reports about a single
- // origin.
+ // There should be one upload per (NIK, origin).
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
- ASSERT_EQ(2u, pending_uploads().size());
+ ASSERT_EQ(4u, pending_uploads().size());
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
- EXPECT_EQ(0u, pending_uploads().size());
-}
-
-// Test that the agent won't start a second upload to the same endpoint for a
-// particular origin while one is pending, but will once it is no longer
-// pending.
-TEST_F(ReportingDeliveryAgentTest, SerializeUploadsToEndpoint) {
- ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
-
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
-
- EXPECT_TRUE(delivery_timer()->IsRunning());
- delivery_timer()->Fire();
- EXPECT_EQ(1u, pending_uploads().size());
-
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
-
- EXPECT_TRUE(delivery_timer()->IsRunning());
- delivery_timer()->Fire();
- ASSERT_EQ(1u, pending_uploads().size());
-
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
- EXPECT_EQ(0u, pending_uploads().size());
-
- EXPECT_TRUE(delivery_timer()->IsRunning());
- delivery_timer()->Fire();
- ASSERT_EQ(1u, pending_uploads().size());
-
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
EXPECT_EQ(0u, pending_uploads().size());
+
+ for (int i = 0; i < 4; ++i) {
+ ReportingEndpoint::Statistics stats =
+ GetEndpointStatistics(kGroupKeys[i], kEndpoint_);
+ EXPECT_EQ(1, stats.attempted_uploads);
+ EXPECT_EQ(1, stats.successful_uploads);
+ EXPECT_EQ(i + 1, stats.attempted_reports);
+ EXPECT_EQ(i + 1, stats.successful_reports);
+ }
}
-// Test that the agent won't start a second upload for an (origin, group) while
-// one is pending, even if a different endpoint is available, but will once the
-// original delivery is complete and the (origin, group) is no longer pending.
+// Test that the agent won't start a second upload for a (NIK, origin, group)
+// while one is pending, even if a different endpoint is available, but will
+// once the original delivery is complete and the (NIK, origin, group) is no
+// longer pending.
TEST_F(ReportingDeliveryAgentTest, SerializeUploadsToGroup) {
static const GURL kDifferentEndpoint("https://endpoint2/");
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kDifferentEndpoint, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ // Trigger and complete an upload to start the delivery timer.
+ UploadFirstReportAndStartTimer();
+ // First upload causes this group key to become pending.
+ AddReport(kNik_, kUrl_, kGroup_);
+ EXPECT_EQ(0u, pending_uploads().size());
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
EXPECT_EQ(1u, pending_uploads().size());
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
-
+ // Second upload isn't started because the group is pending.
+ AddReport(kNik_, kUrl_, kGroup_);
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
ASSERT_EQ(1u, pending_uploads().size());
+ // Resolve the first upload.
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
EXPECT_EQ(0u, pending_uploads().size());
+ // Now the other upload can happen.
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
ASSERT_EQ(1u, pending_uploads().size());
-
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
EXPECT_EQ(0u, pending_uploads().size());
+
+ // A total of 2 reports were uploaded.
+ {
+ ReportingEndpoint::Statistics stats =
+ GetEndpointStatistics(kGroupKey_, kEndpoint_);
+ ReportingEndpoint::Statistics different_stats =
+ GetEndpointStatistics(kGroupKey_, kDifferentEndpoint);
+ EXPECT_EQ(2, stats.attempted_uploads + different_stats.attempted_uploads);
+ EXPECT_EQ(2, stats.successful_uploads + different_stats.successful_uploads);
+ EXPECT_EQ(2, stats.attempted_reports + different_stats.attempted_reports);
+ EXPECT_EQ(2, stats.successful_reports + different_stats.successful_reports);
+ }
}
// Tests that the agent will start parallel uploads to different groups within
-// the same origin.
+// the same (NIK, origin) to endpoints with different URLs.
TEST_F(ReportingDeliveryAgentTest, ParallelizeUploadsAcrossGroups) {
static const GURL kDifferentEndpoint("https://endpoint2/");
static const std::string kDifferentGroup("group2");
- const ReportingEndpointGroupKey kDifferentGroupKey(NetworkIsolationKey(),
- kOrigin_, kDifferentGroup);
+ const ReportingEndpointGroupKey kDifferentGroupKey(kNik_, kOrigin_,
+ kDifferentGroup);
ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
ASSERT_TRUE(
SetEndpointInCache(kDifferentGroupKey, kDifferentEndpoint, kExpires_));
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kGroup_, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
- cache()->AddReport(kNik_, kUrl_, kUserAgent_, kDifferentGroup, kType_,
- std::make_unique<base::DictionaryValue>(), 0,
- tick_clock()->NowTicks(), 0);
+ // Trigger and complete an upload to start the delivery timer.
+ UploadFirstReportAndStartTimer();
+
+ AddReport(kNik_, kUrl_, kGroup_);
+ AddReport(kNik_, kUrl_, kDifferentGroup);
EXPECT_TRUE(delivery_timer()->IsRunning());
delivery_timer()->Fire();
@@ -557,6 +559,63 @@ TEST_F(ReportingDeliveryAgentTest, ParallelizeUploadsAcrossGroups) {
pending_uploads()[1]->Complete(ReportingUploader::Outcome::SUCCESS);
pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
EXPECT_EQ(0u, pending_uploads().size());
+
+ {
+ ReportingEndpoint::Statistics stats =
+ GetEndpointStatistics(kGroupKey_, kEndpoint_);
+ EXPECT_EQ(1, stats.attempted_uploads);
+ EXPECT_EQ(1, stats.successful_uploads);
+ EXPECT_EQ(1, stats.attempted_reports);
+ EXPECT_EQ(1, stats.successful_reports);
+ }
+ {
+ ReportingEndpoint::Statistics stats =
+ GetEndpointStatistics(kDifferentGroupKey, kDifferentEndpoint);
+ EXPECT_EQ(1, stats.attempted_uploads);
+ EXPECT_EQ(1, stats.successful_uploads);
+ EXPECT_EQ(1, stats.attempted_reports);
+ EXPECT_EQ(1, stats.successful_reports);
+ }
+}
+
+// Tests that the agent will include reports for different groups for the same
+// (NIK, origin) in the same upload if they are destined for the same endpoint
+// URL.
+TEST_F(ReportingDeliveryAgentTest, BatchReportsAcrossGroups) {
+ static const std::string kDifferentGroup("group2");
+ const ReportingEndpointGroupKey kDifferentGroupKey(kNik_, kOrigin_,
+ kDifferentGroup);
+
+ ASSERT_TRUE(SetEndpointInCache(kGroupKey_, kEndpoint_, kExpires_));
+ ASSERT_TRUE(SetEndpointInCache(kDifferentGroupKey, kEndpoint_, kExpires_));
+
+ UploadFirstReportAndStartTimer();
+
+ AddReport(kNik_, kUrl_, kGroup_);
+ AddReport(kNik_, kUrl_, kDifferentGroup);
+
+ EXPECT_TRUE(delivery_timer()->IsRunning());
+ delivery_timer()->Fire();
+ ASSERT_EQ(1u, pending_uploads().size());
+ pending_uploads()[0]->Complete(ReportingUploader::Outcome::SUCCESS);
+ EXPECT_EQ(0u, pending_uploads().size());
+
+ {
+ ReportingEndpoint::Statistics stats =
+ GetEndpointStatistics(kGroupKey_, kEndpoint_);
+ EXPECT_EQ(1, stats.attempted_uploads);
+ EXPECT_EQ(1, stats.successful_uploads);
+ EXPECT_EQ(1, stats.attempted_reports);
+ EXPECT_EQ(1, stats.successful_reports);
+ }
+ {
+ ReportingEndpoint::Statistics stats =
+ GetEndpointStatistics(kDifferentGroupKey, kEndpoint_);
+ EXPECT_EQ(1, stats.attempted_uploads);
+ EXPECT_EQ(1, stats.successful_uploads);
+ EXPECT_EQ(1, stats.attempted_reports);
+ EXPECT_EQ(1, stats.successful_reports);
+ }
}
} // namespace
diff --git a/chromium/net/reporting/reporting_header_parser.cc b/chromium/net/reporting/reporting_header_parser.cc
index 294f9039841..ec881490fdd 100644
--- a/chromium/net/reporting/reporting_header_parser.cc
+++ b/chromium/net/reporting/reporting_header_parser.cc
@@ -26,28 +26,6 @@ namespace net {
namespace {
-using HeaderEndpointGroupOutcome =
- ReportingHeaderParser::HeaderEndpointGroupOutcome;
-using HeaderEndpointOutcome = ReportingHeaderParser::HeaderEndpointOutcome;
-using HeaderOutcome = ReportingHeaderParser::HeaderOutcome;
-
-void RecordHeaderOutcome(HeaderOutcome outcome) {
- UMA_HISTOGRAM_ENUMERATION(ReportingHeaderParser::kHeaderOutcomeHistogram,
- outcome, HeaderOutcome::MAX);
-}
-
-void RecordHeaderEndpointGroupOutcome(HeaderEndpointGroupOutcome outcome) {
- UMA_HISTOGRAM_ENUMERATION(
- ReportingHeaderParser::kHeaderEndpointGroupOutcomeHistogram, outcome,
- HeaderEndpointGroupOutcome::MAX);
-}
-
-void RecordHeaderEndpointOutcome(HeaderEndpointOutcome outcome) {
- UMA_HISTOGRAM_ENUMERATION(
- ReportingHeaderParser::kHeaderEndpointOutcomeHistogram, outcome,
- HeaderEndpointOutcome::MAX);
-}
-
const char kUrlKey[] = "url";
const char kIncludeSubdomainsKey[] = "include_subdomains";
const char kEndpointsKey[] = "endpoints";
@@ -64,21 +42,21 @@ const char kWeightKey[] = "weight";
// |value| is the parsed JSON value of the endpoint tuple.
//
// |*endpoint_out| will contain the endpoint URL parsed out of the tuple.
-HeaderEndpointOutcome ProcessEndpoint(
- ReportingDelegate* delegate,
- const ReportingEndpointGroupKey& group_key,
- const base::Value& value,
- ReportingEndpoint::EndpointInfo* endpoint_info_out) {
+// Returns true on success, or false if the endpoint was discarded.
+bool ProcessEndpoint(ReportingDelegate* delegate,
+ const ReportingEndpointGroupKey& group_key,
+ const base::Value& value,
+ ReportingEndpoint::EndpointInfo* endpoint_info_out) {
const base::DictionaryValue* dict = nullptr;
if (!value.GetAsDictionary(&dict))
- return HeaderEndpointOutcome::DISCARDED_NOT_DICTIONARY;
+ return false;
DCHECK(dict);
std::string endpoint_url_string;
if (!dict->HasKey(kUrlKey))
- return HeaderEndpointOutcome::DISCARDED_URL_MISSING;
+ return false;
if (!dict->GetString(kUrlKey, &endpoint_url_string))
- return HeaderEndpointOutcome::DISCARDED_URL_NOT_STRING;
+ return false;
GURL endpoint_url;
// Support path-absolute-URL string
@@ -88,29 +66,26 @@ HeaderEndpointOutcome ProcessEndpoint(
endpoint_url = GURL(endpoint_url_string);
}
if (!endpoint_url.is_valid())
- return HeaderEndpointOutcome::DISCARDED_URL_INVALID;
+ return false;
if (!endpoint_url.SchemeIsCryptographic())
- return HeaderEndpointOutcome::DISCARDED_URL_INSECURE;
+ return false;
endpoint_info_out->url = std::move(endpoint_url);
int priority = ReportingEndpoint::EndpointInfo::kDefaultPriority;
if (dict->HasKey(kPriorityKey) && !dict->GetInteger(kPriorityKey, &priority))
- return HeaderEndpointOutcome::DISCARDED_PRIORITY_NOT_INTEGER;
+ return false;
if (priority < 0)
- return HeaderEndpointOutcome::DISCARDED_PRIORITY_NEGATIVE;
+ return false;
endpoint_info_out->priority = priority;
int weight = ReportingEndpoint::EndpointInfo::kDefaultWeight;
if (dict->HasKey(kWeightKey) && !dict->GetInteger(kWeightKey, &weight))
- return HeaderEndpointOutcome::DISCARDED_WEIGHT_NOT_INTEGER;
+ return false;
if (weight < 0)
- return HeaderEndpointOutcome::DISCARDED_WEIGHT_NEGATIVE;
+ return false;
endpoint_info_out->weight = weight;
- if (!delegate->CanSetClient(group_key.origin, endpoint_url))
- return HeaderEndpointOutcome::SET_REJECTED_BY_DELEGATE;
-
- return HeaderEndpointOutcome::SET;
+ return delegate->CanSetClient(group_key.origin, endpoint_url);
}
// Processes a single endpoint group tuple received in a Report-To header.
@@ -118,36 +93,37 @@ HeaderEndpointOutcome ProcessEndpoint(
// |origin| is the origin that sent the Report-To header.
//
// |value| is the parsed JSON value of the endpoint group tuple.
-HeaderEndpointGroupOutcome ProcessEndpointGroup(
- ReportingDelegate* delegate,
- ReportingCache* cache,
- const NetworkIsolationKey& network_isolation_key,
- const url::Origin& origin,
- const base::Value& value,
- ReportingEndpointGroup* parsed_endpoint_group_out) {
+// Returns true on successfully parsing a non-empty endpoint group, or false
+// if the group was discarded or processed as a deletion.
+bool ProcessEndpointGroup(ReportingDelegate* delegate,
+ ReportingCache* cache,
+ const NetworkIsolationKey& network_isolation_key,
+ const url::Origin& origin,
+ const base::Value& value,
+ ReportingEndpointGroup* parsed_endpoint_group_out) {
const base::DictionaryValue* dict = nullptr;
if (!value.GetAsDictionary(&dict))
- return HeaderEndpointGroupOutcome::DISCARDED_NOT_DICTIONARY;
+ return false;
DCHECK(dict);
std::string group_name = kDefaultGroupName;
if (dict->HasKey(kGroupKey) && !dict->GetString(kGroupKey, &group_name))
- return HeaderEndpointGroupOutcome::DISCARDED_GROUP_NOT_STRING;
+ return false;
ReportingEndpointGroupKey group_key(network_isolation_key, origin,
group_name);
parsed_endpoint_group_out->group_key = group_key;
int ttl_sec = -1;
if (!dict->HasKey(kMaxAgeKey))
- return HeaderEndpointGroupOutcome::DISCARDED_TTL_MISSING;
+ return false;
if (!dict->GetInteger(kMaxAgeKey, &ttl_sec))
- return HeaderEndpointGroupOutcome::DISCARDED_TTL_NOT_INTEGER;
+ return false;
if (ttl_sec < 0)
- return HeaderEndpointGroupOutcome::DISCARDED_TTL_NEGATIVE;
+ return false;
// max_age: 0 signifies removal of the endpoint group.
if (ttl_sec == 0) {
cache->RemoveEndpointGroup(group_key);
- return HeaderEndpointGroupOutcome::REMOVED_TTL_ZERO;
+ return false;
}
parsed_endpoint_group_out->ttl = base::TimeDelta::FromSeconds(ttl_sec);
@@ -160,8 +136,7 @@ HeaderEndpointGroupOutcome ProcessEndpointGroup(
origin.GetURL(),
registry_controlled_domains::INCLUDE_UNKNOWN_REGISTRIES,
registry_controlled_domains::INCLUDE_PRIVATE_REGISTRIES) == 0) {
- return HeaderEndpointGroupOutcome::
- DISCARDED_INCLUDE_SUBDOMAINS_NOT_ALLOWED;
+ return false;
}
parsed_endpoint_group_out->include_subdomains = OriginSubdomains::INCLUDE;
@@ -169,9 +144,9 @@ HeaderEndpointGroupOutcome ProcessEndpointGroup(
const base::ListValue* endpoint_list = nullptr;
if (!dict->HasKey(kEndpointsKey))
- return HeaderEndpointGroupOutcome::DISCARDED_ENDPOINTS_MISSING;
+ return false;
if (!dict->GetList(kEndpointsKey, &endpoint_list))
- return HeaderEndpointGroupOutcome::DISCARDED_ENDPOINTS_NOT_LIST;
+ return false;
std::vector<ReportingEndpoint::EndpointInfo> endpoints;
@@ -182,66 +157,24 @@ HeaderEndpointGroupOutcome ProcessEndpointGroup(
ReportingEndpoint::EndpointInfo parsed_endpoint;
- HeaderEndpointOutcome outcome =
- ProcessEndpoint(delegate, group_key, *endpoint, &parsed_endpoint);
-
- if (outcome == HeaderEndpointOutcome::SET)
+ if (ProcessEndpoint(delegate, group_key, *endpoint, &parsed_endpoint))
endpoints.push_back(std::move(parsed_endpoint));
-
- RecordHeaderEndpointOutcome(outcome);
}
// Remove the group if it is empty.
if (endpoints.empty()) {
cache->RemoveEndpointGroup(group_key);
- return HeaderEndpointGroupOutcome::REMOVED_EMPTY;
+ return false;
}
parsed_endpoint_group_out->endpoints = std::move(endpoints);
- return HeaderEndpointGroupOutcome::PARSED;
+ return true;
}
} // namespace
// static
-const char ReportingHeaderParser::kHeaderOutcomeHistogram[] =
- "Net.Reporting.HeaderOutcome";
-
-// static
-const char ReportingHeaderParser::kHeaderEndpointGroupOutcomeHistogram[] =
- "Net.Reporting.HeaderEndpointGroupOutcome";
-
-// static
-const char ReportingHeaderParser::kHeaderEndpointOutcomeHistogram[] =
- "Net.Reporting.HeaderEndpointOutcome";
-
-// static
-void ReportingHeaderParser::RecordHeaderDiscardedForNoReportingService() {
- RecordHeaderOutcome(HeaderOutcome::DISCARDED_NO_REPORTING_SERVICE);
-}
-
-// static
-void ReportingHeaderParser::RecordHeaderDiscardedForInvalidSSLInfo() {
- RecordHeaderOutcome(HeaderOutcome::DISCARDED_INVALID_SSL_INFO);
-}
-
-// static
-void ReportingHeaderParser::RecordHeaderDiscardedForCertStatusError() {
- RecordHeaderOutcome(HeaderOutcome::DISCARDED_CERT_STATUS_ERROR);
-}
-
-// static
-void ReportingHeaderParser::RecordHeaderDiscardedForJsonInvalid() {
- RecordHeaderOutcome(HeaderOutcome::DISCARDED_JSON_INVALID);
-}
-
-// static
-void ReportingHeaderParser::RecordHeaderDiscardedForJsonTooBig() {
- RecordHeaderOutcome(HeaderOutcome::DISCARDED_JSON_TOO_BIG);
-}
-
-// static
void ReportingHeaderParser::ParseHeader(
ReportingContext* context,
const NetworkIsolationKey& network_isolation_key,
@@ -265,25 +198,21 @@ void ReportingHeaderParser::ParseHeader(
bool got_group = group_list->Get(i, &group_value);
DCHECK(got_group);
ReportingEndpointGroup parsed_endpoint_group;
- HeaderEndpointGroupOutcome outcome =
- ProcessEndpointGroup(delegate, cache, network_isolation_key, origin,
- *group_value, &parsed_endpoint_group);
- RecordHeaderEndpointGroupOutcome(outcome);
- if (outcome == HeaderEndpointGroupOutcome::PARSED)
+ if (ProcessEndpointGroup(delegate, cache, network_isolation_key, origin,
+ *group_value, &parsed_endpoint_group)) {
parsed_header.push_back(std::move(parsed_endpoint_group));
+ }
}
// Remove the client if it has no valid endpoint groups.
if (parsed_header.empty()) {
// TODO(chlily): Pass NIK to cache.
cache->RemoveClient(NetworkIsolationKey::Todo(), origin);
- RecordHeaderOutcome(HeaderOutcome::REMOVED_EMPTY);
return;
}
cache->OnParsedHeader(network_isolation_key, origin,
std::move(parsed_header));
- RecordHeaderOutcome(HeaderOutcome::PARSED);
}
} // namespace net
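For orientation, the keys the parser consumes above ("group", "max_age", "include_subdomains", "endpoints", and the per-endpoint "url", "priority", and "weight") arrive as the JSON value of a Report-To header. An illustrative value (hypothetical collector URL, not taken from this change):

Report-To: {"group": "default", "max_age": 86400, "include_subdomains": true,
            "endpoints": [{"url": "https://collector.example/upload",
                           "priority": 1, "weight": 10}]}

Per the code above, a missing "group" falls back to kDefaultGroupName, "max_age": 0 deletes the named group, and an endpoint survives only if its URL is valid and cryptographic and the delegate's CanSetClient allows it.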
diff --git a/chromium/net/reporting/reporting_header_parser.h b/chromium/net/reporting/reporting_header_parser.h
index adba204742a..2e167f2ccde 100644
--- a/chromium/net/reporting/reporting_header_parser.h
+++ b/chromium/net/reporting/reporting_header_parser.h
@@ -22,64 +22,6 @@ class ReportingContext;
class NET_EXPORT ReportingHeaderParser {
public:
- // Histograms. These are mainly used in test cases to verify that interesting
- // events occurred.
-
- static const char kHeaderOutcomeHistogram[];
- static const char kHeaderEndpointGroupOutcomeHistogram[];
- static const char kHeaderEndpointOutcomeHistogram[];
-
- enum class HeaderOutcome {
- DISCARDED_NO_REPORTING_SERVICE = 0,
- DISCARDED_INVALID_SSL_INFO = 1,
- DISCARDED_CERT_STATUS_ERROR = 2,
- DISCARDED_JSON_TOO_BIG = 3,
- DISCARDED_JSON_INVALID = 4,
- PARSED = 5,
- REMOVED_EMPTY = 6,
- MAX
- };
-
- enum class HeaderEndpointGroupOutcome {
- DISCARDED_NOT_DICTIONARY = 0,
- DISCARDED_GROUP_NOT_STRING = 1,
- DISCARDED_TTL_MISSING = 2,
- DISCARDED_TTL_NOT_INTEGER = 3,
- DISCARDED_TTL_NEGATIVE = 4,
- DISCARDED_ENDPOINTS_MISSING = 5,
- DISCARDED_ENDPOINTS_NOT_LIST = 6,
-
- PARSED = 7,
- REMOVED_TTL_ZERO = 8,
- REMOVED_EMPTY = 9,
- DISCARDED_INCLUDE_SUBDOMAINS_NOT_ALLOWED = 10,
- MAX
- };
-
- enum class HeaderEndpointOutcome {
- DISCARDED_NOT_DICTIONARY = 0,
- DISCARDED_URL_MISSING = 1,
- DISCARDED_URL_NOT_STRING = 2,
- DISCARDED_URL_INVALID = 3,
- DISCARDED_URL_INSECURE = 4,
- DISCARDED_PRIORITY_NOT_INTEGER = 5,
- DISCARDED_WEIGHT_NOT_INTEGER = 6,
- DISCARDED_WEIGHT_NEGATIVE = 7,
-
- REMOVED = 8, // Obsolete: removing for max_age: 0 is done on a group basis.
- SET_REJECTED_BY_DELEGATE = 9,
- SET = 10,
- DISCARDED_PRIORITY_NEGATIVE = 11,
-
- MAX
- };
-
- static void RecordHeaderDiscardedForNoReportingService();
- static void RecordHeaderDiscardedForInvalidSSLInfo();
- static void RecordHeaderDiscardedForCertStatusError();
- static void RecordHeaderDiscardedForJsonInvalid();
- static void RecordHeaderDiscardedForJsonTooBig();
-
static void ParseHeader(ReportingContext* context,
const NetworkIsolationKey& network_isolation_key,
const GURL& url,
diff --git a/chromium/net/reporting/reporting_report.cc b/chromium/net/reporting/reporting_report.cc
index 63648cff1b9..3bc047dfcb6 100644
--- a/chromium/net/reporting/reporting_report.cc
+++ b/chromium/net/reporting/reporting_report.cc
@@ -45,16 +45,14 @@ ReportingReport::ReportingReport(
attempts(attempts) {}
ReportingReport::~ReportingReport() {
- if (outcome == Outcome::DELIVERED) {
- // |delivered| should always have a value here, since the ReportingCache
- // records the delivery time for any successful delivery.
- UMA_HISTOGRAM_LONG_TIMES_100("Net.Reporting.ReportDeliveredLatency",
- delivered.value() - queued);
- UMA_HISTOGRAM_COUNTS_100("Net.Reporting.ReportDeliveredAttempts", attempts);
- }
RecordReportOutcome(outcome);
}
+ReportingEndpointGroupKey ReportingReport::GetGroupKey() const {
+ return ReportingEndpointGroupKey(network_isolation_key,
+ url::Origin::Create(url), group);
+}
+
// static
void ReportingReport::RecordReportDiscardedForNoURLRequestContext() {
RecordReportOutcome(Outcome::DISCARDED_NO_URL_REQUEST_CONTEXT);
diff --git a/chromium/net/reporting/reporting_report.h b/chromium/net/reporting/reporting_report.h
index 02018ad25df..dee358878cc 100644
--- a/chromium/net/reporting/reporting_report.h
+++ b/chromium/net/reporting/reporting_report.h
@@ -12,6 +12,7 @@
#include "base/time/time.h"
#include "net/base/net_export.h"
#include "net/base/network_isolation_key.h"
+#include "net/reporting/reporting_endpoint.h"
#include "url/gurl.h"
namespace base {
@@ -65,6 +66,13 @@ struct NET_EXPORT ReportingReport {
// Records metrics about report outcome.
~ReportingReport();
+ // Bundles together the NIK, origin of the report URL, and group name.
+ // This is not exactly the same as the group key of the endpoint that the
+ // report will be delivered to. The origin may differ if the endpoint is
+ // configured for a superdomain of the report's origin. The NIK and group name
+ // will be the same.
+ ReportingEndpointGroupKey GetGroupKey() const;
+
static void RecordReportDiscardedForNoURLRequestContext();
static void RecordReportDiscardedForNoReportingService();
@@ -101,10 +109,6 @@ struct NET_EXPORT ReportingReport {
// relative to the time of the delivery attempt.)
base::TimeTicks queued;
- // Time when report was delivered, if it was delivered successfully.
- // The destructor assumes that this has a value if the outcome is DELIVERED.
- base::Optional<base::TimeTicks> delivered = base::nullopt;
-
// The number of delivery attempts made so far, not including an active
// attempt. (Not included in the delivered report.)
int attempts = 0;
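
The new GetGroupKey() accessor derives a key from the report's own fields, so, as the comment above notes, its origin can differ from the origin of the endpoint group actually chosen for delivery. A rough standalone equivalent, with string stand-ins for NetworkIsolationKey and url::Origin, and SchemeHostPortOf as a deliberately simplified, illustrative origin extraction (the real code uses url::Origin::Create()):

#include <string>

struct GroupKey {  // stand-in for net::ReportingEndpointGroupKey
  std::string network_isolation_key;
  std::string origin;
  std::string group;
};

// Illustrative only: e.g. "https://example.com/path" -> "https://example.com".
std::string SchemeHostPortOf(const std::string& url) {
  auto scheme_end = url.find("://");
  if (scheme_end == std::string::npos)
    return url;
  auto path_start = url.find('/', scheme_end + 3);
  return path_start == std::string::npos ? url : url.substr(0, path_start);
}

struct Report {
  std::string network_isolation_key;
  std::string url;
  std::string group;

  // Mirrors ReportingReport::GetGroupKey(): the NIK and group name are
  // copied verbatim; only the origin is derived from the report URL.
  GroupKey GetGroupKey() const {
    return {network_isolation_key, SchemeHostPortOf(url), group};
  }
};
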
diff --git a/chromium/net/reporting/reporting_service.cc b/chromium/net/reporting/reporting_service.cc
index 2a410343302..ca44492f9e2 100644
--- a/chromium/net/reporting/reporting_service.cc
+++ b/chromium/net/reporting/reporting_service.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/json/json_reader.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/time/tick_clock.h"
@@ -81,18 +82,14 @@ class ReportingServiceImpl : public ReportingService {
void ProcessHeader(const GURL& url,
const std::string& header_string) override {
- if (header_string.size() > kMaxJsonSize) {
- ReportingHeaderParser::RecordHeaderDiscardedForJsonTooBig();
+ if (header_string.size() > kMaxJsonSize)
return;
- }
std::unique_ptr<base::Value> header_value =
base::JSONReader::ReadDeprecated("[" + header_string + "]",
base::JSON_PARSE_RFC, kMaxJsonDepth);
- if (!header_value) {
- ReportingHeaderParser::RecordHeaderDiscardedForJsonInvalid();
+ if (!header_value)
return;
- }
DVLOG(1) << "Received Reporting policy for " << url.GetOrigin();
// TODO(chlily): Get the proper NetworkIsolationKey from the caller.
@@ -101,7 +98,7 @@ class ReportingServiceImpl : public ReportingService {
NetworkIsolationKey::Todo(), url, std::move(header_value)));
}
- void RemoveBrowsingData(int data_type_mask,
+ void RemoveBrowsingData(uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>&
origin_filter) override {
DoOrBacklogTask(base::BindOnce(&ReportingServiceImpl::DoRemoveBrowsingData,
@@ -109,7 +106,7 @@ class ReportingServiceImpl : public ReportingService {
origin_filter));
}
- void RemoveAllBrowsingData(int data_type_mask) override {
+ void RemoveAllBrowsingData(uint64_t data_type_mask) override {
DoOrBacklogTask(
base::BindOnce(&ReportingServiceImpl::DoRemoveAllBrowsingData,
base::Unretained(this), data_type_mask));
@@ -174,14 +171,14 @@ class ReportingServiceImpl : public ReportingService {
}
void DoRemoveBrowsingData(
- int data_type_mask,
+ uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>& origin_filter) {
DCHECK(initialized_);
ReportingBrowsingDataRemover::RemoveBrowsingData(
context_->cache(), data_type_mask, origin_filter);
}
- void DoRemoveAllBrowsingData(int data_type_mask) {
+ void DoRemoveAllBrowsingData(uint64_t data_type_mask) {
DCHECK(initialized_);
ReportingBrowsingDataRemover::RemoveAllBrowsingData(context_->cache(),
data_type_mask);
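
For context, ProcessHeader() above gatekeeps in a fixed order: drop oversized header values, then parse them as JSON (wrapping the raw value in "[" and "]" turns the comma-separated endpoint groups into a single JSON list), and only then post the result to the header parser. A hedged sketch of that order, where ParseJsonList stands in for base::JSONReader and the size limit is an assumed value, not the real kMaxJsonSize:

#include <cstddef>
#include <optional>
#include <string>

constexpr std::size_t kMaxHeaderSize = 16 * 1024;  // assumed, not the real constant

// Stand-in for base::JSONReader::ReadDeprecated(); nullopt == parse error.
std::optional<std::string> ParseJsonList(const std::string& json) {
  if (json.size() < 2 || json.front() != '[' || json.back() != ']')
    return std::nullopt;
  return json;
}

bool AcceptHeader(const std::string& header_string) {
  // Size check first; the patch removed the histogram that used to
  // record this rejection.
  if (header_string.size() > kMaxHeaderSize)
    return false;
  // Wrap so "group1, group2" parses as a JSON list of groups.
  std::optional<std::string> value = ParseJsonList("[" + header_string + "]");
  if (!value)
    return false;  // invalid JSON, also formerly histogrammed
  // ... post |value| on to the header parser ...
  return true;
}
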
diff --git a/chromium/net/reporting/reporting_service.h b/chromium/net/reporting/reporting_service.h
index 233608c0b2a..e2152361002 100644
--- a/chromium/net/reporting/reporting_service.h
+++ b/chromium/net/reporting/reporting_service.h
@@ -68,12 +68,12 @@ class NET_EXPORT ReportingService {
// Removes browsing data from the Reporting system. See
// ReportingBrowsingDataRemover for more details.
virtual void RemoveBrowsingData(
- int data_type_mask,
+ uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>& origin_filter) = 0;
// Like RemoveBrowsingData, except it removes data for all origins without
// a filter.
- virtual void RemoveAllBrowsingData(int data_type_mask) = 0;
+ virtual void RemoveAllBrowsingData(uint64_t data_type_mask) = 0;
// Shuts down the Reporting service so that no new headers or reports are
// processed, and pending uploads are cancelled.
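
On the int -> uint64_t widening throughout this patch: the mask mirrors the embedder's browsing-data-type bitmask, which can use bit positions a 32-bit int would silently truncate. A small sketch of the bit-test idiom, with illustrative constants rather than the real DATA_TYPE_* values:

#include <cstdint>

// Illustrative bit positions; the real constants are defined on
// ReportingBrowsingDataRemover.
constexpr uint64_t kDataTypeReports = uint64_t{1} << 0;
constexpr uint64_t kDataTypeClients = uint64_t{1} << 40;  // would not fit in int

void RemoveSelected(uint64_t data_type_mask) {
  if ((data_type_mask & kDataTypeReports) != 0) {
    // clear queued reports
  }
  if ((data_type_mask & kDataTypeClients) != 0) {
    // clear endpoint (client) configuration
  }
}

int main() {
  // A bit at position 40 survives; a 32-bit int mask would drop it.
  RemoveSelected(kDataTypeReports | kDataTypeClients);
}
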
diff --git a/chromium/net/reporting/reporting_service_unittest.cc b/chromium/net/reporting/reporting_service_unittest.cc
index 96365b1adf0..0ac00ac44d7 100644
--- a/chromium/net/reporting/reporting_service_unittest.cc
+++ b/chromium/net/reporting/reporting_service_unittest.cc
@@ -44,11 +44,9 @@ class ReportingServiceTest : public ::testing::TestWithParam<bool>,
const std::string kGroup_ = "group";
const std::string kType_ = "type";
const ReportingEndpointGroupKey kGroupKey_ =
- ReportingEndpointGroupKey(NetworkIsolationKey::Todo(), kOrigin_, kGroup_);
+ ReportingEndpointGroupKey(NetworkIsolationKey(), kOrigin_, kGroup_);
const ReportingEndpointGroupKey kGroupKey2_ =
- ReportingEndpointGroupKey(NetworkIsolationKey::Todo(),
- kOrigin2_,
- kGroup_);
+ ReportingEndpointGroupKey(NetworkIsolationKey(), kOrigin2_, kGroup_);
ReportingServiceTest() {
if (GetParam())
diff --git a/chromium/net/reporting/reporting_test_util.cc b/chromium/net/reporting/reporting_test_util.cc
index 47ebd724330..fc3b0150182 100644
--- a/chromium/net/reporting/reporting_test_util.cc
+++ b/chromium/net/reporting/reporting_test_util.cc
@@ -334,12 +334,12 @@ void TestReportingService::ProcessHeader(const GURL& url,
}
void TestReportingService::RemoveBrowsingData(
- int data_type_mask,
+ uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>& origin_filter) {
NOTREACHED();
}
-void TestReportingService::RemoveAllBrowsingData(int data_type_mask) {
+void TestReportingService::RemoveAllBrowsingData(uint64_t data_type_mask) {
NOTREACHED();
}
diff --git a/chromium/net/reporting/reporting_test_util.h b/chromium/net/reporting/reporting_test_util.h
index 5f7c990e5f6..e58f449154a 100644
--- a/chromium/net/reporting/reporting_test_util.h
+++ b/chromium/net/reporting/reporting_test_util.h
@@ -320,10 +320,10 @@ class TestReportingService : public ReportingService {
void ProcessHeader(const GURL& url, const std::string& header_value) override;
void RemoveBrowsingData(
- int data_type_mask,
+ uint64_t data_type_mask,
const base::RepeatingCallback<bool(const GURL&)>& origin_filter) override;
- void RemoveAllBrowsingData(int data_type_mask) override;
+ void RemoveAllBrowsingData(uint64_t data_type_mask) override;
void OnShutdown() override;
diff --git a/chromium/net/reporting/reporting_uploader.cc b/chromium/net/reporting/reporting_uploader.cc
index aacec913e1f..a54bfc4fcbe 100644
--- a/chromium/net/reporting/reporting_uploader.cc
+++ b/chromium/net/reporting/reporting_uploader.cc
@@ -9,8 +9,6 @@
#include <vector>
#include "base/callback_helpers.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "net/base/elements_upload_data_stream.h"
@@ -78,27 +76,6 @@ ReportingUploader::Outcome ResponseCodeToOutcome(int response_code) {
return ReportingUploader::Outcome::FAILURE;
}
-enum class UploadOutcome {
- CANCELED_REDIRECT_TO_INSECURE_URL = 0,
- CANCELED_AUTH_REQUIRED = 1,
- CANCELED_CERTIFICATE_REQUESTED = 2,
- CANCELED_SSL_CERTIFICATE_ERROR = 3,
- CANCELED_REPORTING_SHUTDOWN = 4,
- FAILED = 5, // See Net.Reporting.UploadError for breakdown.
- SUCCEEDED_SUCCESS = 6,
- SUCCEEDED_REMOVE_ENDPOINT = 7,
- CORS_PREFLIGHT_ERROR = 8,
-
- MAX
-};
-
-void RecordUploadOutcome(UploadOutcome outcome) {
- UMA_HISTOGRAM_ENUMERATION("Net.Reporting.UploadOutcome", outcome,
- UploadOutcome::MAX);
-}
-
-// TODO: Record net and HTTP error.
-
struct PendingUpload {
enum State { CREATED, SENDING_PREFLIGHT, SENDING_PAYLOAD };
@@ -269,8 +246,6 @@ class ReportingUploaderImpl : public ReportingUploader, URLRequest::Delegate {
uploads_.erase(it);
if (net_error != OK) {
- RecordUploadOutcome(UploadOutcome::FAILED);
- base::UmaHistogramSparse("Net.Reporting.UploadError", net_error);
upload->RunCallback(ReportingUploader::Outcome::FAILURE);
return;
}
@@ -310,7 +285,6 @@ class ReportingUploaderImpl : public ReportingUploader, URLRequest::Delegate {
HasHeaderValues(request, "Access-Control-Allow-Headers",
{"content-type"});
if (!preflight_succeeded) {
- RecordUploadOutcome(UploadOutcome::CORS_PREFLIGHT_ERROR);
upload->RunCallback(ReportingUploader::Outcome::FAILURE);
return;
}
@@ -320,14 +294,6 @@ class ReportingUploaderImpl : public ReportingUploader, URLRequest::Delegate {
void HandlePayloadResponse(std::unique_ptr<PendingUpload> upload,
int response_code) {
- if (response_code >= 200 && response_code <= 299) {
- RecordUploadOutcome(UploadOutcome::SUCCEEDED_SUCCESS);
- } else if (response_code == 410) {
- RecordUploadOutcome(UploadOutcome::SUCCEEDED_REMOVE_ENDPOINT);
- } else {
- RecordUploadOutcome(UploadOutcome::FAILED);
- base::UmaHistogramSparse("Net.Reporting.UploadError", response_code);
- }
upload->RunCallback(ResponseCodeToOutcome(response_code));
}
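
The deleted histogram branches above spell out the response-code mapping that ResponseCodeToOutcome() implements: any 2xx is a success, 410 Gone tells the client to remove the endpoint, and everything else is a failure. As a self-contained sketch:

enum class Outcome { SUCCESS, REMOVE_ENDPOINT, FAILURE };

// Same branch structure as the UMA code deleted from
// HandlePayloadResponse(); ResponseCodeToOutcome() in the real file
// performs this mapping.
Outcome OutcomeForResponseCode(int response_code) {
  if (response_code >= 200 && response_code <= 299)
    return Outcome::SUCCESS;
  if (response_code == 410)  // 410 Gone: the collector asked to be removed.
    return Outcome::REMOVE_ENDPOINT;
  return Outcome::FAILURE;
}
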
diff --git a/chromium/net/reporting/reporting_uploader_unittest.cc b/chromium/net/reporting/reporting_uploader_unittest.cc
index 66b61e7e4ef..364585b3cfd 100644
--- a/chromium/net/reporting/reporting_uploader_unittest.cc
+++ b/chromium/net/reporting/reporting_uploader_unittest.cc
@@ -14,6 +14,7 @@
#include "base/test/scoped_feature_list.h"
#include "net/base/features.h"
#include "net/base/network_isolation_key.h"
+#include "net/cookies/cookie_inclusion_status.h"
#include "net/cookies/cookie_store.h"
#include "net/cookies/cookie_store_test_callbacks.h"
#include "net/http/http_status_code.h"
@@ -446,8 +447,7 @@ TEST_F(ReportingUploaderTest, DontSendCookies) {
server_.RegisterRequestHandler(base::BindRepeating(&ReturnResponse, HTTP_OK));
ASSERT_TRUE(server_.Start());
- ResultSavingCookieCallback<CanonicalCookie::CookieInclusionStatus>
- cookie_callback;
+ ResultSavingCookieCallback<CookieInclusionStatus> cookie_callback;
GURL url = server_.GetURL("/");
auto cookie = CanonicalCookie::Create(url, "foo=bar", base::Time::Now(),
base::nullopt /* server_time */);