Diffstat (limited to 'test/network_tests/npdu_tests')
-rw-r--r--  test/network_tests/npdu_tests/conf/npdu_test_client_no_npdu.json.in    39
-rw-r--r--  test/network_tests/npdu_tests/conf/npdu_test_client_npdu.json.in      167
-rw-r--r--  test/network_tests/npdu_tests/conf/npdu_test_service_no_npdu.json.in   87
-rw-r--r--  test/network_tests/npdu_tests/conf/npdu_test_service_npdu.json.in     168
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_client.cpp                    599
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_client.hpp                    103
-rwxr-xr-x  test/network_tests/npdu_tests/npdu_test_client_no_npdu_start.sh        82
-rwxr-xr-x  test/network_tests/npdu_tests/npdu_test_client_npdu_start.sh           75
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_globals.hpp                     45
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_rmd.cpp                        160
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_rmd.hpp                         45
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_service.cpp                    306
-rw-r--r--  test/network_tests/npdu_tests/npdu_test_service.hpp                     64
-rwxr-xr-x  test/network_tests/npdu_tests/npdu_test_service_no_npdu_start.sh        64
-rwxr-xr-x  test/network_tests/npdu_tests/npdu_test_service_npdu_start.sh           64
-rwxr-xr-x  test/network_tests/npdu_tests/npdu_test_starter.sh                      96
16 files changed, 2164 insertions, 0 deletions
diff --git a/test/network_tests/npdu_tests/conf/npdu_test_client_no_npdu.json.in b/test/network_tests/npdu_tests/conf/npdu_test_client_no_npdu.json.in
new file mode 100644
index 0000000..07cfe08
--- /dev/null
+++ b/test/network_tests/npdu_tests/conf/npdu_test_client_no_npdu.json.in
@@ -0,0 +1,39 @@
+{
+ "unicast":"@TEST_IP_SLAVE@",
+ "logging":
+ {
+ "level":"info",
+ "console":"true"
+ },
+ "applications":
+ [
+ {
+ "name":"npdu_test_routing_manager_daemon_client_side",
+ "id":"0x6666"
+ },
+ {
+ "name":"npdu_test_client_one",
+ "id":"0x1111"
+ },
+ {
+ "name":"npdu_test_client_two",
+ "id":"0x2222"
+ },
+ {
+ "name":"npdu_test_client_three",
+ "id":"0x3333"
+ },
+ {
+ "name":"npdu_test_client_four",
+ "id":"0x4444"
+ }
+ ],
+ "routing":"npdu_test_routing_manager_daemon_client_side",
+ "service-discovery":
+ {
+ "enable":"true",
+ "multicast":"224.0.0.1",
+ "port":"30490",
+ "protocol":"udp"
+ }
+}
diff --git a/test/network_tests/npdu_tests/conf/npdu_test_client_npdu.json.in b/test/network_tests/npdu_tests/conf/npdu_test_client_npdu.json.in
new file mode 100644
index 0000000..dc35023
--- /dev/null
+++ b/test/network_tests/npdu_tests/conf/npdu_test_client_npdu.json.in
@@ -0,0 +1,167 @@
+{
+ "unicast":"@TEST_IP_SLAVE@",
+ "logging":
+ {
+ "level":"info",
+ "console":"true"
+ },
+ "applications":
+ [
+ {
+ "name":"npdu_test_routing_manager_daemon_client_side",
+ "id":"0x6666"
+ },
+ {
+ "name":"npdu_test_client_one",
+ "id":"0x1111"
+ },
+ {
+ "name":"npdu_test_client_two",
+ "id":"0x2222"
+ },
+ {
+ "name":"npdu_test_client_three",
+ "id":"0x3333"
+ },
+ {
+ "name":"npdu_test_client_four",
+ "id":"0x4444"
+ }
+ ],
+ "services":
+ [
+ {
+ "service":"0x1000",
+ "instance":"0x0001",
+ "unicast":"@TEST_IP_MASTER@",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times":
+ {
+ "requests" : {
+ "0x1001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x1002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x1003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x1004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ },
+ {
+ "service":"0x2000",
+ "instance":"0x0002",
+ "unicast":"@TEST_IP_MASTER@",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times" : {
+ "requests" : {
+ "0x2001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x2002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x2003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x2004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ },
+ {
+ "service":"0x3000",
+ "instance":"0x0003",
+ "unicast":"@TEST_IP_MASTER@",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times" : {
+ "requests" : {
+ "0x3001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x3002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x3003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x3004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ },
+ {
+ "service":"0x4000",
+ "instance":"0x0004",
+ "unicast":"@TEST_IP_MASTER@",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times": {
+ "requests" : {
+ "0x4001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x4002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x4003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x4004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ }
+ ],
+ "routing":"npdu_test_routing_manager_daemon_client_side",
+ "service-discovery":
+ {
+ "enable":"true",
+ "multicast":"224.0.0.1",
+ "port":"30490",
+ "protocol":"udp"
+ }
+}
diff --git a/test/network_tests/npdu_tests/conf/npdu_test_service_no_npdu.json.in b/test/network_tests/npdu_tests/conf/npdu_test_service_no_npdu.json.in
new file mode 100644
index 0000000..b4c8eaa
--- /dev/null
+++ b/test/network_tests/npdu_tests/conf/npdu_test_service_no_npdu.json.in
@@ -0,0 +1,87 @@
+{
+ "unicast":"@TEST_IP_MASTER@",
+ "logging":
+ {
+ "level":"info",
+ "console":"true"
+ },
+ "applications":
+ [
+ {
+ "name":"npdu_test_routing_manager_daemon_service_side",
+ "id":"0x6667"
+ },
+ {
+ "name":"npdu_test_service_one",
+ "id":"0x1000"
+ },
+ {
+ "name":"npdu_test_service_two",
+ "id":"0x2000"
+ },
+ {
+ "name":"npdu_test_service_three",
+ "id":"0x3000"
+ },
+ {
+ "name":"npdu_test_service_four",
+ "id":"0x4000"
+ }
+ ],
+ "services":
+ [
+ {
+ "service":"0x6667",
+ "instance":"0x6666",
+ "unreliable":"60666"
+ },
+ {
+ "service":"0x1000",
+ "instance":"0x0001",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ }
+ },
+ {
+ "service":"0x2000",
+ "instance":"0x0002",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ }
+ },
+ {
+ "service":"0x3000",
+ "instance":"0x0003",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ }
+ },
+ {
+ "service":"0x4000",
+ "instance":"0x0004",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ }
+ }
+ ],
+ "routing":"npdu_test_routing_manager_daemon_service_side",
+ "service-discovery":
+ {
+ "enable":"true",
+ "multicast":"224.0.0.1",
+ "port":"30490",
+ "protocol":"udp"
+ }
+}
diff --git a/test/network_tests/npdu_tests/conf/npdu_test_service_npdu.json.in b/test/network_tests/npdu_tests/conf/npdu_test_service_npdu.json.in
new file mode 100644
index 0000000..0de75cf
--- /dev/null
+++ b/test/network_tests/npdu_tests/conf/npdu_test_service_npdu.json.in
@@ -0,0 +1,168 @@
+{
+ "unicast":"@TEST_IP_MASTER@",
+ "logging":
+ {
+ "level":"info",
+ "console":"true"
+ },
+ "applications":
+ [
+ {
+ "name":"npdu_test_routing_manager_daemon_service_side",
+ "id":"0x6667"
+ },
+ {
+ "name":"npdu_test_service_one",
+ "id":"0x1000"
+ },
+ {
+ "name":"npdu_test_service_two",
+ "id":"0x2000"
+ },
+ {
+ "name":"npdu_test_service_three",
+ "id":"0x3000"
+ },
+ {
+ "name":"npdu_test_service_four",
+ "id":"0x4000"
+ }
+ ],
+ "services":
+ [
+ {
+ "service":"0x6667",
+ "instance":"0x6666",
+ "unreliable":"60666"
+ },
+ {
+ "service":"0x1000",
+ "instance":"0x0001",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times":
+ {
+ "responses" : {
+ "0x1001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x1002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x1003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x1004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ },
+ {
+ "service":"0x2000",
+ "instance":"0x0002",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times" : {
+ "responses" : {
+ "0x2001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x2002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x2003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x2004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ },
+ {
+ "service":"0x3000",
+ "instance":"0x0003",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times" : {
+ "responses" : {
+ "0x3001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x3002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x3003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x3004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ },
+ {
+ "service":"0x4000",
+ "instance":"0x0004",
+ "unreliable":"30509",
+ "reliable":
+ {
+ "port":"30510",
+ "enable-magic-cookies":"false"
+ },
+ "debounce-times": {
+ "responses" : {
+ "0x4001" : {
+ "debounce-time" : "10",
+ "maximum-retention-time" : "100"
+ },
+ "0x4002" : {
+ "debounce-time" : "20",
+ "maximum-retention-time" : "200"
+ },
+ "0x4003" : {
+ "debounce-time" : "30",
+ "maximum-retention-time" : "300"
+ },
+ "0x4004" : {
+ "debounce-time" : "40",
+ "maximum-retention-time" : "400"
+ }
+ }
+ }
+ }
+ ],
+ "routing":"npdu_test_routing_manager_daemon_service_side",
+ "service-discovery":
+ {
+ "enable":"true",
+ "multicast":"224.0.0.1",
+ "port":"30490",
+ "protocol":"udp"
+ }
+}
diff --git a/test/network_tests/npdu_tests/npdu_test_client.cpp b/test/network_tests/npdu_tests/npdu_test_client.cpp
new file mode 100644
index 0000000..d89d4f4
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_client.cpp
@@ -0,0 +1,599 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#include "../npdu_tests/npdu_test_client.hpp"
+
+#include <vsomeip/internal/logger.hpp>
+#include "../../implementation/configuration/include/configuration.hpp"
+#include "../../implementation/configuration/include/configuration_impl.hpp"
+#include "../../implementation/configuration/include/configuration_plugin.hpp"
+#include "../../implementation/plugin/include/plugin_manager_impl.hpp"
+
+enum class payloadsize
+ : std::uint8_t
+ {
+ UDS, TCP, UDP
+};
+
+// these variables are changed via cmdline parameters
+static bool use_tcp = false;
+static bool call_service_sync = true;
+static bool wait_for_replies = true;
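+// sliding_window_size defaults to the total number of messages to send and can
+// be overridden via --sliding-window-size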
+static std::uint32_t sliding_window_size = vsomeip_test::NUMBER_OF_MESSAGES_TO_SEND;
+static payloadsize max_payload_size = payloadsize::UDS;
+static bool shutdown_service_at_end = true;
+
+npdu_test_client::npdu_test_client(
+ bool _use_tcp,
+ bool _call_service_sync,
+ std::uint32_t _sliding_window_size,
+ bool _wait_for_replies,
+ std::array<std::array<std::chrono::milliseconds, 4>, 4> _applicative_debounce) :
+ app_(vsomeip::runtime::get()->create_application()),
+ request_(vsomeip::runtime::get()->create_request(_use_tcp)),
+ call_service_sync_(_call_service_sync),
+ wait_for_replies_(_wait_for_replies),
+ sliding_window_size_(_sliding_window_size),
+ blocked_({false}),
+ is_available_({false}), // will set first element to false, rest to 0
+ number_of_messages_to_send_(vsomeip_test::NUMBER_OF_MESSAGES_TO_SEND),
+ number_of_sent_messages_{0,0,0,0},
+ number_of_acknowledged_messages_{{{0,0,0,0},{0,0,0,0},{0,0,0,0},{0,0,0,0}}},
+ current_payload_size_({0}),
+ all_msg_acknowledged_({false, false, false, false}),
+ acknowledgements_{0,0,0,0},
+ applicative_debounce_(_applicative_debounce),
+ finished_waiter_(&npdu_test_client::wait_for_all_senders, this)
+{
+ senders_[0] = std::thread(&npdu_test_client::run<0>, this);
+ senders_[1] = std::thread(&npdu_test_client::run<1>, this);
+ senders_[2] = std::thread(&npdu_test_client::run<2>, this);
+ senders_[3] = std::thread(&npdu_test_client::run<3>, this);
+}
+
+npdu_test_client::~npdu_test_client() {
+ finished_waiter_.join();
+}
+
+void npdu_test_client::init()
+{
+ app_->init();
+
+ app_->register_state_handler(
+ std::bind(&npdu_test_client::on_state, this,
+ std::placeholders::_1));
+
+ register_availability_handler<0>();
+ register_availability_handler<1>();
+ register_availability_handler<2>();
+ register_availability_handler<3>();
+
+ register_message_handler_for_all_service_methods<0>();
+ register_message_handler_for_all_service_methods<1>();
+ register_message_handler_for_all_service_methods<2>();
+ register_message_handler_for_all_service_methods<3>();
+
+ request_->set_service(vsomeip_test::TEST_SERVICE_SERVICE_ID);
+ request_->set_instance(vsomeip_test::TEST_SERVICE_INSTANCE_ID);
+ if(!wait_for_replies_)
+ request_->set_message_type(vsomeip::message_type_e::MT_REQUEST_NO_RETURN);
+}
+
+template<int service_idx>
+void npdu_test_client::register_availability_handler() {
+ app_->register_availability_handler(npdu_test::service_ids[service_idx],
+ npdu_test::instance_ids[service_idx],
+ std::bind(
+ &npdu_test_client::on_availability<service_idx>,
+ this, std::placeholders::_1, std::placeholders::_2,
+ std::placeholders::_3));
+}
+
+template<int service_idx>
+void npdu_test_client::register_message_handler_for_all_service_methods() {
+ register_message_handler<service_idx, 0>();
+ register_message_handler<service_idx, 1>();
+ register_message_handler<service_idx, 2>();
+ register_message_handler<service_idx, 3>();
+}
+
+template<int service_idx, int method_idx>
+void npdu_test_client::register_message_handler() {
+ app_->register_message_handler(npdu_test::service_ids[service_idx],
+ npdu_test::instance_ids[service_idx],
+ npdu_test::method_ids[service_idx][method_idx],
+ std::bind(
+ &npdu_test_client::on_message<service_idx, method_idx>,
+ this, std::placeholders::_1));
+}
+
+void npdu_test_client::start()
+{
+ VSOMEIP_INFO << "Starting...";
+ app_->start();
+}
+
+void npdu_test_client::stop()
+{
+ VSOMEIP_INFO << "Stopping...";
+
+ app_->unregister_state_handler();
+
+ for (unsigned int i = 0; i< npdu_test::service_ids.size(); i++) {
+ app_->unregister_availability_handler(npdu_test::service_ids[i],
+ npdu_test::instance_ids[i]);
+
+ for(unsigned int j = 0; j < npdu_test::method_ids[i].size(); j++) {
+ app_->unregister_message_handler(npdu_test::service_ids[i],
+ npdu_test::instance_ids[i],
+ npdu_test::method_ids[i][j]);
+ }
+ }
+
+ if(shutdown_service_at_end) {
+ // notify the routing manager daemon that we're finished
+ request_->set_service(npdu_test::RMD_SERVICE_ID_CLIENT_SIDE);
+ request_->set_instance(npdu_test::RMD_INSTANCE_ID);
+ request_->set_method(npdu_test::RMD_SHUTDOWN_METHOD_ID);
+ request_->set_payload(vsomeip::runtime::get()->create_payload());
+ request_->set_message_type(vsomeip::message_type_e::MT_REQUEST_NO_RETURN);
+ app_->send(request_);
+ // sleep, otherwise the app will shut down before the message reaches the RMD
+ std::this_thread::sleep_for(std::chrono::seconds(5));
+ }
+ app_->stop();
+}
+
+void npdu_test_client::join_sender_thread() {
+ for (auto& t : senders_) {
+ t.join();
+ }
+}
+
+void npdu_test_client::on_state(vsomeip::state_type_e _state)
+{
+ if(_state == vsomeip::state_type_e::ST_REGISTERED)
+ {
+ for (unsigned int i = 0; i< npdu_test::service_ids.size(); i++) {
+ app_->request_service(npdu_test::service_ids[i],
+ npdu_test::instance_ids[i]);
+ }
+ }
+}
+
+template<int service_idx>
+void npdu_test_client::on_availability(vsomeip::service_t _service,
+ vsomeip::instance_t _instance, bool _is_available)
+{
+ VSOMEIP_INFO<< "Service [" << std::setw(4) << std::setfill('0') << std::hex
+ << _service << "." << std::setw(4) << std::setfill('0') << _instance << "] is "
+ << (_is_available ? "available." : "NOT available.");
+ if(npdu_test::service_ids[service_idx] == _service
+ && npdu_test::instance_ids[service_idx] == _instance) {
+ if(is_available_[service_idx] && !_is_available)
+ {
+ is_available_[service_idx] = false;
+ }
+ else if(_is_available && !is_available_[service_idx])
+ {
+ is_available_[service_idx] = true;
+ send<service_idx>();
+ }
+ }
+}
+
+template<int service_idx, int method_idx>
+void npdu_test_client::on_message(const std::shared_ptr<vsomeip::message>& _response) {
+ (void)_response;
+ //TODO make sure the replies were sent within demanded debounce times
+ VSOMEIP_DEBUG << "Received reply from:" << std::setw(4) << std::setfill('0')
+ << std::hex << npdu_test::service_ids[service_idx] << ":"
+ << std::setw(4) << std::setfill('0') << std::hex
+ << npdu_test::instance_ids[service_idx] << ":" << std::setw(4)
+ << std::setfill('0') << std::hex
+ << npdu_test::method_ids[service_idx][method_idx];
+
+ if(call_service_sync_)
+ {
+ // We notify the sender thread every time a message was acknowledged
+ std::lock_guard<std::mutex> lk(all_msg_acknowledged_mutexes_[service_idx][method_idx]);
+ all_msg_acknowledged_[service_idx][method_idx] = true;
+ all_msg_acknowledged_cvs_[service_idx][method_idx].notify_one();
+ }
+ else
+ {
+
+ std::lock_guard<std::mutex> its_lock(number_of_acknowledged_messages_mutexes_[service_idx][method_idx]);
+ number_of_acknowledged_messages_[service_idx][method_idx]++;
+
+ // We notify the sender thread only if all sent messages have been acknowledged
+ if(number_of_acknowledged_messages_[service_idx][method_idx] == number_of_messages_to_send_)
+ {
+ std::lock_guard<std::mutex> lk(all_msg_acknowledged_mutexes_[service_idx][method_idx]);
+ // reset
+ number_of_acknowledged_messages_[service_idx][method_idx] = 0;
+ all_msg_acknowledged_[service_idx][method_idx] = true;
+ all_msg_acknowledged_cvs_[service_idx][method_idx].notify_one();
+ } else if(number_of_acknowledged_messages_[service_idx][method_idx] % sliding_window_size == 0)
+ {
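+ // otherwise unblock the sender after every sliding_window_size
+ // acknowledgements so it can send the next window of requests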
+ std::lock_guard<std::mutex> lk(all_msg_acknowledged_mutexes_[service_idx][method_idx]);
+ all_msg_acknowledged_[service_idx][method_idx] = true;
+ all_msg_acknowledged_cvs_[service_idx][method_idx].notify_one();
+ }
+ }
+}
+
+template<int service_idx>
+void npdu_test_client::send()
+{
+ std::lock_guard<std::mutex> its_lock(mutexes_[service_idx]);
+ blocked_[service_idx] = true;
+ conditions_[service_idx].notify_one();
+}
+
+template<int service_idx>
+void npdu_test_client::run()
+{
+ std::unique_lock<std::mutex> its_lock(mutexes_[service_idx]);
+ while (!blocked_[service_idx])
+ {
+ conditions_[service_idx].wait(its_lock);
+ }
+ current_payload_size_[service_idx] = 1;
+
+ std::uint32_t max_allowed_payload = get_max_allowed_payload();
+
+ for (int var = 0; var < 4; ++var) {
+ payloads_[service_idx][var] = vsomeip::runtime::get()->create_payload();
+ payload_data_[service_idx][var] = std::vector<vsomeip::byte_t>();
+ }
+
+ bool lastrun = false;
+ while (current_payload_size_[service_idx] <= max_allowed_payload)
+ {
+ // prepare the payloads w/ current payloadsize
+ for (int var = 0; var < 4; ++var) {
+ // assign 0x11 to first, 0x22 to second...
+ payload_data_[service_idx][var].assign(
+ current_payload_size_[service_idx], static_cast<vsomeip::byte_t>(0x11 * (var + 1)));
+ payloads_[service_idx][var]->set_data(payload_data_[service_idx][var]);
+ }
+
+ // send the payloads to the service's methods
+ if(wait_for_replies_) {
+ call_service_sync_ ? send_messages_sync<service_idx>() : send_messages_async<service_idx>();
+ } else {
+ send_messages_and_dont_wait_for_reply<service_idx>();
+ }
+
+ // Increase array size for next iteration
+ current_payload_size_[service_idx] *= 2;
+
+ // special case: test the biggest possible payload as the last test
+ // 16 Bytes are reserved for the SOME/IP header
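+ // (the final iteration therefore sends max_allowed_payload - 16 bytes)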
+ if(current_payload_size_[service_idx] > max_allowed_payload - 16 && !lastrun)
+ {
+ current_payload_size_[service_idx] = max_allowed_payload - 16;
+ lastrun = true;
+ }
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ }
+ blocked_[service_idx] = false;
+
+ {
+ std::lock_guard<std::mutex> its_lock(finished_mutex_);
+ finished_[service_idx] = true;
+ }
+}
+
+
+std::uint32_t npdu_test_client::get_max_allowed_payload()
+{
+ std::uint32_t payload;
+ switch (max_payload_size)
+ {
+ case payloadsize::UDS:
+ payload = VSOMEIP_MAX_LOCAL_MESSAGE_SIZE;
+ break;
+ case payloadsize::TCP:
+ payload = 4095;
+ break;
+ case payloadsize::UDP:
+ payload = VSOMEIP_MAX_UDP_MESSAGE_SIZE;
+ break;
+ default:
+ payload = VSOMEIP_MAX_LOCAL_MESSAGE_SIZE;
+ break;
+ }
+ return payload;
+}
+
+template<int service_idx>
+void npdu_test_client::send_messages_sync()
+{
+ std::thread t0 = start_send_thread_sync<service_idx, 0>();
+ std::thread t1 = start_send_thread_sync<service_idx, 1>();
+ std::thread t2 = start_send_thread_sync<service_idx, 2>();
+ std::thread t3 = start_send_thread_sync<service_idx, 3>();
+ t0.join();
+ t1.join();
+ t2.join();
+ t3.join();
+}
+
+template<int service_idx, int method_idx>
+std::thread npdu_test_client::start_send_thread_sync() {
+ return std::thread([&]() {
+ all_msg_acknowledged_unique_locks_[service_idx][method_idx] =
+ std::unique_lock<std::mutex>
+ (all_msg_acknowledged_mutexes_[service_idx][method_idx]);
+
+ std::shared_ptr<vsomeip::message> request = vsomeip::runtime::get()->create_request(use_tcp);
+ request->set_service(npdu_test::service_ids[service_idx]);
+ request->set_instance(npdu_test::instance_ids[service_idx]);
+ request->set_method(npdu_test::method_ids[service_idx][method_idx]);
+ request->set_payload(payloads_[service_idx][method_idx]);
+ for (std::uint32_t i = 0; i < number_of_messages_to_send_; i++)
+ {
+ all_msg_acknowledged_[service_idx][method_idx] = false;
+ app_->send(request);
+
+ std::chrono::high_resolution_clock::time_point sent =
+ std::chrono::high_resolution_clock::now();
+
+ while(!all_msg_acknowledged_[service_idx][method_idx]) {
+ all_msg_acknowledged_cvs_[service_idx][method_idx].wait(
+ all_msg_acknowledged_unique_locks_[service_idx][method_idx]);
+ }
+
+ std::chrono::nanoseconds waited_for_response =
+ std::chrono::high_resolution_clock::now() - sent;
+ if(waited_for_response < applicative_debounce_[service_idx][method_idx]) {
+ // make sure we don't send faster than debounce time + max retention time
+ std::this_thread::sleep_for(
+ applicative_debounce_[service_idx][method_idx]
+ - waited_for_response);
+ }
+ }
+ all_msg_acknowledged_unique_locks_[service_idx][method_idx].unlock();
+ });
+}
+
+template<int service_idx>
+void npdu_test_client::send_messages_async()
+{
+ std::thread t0 = start_send_thread_async<service_idx, 0>();
+ std::thread t1 = start_send_thread_async<service_idx, 1>();
+ std::thread t2 = start_send_thread_async<service_idx, 2>();
+ std::thread t3 = start_send_thread_async<service_idx, 3>();
+ t0.join();
+ t1.join();
+ t2.join();
+ t3.join();
+}
+
+template<int service_idx, int method_idx>
+std::thread npdu_test_client::start_send_thread_async() {
+ return std::thread([&]() {
+ all_msg_acknowledged_unique_locks_[service_idx][method_idx] =
+ std::unique_lock<std::mutex>
+ (all_msg_acknowledged_mutexes_[service_idx][method_idx]);
+ std::shared_ptr<vsomeip::message> request = vsomeip::runtime::get()->create_request(use_tcp);
+ request->set_service(npdu_test::service_ids[service_idx]);
+ request->set_instance(npdu_test::instance_ids[service_idx]);
+ request->set_method(npdu_test::method_ids[service_idx][method_idx]);
+ request->set_payload(payloads_[service_idx][method_idx]);
+ for (std::uint32_t i = 0; i < number_of_messages_to_send_; i++)
+ {
+ app_->send(request);
+
+ if((i+1) == number_of_messages_to_send_ || (i+1) % sliding_window_size == 0) {
+ // wait until all sent messages have been acknowledged;
+ // as long as we wait the lock is released; after wait returns it is reacquired
+ while(!all_msg_acknowledged_[service_idx][method_idx]) {
+ all_msg_acknowledged_cvs_[service_idx][method_idx].wait(
+ all_msg_acknowledged_unique_locks_[service_idx][method_idx]);
+ }
+ // Reset the acknowledgement flag
+ all_msg_acknowledged_[service_idx][method_idx] = false;
+ }
+ // make sure we don't send faster than debounce time + max retention time
+ std::this_thread::sleep_for(applicative_debounce_[service_idx][method_idx]);
+ }
+ all_msg_acknowledged_unique_locks_[service_idx][method_idx].unlock();
+ });
+}
+
+template<int service_idx>
+void npdu_test_client::send_messages_and_dont_wait_for_reply()
+{
+ std::thread t0 = start_send_thread<service_idx, 0>();
+ std::thread t1 = start_send_thread<service_idx, 1>();
+ std::thread t2 = start_send_thread<service_idx, 2>();
+ std::thread t3 = start_send_thread<service_idx, 3>();
+ t0.join();
+ t1.join();
+ t2.join();
+ t3.join();
+}
+
+template<int service_idx, int method_idx>
+std::thread npdu_test_client::start_send_thread() {
+ return std::thread([&]() {
+ std::shared_ptr<vsomeip::message> request = vsomeip::runtime::get()->create_request(use_tcp);
+ request->set_service(npdu_test::service_ids[service_idx]);
+ request->set_instance(npdu_test::instance_ids[service_idx]);
+ request->set_message_type(vsomeip::message_type_e::MT_REQUEST_NO_RETURN);
+ request->set_method(npdu_test::method_ids[service_idx][method_idx]);
+ request->set_payload(payloads_[service_idx][method_idx]);
+ for (std::uint32_t i = 0; i < number_of_messages_to_send_; i++)
+ {
+ app_->send(request);
+ // make sure we don't send faster than debounce time + max retention time
+ std::this_thread::sleep_for(applicative_debounce_[service_idx][method_idx]);
+ }
+ });
+}
+
+void npdu_test_client::wait_for_all_senders() {
+ bool all_finished(false);
+ while (!all_finished) {
+ {
+ std::lock_guard<std::mutex> its_lock(finished_mutex_);
+ if (std::all_of(finished_.begin(), finished_.end(), [](bool i) { return i; })) {
+ all_finished = true;
+ }
+ }
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ }
+ join_sender_thread();
+
+ if (!wait_for_replies_ || !call_service_sync_) {
+ // sleep longer here as sending is asynchronous and it's necessary
+ // to wait until all messages have left the application
+ VSOMEIP_INFO << "Sleeping for 180sec since the client is running "
+ "in --dont-wait-for-replies or --async mode. "
+ "Otherwise it might be possible that not all messages leave the "
+ "application.";
+ for(int i = 0; i < 180; i++) {
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ std::cout << ".";
+ std::cout.flush();
+ }
+ } else {
+ std::this_thread::sleep_for(std::chrono::seconds(5));
+ }
+ stop();
+}
+
+TEST(someip_npdu_test, send_different_payloads)
+{
+ // get the configuration
+ std::shared_ptr<vsomeip::configuration> its_configuration;
+ auto its_plugin = vsomeip::plugin_manager::get()->get_plugin(
+ vsomeip::plugin_type_e::CONFIGURATION_PLUGIN, VSOMEIP_CFG_LIBRARY);
+ if (its_plugin) {
+ auto its_config_plugin = std::dynamic_pointer_cast<vsomeip::configuration_plugin>(its_plugin);
+ if (its_config_plugin) {
+ its_configuration = its_config_plugin->get_configuration("","");
+ }
+ }
+ if (!its_configuration) {
+ ADD_FAILURE() << "No configuration object. "
+ "Either memory overflow or loading error detected!";
+ return;
+ }
+
+ // used to store the debounce times
+ std::array<std::array<std::chrono::milliseconds, 4>, 4> applicative_debounce;
+
+ // query the debounce times from the configuration. We want to know the
+ // debounce times which the _clients_ of these services have to comply with
+ // when they send requests to them.
+ // This is necessary as we must ensure an applicative debounce greater than
+ // debounce time + maximum retention time. Therefore the send threads sleep
+ // for this amount of time after sending a message.
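+ // e.g. for method 0x1001 the npdu client configuration above specifies a
+ // debounce time of 10 and a maximum retention time of 100, so the sender
+ // thread sleeps at least their sum plus one millisecond between requests
+ // to that method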
+ for(int service_id = 0; service_id < 4; service_id++) {
+ for(int method_id = 0; method_id < 4; method_id++) {
+ std::chrono::nanoseconds debounce(0), retention(0);
+ its_configuration->get_configured_timing_requests(
+ npdu_test::service_ids[service_id],
+ its_configuration->get_unicast_address(npdu_test::service_ids[service_id],
+ npdu_test::instance_ids[service_id]),
+ its_configuration->get_unreliable_port(
+ npdu_test::service_ids[service_id],
+ npdu_test::instance_ids[service_id]),
+ npdu_test::method_ids[service_id][method_id],
+ &debounce, &retention);
+ if (debounce == std::chrono::nanoseconds(VSOMEIP_DEFAULT_NPDU_DEBOUNCING_NANO) &&
+ retention == std::chrono::nanoseconds(VSOMEIP_DEFAULT_NPDU_MAXIMUM_RETENTION_NANO)) {
+ // no timings specified, don't sleep after sending...
+ applicative_debounce[service_id][method_id] =
+ std::chrono::milliseconds(0);
+ } else {
+ // we add 1 millisecond to sleep a little bit longer
+ applicative_debounce[service_id][method_id] = std::chrono::duration_cast<
+ std::chrono::milliseconds>(debounce + retention)
+ + std::chrono::milliseconds(1);
+
+ }
+
+ }
+ }
+
+ npdu_test_client test_client_(use_tcp, call_service_sync,
+ sliding_window_size, wait_for_replies,
+ applicative_debounce);
+ test_client_.init();
+ test_client_.start();
+}
+
+
+#if defined(__linux__) || defined(ANDROID)
+int main(int argc, char** argv)
+{
+ std::string tcp_enable("--TCP");
+ std::string udp_enable("--UDP");
+ std::string sync_enable("--sync");
+ std::string async_enable("--async");
+ std::string no_reply_enable("--dont-wait-for-replies");
+ std::string sliding_window_size_param("--sliding-window-size");
+ std::string max_payload_size_param("--max-payload-size");
+ std::string shutdown_service_disable_param("--dont-shutdown-service");
+ std::string help("--help");
+
+ int i = 1;
+ while (i < argc)
+ {
+ if (tcp_enable == argv[i]) {
+ use_tcp = true;
+ } else if (udp_enable == argv[i]) {
+ use_tcp = false;
+ } else if (sync_enable == argv[i]) {
+ call_service_sync = true;
+ } else if (async_enable == argv[i]) {
+ call_service_sync = false;
+ } else if (no_reply_enable == argv[i]) {
+ wait_for_replies = false;
+ } else if (sliding_window_size_param == argv[i] && i + 1 < argc) {
+ i++;
+ std::stringstream converter(argv[i]);
+ converter >> sliding_window_size;
+ } else if (max_payload_size_param == argv[i] && i + 1 < argc) {
+ i++;
+ if (std::string("UDS") == argv[i]) {
+ max_payload_size = payloadsize::UDS;
+ } else if (std::string("TCP") == argv[i]) {
+ max_payload_size = payloadsize::TCP;
+ } else if (std::string("UDP") == argv[i]) {
+ max_payload_size = payloadsize::UDP;
+ }
+ } else if (shutdown_service_disable_param == argv[i]) {
+ shutdown_service_at_end = false;
+ } else if (help == argv[i]) {
+ VSOMEIP_INFO << "Parameters:\n"
+ << "--TCP: Send messages via TCP\n"
+ << "--UDP: Send messages via UDP (default)\n"
+ << "--sync: Wait for acknowledge before sending next message (default)\n"
+ << "--async: Send multiple messages w/o waiting for"
+ " acknowledge of service\n"
+ << "--dont-wait-for-replies: Just send out the messages w/o waiting for "
+ "a reply by the service (use REQUEST_NO_RETURN message type)\n"
+ << "--sliding-window-size: Number of messages to send before waiting "
+ "for acknowledge of service. Default: " << sliding_window_size << "\n"
+ << "--max-payload-size: limit the maximum payload size of sent requests. One of {"
+ "UDS (=" << VSOMEIP_MAX_LOCAL_MESSAGE_SIZE << "byte), "
+ "UDP (=" << VSOMEIP_MAX_UDP_MESSAGE_SIZE << "byte), "
+ "TCP (=" << VSOMEIP_MAX_TCP_MESSAGE_SIZE << "byte)}, default: UDS\n"
+ << "--dont-shutdown-service: Don't shutdown the service upon "
+ "finishing of the payload test\n"
+ << "--help: print this help";
+ }
+ i++;
+ }
+
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+#endif
diff --git a/test/network_tests/npdu_tests/npdu_test_client.hpp b/test/network_tests/npdu_tests/npdu_test_client.hpp
new file mode 100644
index 0000000..980c16a
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_client.hpp
@@ -0,0 +1,103 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef NPDUTESTCLIENT_HPP_
+#define NPDUTESTCLIENT_HPP_
+
+#include <gtest/gtest.h>
+
+#include <vsomeip/vsomeip.hpp>
+
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <functional>
+#include <map>
+
+#include "../npdu_tests/npdu_test_globals.hpp"
+#include "../someip_test_globals.hpp"
+
+class npdu_test_client
+{
+public:
+ npdu_test_client(bool _use_tcp, bool _call_service_sync,
+ std::uint32_t _sliding_window_size,
+ bool _wait_for_replies,
+ std::array<std::array<std::chrono::milliseconds, 4>, 4> _applicative_debounce);
+ ~npdu_test_client();
+ void init();
+ void start();
+ void stop();
+ void join_sender_thread();
+ void on_state(vsomeip::state_type_e _state);
+ template<int service_idx> void on_availability(vsomeip::service_t _service,
+ vsomeip::instance_t _instance,
+ bool _is_available);
+ template<int service_idx, int method_idx> void on_message(
+ const std::shared_ptr<vsomeip::message> &_response);
+ template<int service_idx> void send();
+ template<int service_idx> void run();
+
+private:
+ template<int service_idx> void send_messages_sync();
+ template<int service_idx, int method_idx> std::thread start_send_thread_sync();
+ template<int service_idx> void send_messages_async();
+ template<int service_idx, int method_idx> std::thread start_send_thread_async();
+ template<int service_idx> void send_messages_and_dont_wait_for_reply();
+ std::uint32_t get_max_allowed_payload();
+ template<int service_idx> void register_availability_handler();
+ template<int service_idx> void register_message_handler_for_all_service_methods();
+ template<int service_idx, int method_idx> void register_message_handler();
+ template<int service_idx, int method_idx>
+ std::thread start_send_thread();
+ void wait_for_all_senders();
+
+private:
+ std::shared_ptr<vsomeip::application> app_;
+ std::shared_ptr<vsomeip::message> request_;
+ bool call_service_sync_;
+ bool wait_for_replies_;
+ std::uint32_t sliding_window_size_;
+
+ std::array<std::mutex, npdu_test::service_ids.size()> mutexes_;
+ std::array<std::condition_variable, npdu_test::service_ids.size()> conditions_;
+ std::array<bool, npdu_test::service_ids.size()> blocked_;
+ std::array<bool, npdu_test::service_ids.size()> is_available_;
+ const std::uint32_t number_of_messages_to_send_;
+ std::uint32_t number_of_sent_messages_[npdu_test::service_ids.size()];
+ std::array<std::array<std::uint32_t, npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> number_of_acknowledged_messages_;
+ std::array<std::array<std::mutex, npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> number_of_acknowledged_messages_mutexes_;
+
+ std::array<std::uint32_t, npdu_test::service_ids.size()> current_payload_size_;
+
+ std::array<std::array<bool, npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> all_msg_acknowledged_;
+ std::array<std::array<std::mutex, npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> all_msg_acknowledged_mutexes_;
+ std::array<std::array<std::unique_lock<std::mutex>, npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> all_msg_acknowledged_unique_locks_;
+ std::array<
+ std::array<std::condition_variable,
+ npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> all_msg_acknowledged_cvs_;
+ std::array<std::uint32_t, 4> acknowledgements_;
+ std::array<std::array<std::chrono::milliseconds, 4>, 4> applicative_debounce_;
+ std::array<
+ std::array<std::shared_ptr<vsomeip::payload>,
+ npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> payloads_;
+ std::array<
+ std::array<std::vector<vsomeip::byte_t>,
+ npdu_test::method_ids[0].size()>,
+ npdu_test::service_ids.size()> payload_data_;
+ std::array<std::thread, npdu_test::service_ids.size()> senders_;
+ std::mutex finished_mutex_;
+ std::array<bool, npdu_test::service_ids.size()> finished_;
+ std::thread finished_waiter_;
+};
+
+#endif /* NPDUTESTCLIENT_HPP_ */
diff --git a/test/network_tests/npdu_tests/npdu_test_client_no_npdu_start.sh b/test/network_tests/npdu_tests/npdu_test_client_no_npdu_start.sh
new file mode 100755
index 0000000..bc84421
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_client_no_npdu_start.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+# Copyright (C) 2015 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Purpose: This script is needed to start the routing manager daemon and the
+# clients with one command. This is necessary as ctest - which is used to run
+# the tests - isn't able to start multiple binaries for one testcase. Therefore
+# the testcase simply executes this script. This script then runs the routing
+# manager daemon and the clients and checks if all of them exit successfully.
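+#
+# Typical invocation: ./npdu_test_client_no_npdu_start.sh UDP sync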
+
+FAIL=0
+
+if [ $# -lt 2 ]; then
+ echo "Error: Please pass a protocol and communication mode to this script."
+ echo "Valid protocols are [UDP,TCP]."
+ echo "Valid communication modes are [sync, async]."
+ echo "For example $> $0 UDP sync"
+ exit 1;
+fi
+
+FAIL=0
+PROTOCOL=$1
+COMMUNICATION_MODE=$2
+
+start_clients(){
+ export VSOMEIP_CONFIGURATION=npdu_test_client_no_npdu.json
+
+ # Start the routing manager daemon
+ export VSOMEIP_APPLICATION_NAME=npdu_test_routing_manager_daemon_client_side
+ ./npdu_test_rmd_client_side &
+
+ # sleep 1 second to let the RMD start up.
+ sleep 1
+ # Start client 1
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_one
+ ./npdu_test_client_1 $* &
+
+ # Start client 2
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_two
+ ./npdu_test_client_2 $* &
+
+ # Start client 3
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_three
+ ./npdu_test_client_3 $* &
+
+ # Start client 4
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_four
+ ./npdu_test_client_4 $* &
+}
+
+wait_for_bg_processes(){
+ # Wait until client and service are finished
+ for job in $(jobs -p)
+ do
+ # FAIL gets incremented if one of the jobs exits
+ # with a non-zero exit code
+ wait $job || ((FAIL+=1))
+ done
+
+ # Check if everything exited successfully
+ if [ $FAIL -eq 0 ]
+ then
+ echo "All clients exited successfully"
+ else
+ echo "Something went wrong"
+ exit 1
+ fi
+}
+
+echo "Contacting services via $PROTOCOL"
+start_clients --$PROTOCOL --max-payload-size $PROTOCOL --$COMMUNICATION_MODE
+wait_for_bg_processes
+
+exit 0
diff --git a/test/network_tests/npdu_tests/npdu_test_client_npdu_start.sh b/test/network_tests/npdu_tests/npdu_test_client_npdu_start.sh
new file mode 100755
index 0000000..70b6c53
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_client_npdu_start.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Purpose: This script is needed to start the routing manager daemon and the
+# clients with one command. This is necessary as ctest - which is used to run
+# the tests - isn't able to start multiple binaries for one testcase. Therefore
+# the testcase simply executes this script. This script then runs the routing
+# manager daemon and the clients and checks if all of them exit successfully.
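+#
+# Typical invocation: ./npdu_test_client_npdu_start.sh UDP sync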
+
+if [ $# -lt 2 ]; then
+ echo "Error: Please pass a protocol and communication mode to this script."
+ echo "Valid protocols are [UDP,TCP]."
+ echo "Valid communication modes are [sync, async]."
+ echo "For example $> $0 UDP sync"
+ exit 1;
+fi
+
+FAIL=0
+PROTOCOL=$1
+COMMUNICATION_MODE=$2
+
+start_clients(){
+ export VSOMEIP_CONFIGURATION=npdu_test_client_npdu.json
+
+ # Start the routing manager daemon
+ export VSOMEIP_APPLICATION_NAME=npdu_test_routing_manager_daemon_client_side
+ ./npdu_test_rmd_client_side &
+
+ # sleep 1 second to let the RMD start up.
+ sleep 1
+ # Start client 1
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_one
+ ./npdu_test_client_1 $* &
+
+ # Start client 2
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_two
+ ./npdu_test_client_2 $* &
+
+ # Start client 3
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_three
+ ./npdu_test_client_3 $* &
+
+ # Start client 4
+ export VSOMEIP_APPLICATION_NAME=npdu_test_client_four
+ ./npdu_test_client_4 $* &
+}
+
+wait_for_bg_processes(){
+ # Wait until client and service are finished
+ for job in $(jobs -p)
+ do
+ # FAIL gets incremented if one of the jobs exits
+ # with a non-zero exit code
+ wait $job || ((FAIL+=1))
+ done
+
+ # Check if everything exited successfully
+ if [ $FAIL -eq 0 ]
+ then
+ echo "All clients exited successfully"
+ else
+ echo "Something went wrong"
+ exit 1
+ fi
+}
+
+
+echo "Contacting services via $PROTOCOL"
+start_clients --$PROTOCOL --max-payload-size $PROTOCOL --$COMMUNICATION_MODE
+wait_for_bg_processes
+
+exit 0
diff --git a/test/network_tests/npdu_tests/npdu_test_globals.hpp b/test/network_tests/npdu_tests/npdu_test_globals.hpp
new file mode 100644
index 0000000..8cee3ee
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_globals.hpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef NPDU_TESTS_NPDU_TEST_GLOBALS_HPP_
+#define NPDU_TESTS_NPDU_TEST_GLOBALS_HPP_
+
+namespace npdu_test {
+
+// Routing manager daemon
+constexpr vsomeip::client_t RMD_CLIENT_ID_CLIENT_SIDE = 0x6666;
+constexpr vsomeip::service_t RMD_SERVICE_ID_CLIENT_SIDE = 0x6666;
+
+constexpr vsomeip::client_t RMD_CLIENT_ID_SERVICE_SIDE = 0x6667;
+constexpr vsomeip::service_t RMD_SERVICE_ID_SERVICE_SIDE = 0x6667;
+
+constexpr vsomeip::instance_t RMD_INSTANCE_ID = 0x6666;
+constexpr vsomeip::method_t RMD_SHUTDOWN_METHOD_ID = 0x6666;
+
+
+
+constexpr vsomeip::method_t NPDU_SERVICE_SHUTDOWNMETHOD_ID = 0x7777;
+
+constexpr std::array<vsomeip::service_t, 4> service_ids =
+ { 0x1000, 0x2000, 0x3000, 0x4000 };
+constexpr std::array<vsomeip::instance_t, 4> instance_ids =
+ { service_ids[0] >> 12,
+ service_ids[1] >> 12,
+ service_ids[2] >> 12,
+ service_ids[3] >> 12 };
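+// i.e. instance ids 0x0001, 0x0002, 0x0003 and 0x0004 (service id >> 12)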
+constexpr std::array<std::array<vsomeip::method_t, 4>, 4> method_ids = {{
+ { service_ids[0]+1, service_ids[0]+2, service_ids[0]+3, service_ids[0]+4 },
+ { service_ids[1]+1, service_ids[1]+2, service_ids[1]+3, service_ids[1]+4 },
+ { service_ids[2]+1, service_ids[2]+2, service_ids[2]+3, service_ids[2]+4 },
+ { service_ids[3]+1, service_ids[3]+2, service_ids[3]+3, service_ids[3]+4 }
+}};
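+// i.e. methods 0x1001-0x1004 for service 0x1000, 0x2001-0x2004 for 0x2000 and
+// so on, matching the method ids used in the debounce-times configuration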
+
+constexpr std::array<vsomeip::client_t, 4> client_ids_clients =
+ { 0x1111, 0x2222, 0x3333, 0x4444 };
+
+constexpr std::array<vsomeip::client_t, 4> client_ids_services = service_ids;
+
+}
+#endif /* NPDU_TESTS_NPDU_TEST_GLOBALS_HPP_ */
diff --git a/test/network_tests/npdu_tests/npdu_test_rmd.cpp b/test/network_tests/npdu_tests/npdu_test_rmd.cpp
new file mode 100644
index 0000000..8e5451b
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_rmd.cpp
@@ -0,0 +1,160 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include <atomic>
+
+#include "../npdu_tests/npdu_test_rmd.hpp"
+
+#include <vsomeip/internal/logger.hpp>
+#include "npdu_test_globals.hpp"
+
+#include "../npdu_tests/npdu_test_globals.hpp"
+
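+// Note: this translation unit is presumably compiled twice - once with
+// RMD_CLIENT_SIDE and once with RMD_SERVICE_SIDE defined - to produce the
+// client-side and service-side routing manager daemon binaries started by the
+// *_start.sh scripts.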
+npdu_test_rmd::npdu_test_rmd() :
+ app_(vsomeip::runtime::get()->create_application()),
+ is_registered_(false),
+ blocked_(false),
+ offer_thread_(std::bind(&npdu_test_rmd::run, this))
+{
+ // TODO Auto-generated constructor stub
+}
+
+void npdu_test_rmd::init() {
+ std::lock_guard<std::mutex> its_lock(mutex_);
+
+ app_->init();
+
+#ifdef RMD_CLIENT_SIDE
+ app_->register_message_handler(npdu_test::RMD_SERVICE_ID_CLIENT_SIDE,
+#elif defined (RMD_SERVICE_SIDE)
+ app_->register_message_handler(npdu_test::RMD_SERVICE_ID_SERVICE_SIDE,
+#endif
+ npdu_test::RMD_INSTANCE_ID, npdu_test::RMD_SHUTDOWN_METHOD_ID,
+ std::bind(&npdu_test_rmd::on_message_shutdown,
+ this, std::placeholders::_1));
+
+ app_->register_state_handler(
+ std::bind(&npdu_test_rmd::on_state, this,
+ std::placeholders::_1));
+}
+
+void npdu_test_rmd::start() {
+ VSOMEIP_INFO << "Starting...";
+ app_->start();
+}
+
+void npdu_test_rmd::stop() {
+ VSOMEIP_INFO << "Stopping...";
+
+ app_->unregister_message_handler(npdu_test::RMD_SERVICE_ID_CLIENT_SIDE,
+ npdu_test::RMD_INSTANCE_ID, npdu_test::RMD_SHUTDOWN_METHOD_ID);
+ app_->unregister_state_handler();
+ offer_thread_.join();
+ app_->stop();
+}
+
+void npdu_test_rmd::on_state(
+ vsomeip::state_type_e _state) {
+ VSOMEIP_INFO << "Application " << app_->get_name() << " is "
+ << (_state == vsomeip::state_type_e::ST_REGISTERED ? "registered." :
+ "deregistered.");
+
+ if(_state == vsomeip::state_type_e::ST_REGISTERED)
+ {
+ if(!is_registered_)
+ {
+ std::lock_guard<std::mutex> its_lock(mutex_);
+ is_registered_ = true;
+ blocked_ = true;
+ // "start" the run method thread
+ condition_.notify_one();
+ }
+ }
+ else
+ {
+ is_registered_ = false;
+ }
+}
+
+void npdu_test_rmd::on_message_shutdown(
+ const std::shared_ptr<vsomeip::message>& _request) {
+ (void)_request;
+ std::shared_ptr<vsomeip::message> request = vsomeip::runtime::get()->create_request(false);
+#ifdef RMD_CLIENT_SIDE
+ static uint32_t counter = 0;
+ counter++;
+ VSOMEIP_INFO << counter << " of " << npdu_test::client_ids_clients.size()
+ << " clients are finished.";
+
+ if (counter == npdu_test::client_ids_clients.size()) {
+ VSOMEIP_INFO << "All clients are finished, notify routing manager daemon on service side.";
+ // notify the RMD_SERVICE_SIDE that it can shut down as well
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ request->set_service(npdu_test::RMD_SERVICE_ID_SERVICE_SIDE);
+ request->set_instance(npdu_test::RMD_INSTANCE_ID);
+ request->set_method(npdu_test::RMD_SHUTDOWN_METHOD_ID);
+ request->set_message_type(vsomeip::message_type_e::MT_REQUEST_NO_RETURN);
+ app_->send(request);
+ std::this_thread::sleep_for(std::chrono::seconds(5));
+ stop();
+ }
+#elif defined RMD_SERVICE_SIDE
+ VSOMEIP_INFO << "All clients are finished, shutting down services";
+ // shutdown all services
+ for(unsigned int i = 0; i < npdu_test::service_ids.size(); i++) {
+ request->set_service(npdu_test::service_ids[i]);
+ request->set_instance(npdu_test::instance_ids[i]);
+ request->set_method(npdu_test::NPDU_SERVICE_SHUTDOWNMETHOD_ID);
+ request->set_message_type(vsomeip::message_type_e::MT_REQUEST_NO_RETURN);
+ app_->send(request);
+ }
+ app_->stop_offer_service(npdu_test::RMD_SERVICE_ID_SERVICE_SIDE, npdu_test::RMD_INSTANCE_ID);
+
+ VSOMEIP_INFO << "Wait a few seconds until all services are shut down.";
+ std::atomic<bool> finished(false);
+ for (int i = 0; !finished && i < 20; i++) {
+ app_->get_offered_services_async(
+ vsomeip::offer_type_e::OT_REMOTE,
+ [&](const std::vector<std::pair<vsomeip::service_t,
+ vsomeip::instance_t>> &_services){
+ if (_services.empty()) {
+ finished = true;
+ }
+ });
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ }
+ stop();
+#endif
+}
+
+void npdu_test_rmd::join_shutdown_thread() {
+ shutdown_thread_.join();
+}
+
+void npdu_test_rmd::run() {
+ std::unique_lock<std::mutex> its_lock(mutex_);
+ while (!blocked_)
+ condition_.wait(its_lock);
+#ifdef RMD_CLIENT_SIDE
+ app_->offer_service(npdu_test::RMD_SERVICE_ID_CLIENT_SIDE, npdu_test::RMD_INSTANCE_ID);
+#elif defined (RMD_SERVICE_SIDE)
+ app_->offer_service(npdu_test::RMD_SERVICE_ID_SERVICE_SIDE, npdu_test::RMD_INSTANCE_ID);
+#endif
+}
+
+TEST(someip_npdu_test, offer_routing_manager_functionality)
+{
+ npdu_test_rmd daemon;
+ daemon.init();
+ daemon.start();
+}
+
+int main(int argc, char** argv)
+{
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+
+
diff --git a/test/network_tests/npdu_tests/npdu_test_rmd.hpp b/test/network_tests/npdu_tests/npdu_test_rmd.hpp
new file mode 100644
index 0000000..0b1e28d
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_rmd.hpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef NPDU_TESTS_NPDUTESTROUTINGMANAGERDAEMON_HPP_
+#define NPDU_TESTS_NPDUTESTROUTINGMANAGERDAEMON_HPP_
+
+#include <gtest/gtest.h>
+
+#include <vsomeip/vsomeip.hpp>
+
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <functional>
+
+class npdu_test_rmd {
+
+public:
+ npdu_test_rmd();
+ void init();
+ void start();
+ void stop();
+ void on_state(vsomeip::state_type_e _state);
+ void on_message_shutdown(const std::shared_ptr<vsomeip::message> &_request);
+ void join_shutdown_thread();
+ void run();
+
+private:
+ std::shared_ptr<vsomeip::application> app_;
+ bool is_registered_;
+
+ std::mutex mutex_;
+ std::mutex mutex2_;
+ std::condition_variable condition_;
+ std::condition_variable condition2_;
+ bool blocked_;
+ bool blocked2_;
+ std::thread offer_thread_;
+ std::thread shutdown_thread_;
+
+};
+
+#endif /* NPDU_TESTS_NPDUTESTROUTINGMANAGERDAEMON_HPP_ */
diff --git a/test/network_tests/npdu_tests/npdu_test_service.cpp b/test/network_tests/npdu_tests/npdu_test_service.cpp
new file mode 100644
index 0000000..b4ee984
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_service.cpp
@@ -0,0 +1,306 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "../npdu_tests/npdu_test_service.hpp"
+#include "../npdu_tests/npdu_test_globals.hpp"
+
+#include <vsomeip/internal/logger.hpp>
+#include "../../implementation/configuration/include/configuration.hpp"
+#include "../../implementation/configuration/include/configuration_impl.hpp"
+#include "../../implementation/configuration/include/configuration_plugin.hpp"
+#include "../../implementation/plugin/include/plugin_manager_impl.hpp"
+
+
+
+// this macro is set at compile time to create 4 service binaries, each of
+// which offers one service.
+// Based on this number the service id, instance id and method ids are
+// selected from the arrays defined in npdu_test_globals.hpp
+#ifndef SERVICE_NUMBER
+#define SERVICE_NUMBER 0
+#endif
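+// e.g. SERVICE_NUMBER 0 yields the binary that offers service 0x1000, instance
+// 0x0001 with methods 0x1001-0x1004 (see npdu_test_globals.hpp)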
+
+npdu_test_service::npdu_test_service(vsomeip::service_t _service_id,
+ vsomeip::instance_t _instance_id,
+ std::array<vsomeip::method_t, 4> _method_ids,
+ std::array<std::chrono::nanoseconds, 4> _debounce_times,
+ std::array<std::chrono::nanoseconds, 4> _max_retention_times) :
+ app_(vsomeip::runtime::get()->create_application()),
+ is_registered_(false),
+ method_ids_(_method_ids),
+ debounce_times_(_debounce_times),
+ max_retention_times_(_max_retention_times),
+ service_id_(_service_id),
+ instance_id_(_instance_id),
+ blocked_(false),
+ allowed_to_shutdown_(false),
+ number_of_received_messages_(0),
+ offer_thread_(std::bind(&npdu_test_service::run, this)),
+ shutdown_thread_(std::bind(&npdu_test_service::stop, this))
+{
+ // init timepoints of last received message to one hour before now.
+ // needed so that the first arriving message isn't registered as having
+ // undershot the debounce time
+ for(auto &tp : timepoint_last_received_message_) {
+ tp = std::chrono::steady_clock::now() - std::chrono::hours(1);
+ }
+}
+
+void npdu_test_service::init()
+{
+ std::lock_guard<std::mutex> its_lock(mutex_);
+
+ app_->init();
+
+ register_message_handler<0>();
+ register_message_handler<1>();
+ register_message_handler<2>();
+ register_message_handler<3>();
+
+ app_->register_message_handler(service_id_, instance_id_,
+ npdu_test::NPDU_SERVICE_SHUTDOWNMETHOD_ID,
+ std::bind(&npdu_test_service::on_message_shutdown, this,
+ std::placeholders::_1));
+
+ app_->register_state_handler(
+ std::bind(&npdu_test_service::on_state, this,
+ std::placeholders::_1));
+}
+
+template <int method_idx>
+void npdu_test_service::register_message_handler() {
+ app_->register_message_handler(service_id_, instance_id_, method_ids_[method_idx],
+ std::bind(&npdu_test_service::on_message<method_idx>, this,
+ std::placeholders::_1));
+}
+
+void npdu_test_service::start()
+{
+ VSOMEIP_INFO << "Starting...";
+ app_->start();
+}
+
+void npdu_test_service::stop()
+{
+ std::unique_lock<std::mutex> its_lock(shutdown_mutex_);
+ while (!allowed_to_shutdown_) {
+ shutdown_condition_.wait(its_lock);
+ }
+
+ VSOMEIP_INFO << "Stopping...";
+ if (!undershot_debounce_times_.empty()) {
+ std::chrono::microseconds sum(0);
+ for (const auto t : undershot_debounce_times_) {
+ sum += t;
+ }
+ double average = static_cast<double>(sum.count())/static_cast<double>(undershot_debounce_times_.size());
+ VSOMEIP_INFO << "["
+ << std::setw(4) << std::setfill('0') << std::hex << service_id_ << "."
+ << std::setw(4) << std::setfill('0') << std::hex << instance_id_ << "]: "
+ << " Debounce time was undershot " << std::dec << undershot_debounce_times_.size() << "/" << number_of_received_messages_
+ << "(" << std::setprecision(2) << (static_cast<double>(undershot_debounce_times_.size()) / static_cast<double>(number_of_received_messages_)) * 100.00
+ << "%) on average: " << std::setprecision(4) << average << "µs";
+ }
+ app_->unregister_message_handler(service_id_, instance_id_, method_ids_[0]);
+ app_->unregister_message_handler(service_id_, instance_id_, method_ids_[1]);
+ app_->unregister_message_handler(service_id_, instance_id_, method_ids_[2]);
+ app_->unregister_message_handler(service_id_, instance_id_, method_ids_[3]);
+ app_->unregister_message_handler(service_id_,
+ instance_id_, npdu_test::NPDU_SERVICE_SHUTDOWNMETHOD_ID);
+ app_->unregister_state_handler();
+ offer_thread_.join();
+ stop_offer();
+ app_->stop();
+}
+
+void npdu_test_service::offer()
+{
+ app_->offer_service(service_id_, instance_id_);
+}
+
+void npdu_test_service::stop_offer()
+{
+ app_->stop_offer_service(service_id_, instance_id_);
+}
+
+void npdu_test_service::join_shutdown_thread() {
+ shutdown_thread_.join();
+}
+
+void npdu_test_service::on_state(vsomeip::state_type_e _state)
+{
+ VSOMEIP_INFO << "Application " << app_->get_name() << " is "
+ << (_state == vsomeip::state_type_e::ST_REGISTERED ? "registered." :
+ "deregistered.");
+
+ if(_state == vsomeip::state_type_e::ST_REGISTERED)
+ {
+ if(!is_registered_)
+ {
+ std::lock_guard<std::mutex> its_lock(mutex_);
+ is_registered_ = true;
+ blocked_ = true;
+ // "start" the run method thread
+ condition_.notify_one();
+ }
+ }
+ else
+ {
+ is_registered_ = false;
+ }
+}
+
+template<int method_idx>
+void npdu_test_service::check_times() {
+ std::lock_guard<std::mutex> its_lock(timepoint_mutexes_[method_idx]);
+ // what time is it?
+ std::chrono::steady_clock::time_point now =
+ std::chrono::steady_clock::now();
+ // how long is it since we received the last message?
+ std::chrono::nanoseconds time_since_last_message =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(
+ now - timepoint_last_received_message_[method_idx]);
+ // store the current time
+ timepoint_last_received_message_[method_idx] = now;
+
+ // check if the debounce time was undershot
+ if (time_since_last_message < debounce_times_[method_idx]) {
+ const auto time_undershot = std::chrono::duration_cast<
+ std::chrono::microseconds>(debounce_times_[method_idx] - time_since_last_message);
+ undershot_debounce_times_.push_back(time_undershot);
+ }
+ // check if maximum retention time was exceeded
+ // Disabled as it can't be guaranteed that a message leaves the client
+ // endpoint exactly once per max retention time.
+#if 0
+ if(time_since_last_message > max_retention_times_[method_idx]) {
+ VSOMEIP_ERROR << std::setw(4) << std::setfill('0') << std::hex
+ << service_id_ << ":" << std::setw(4) << std::setfill('0')
+ << std::hex << instance_id_ << ":" << std::setw(4) << std::setfill('0')
+ << std::hex << npdu_test::method_ids[SERVICE_NUMBER][method_idx]
+ << ": max_retention_time exceeded by: " << std::dec
+ << std::chrono::duration_cast<std::chrono::milliseconds>(
+ time_since_last_message - max_retention_times_[method_idx]).count()
+ << "ms";
+ GTEST_FATAL_FAILURE_("Max retention time was exceeded");
+ }
+#endif
+}
+
+template<int method_idx>
+void npdu_test_service::on_message(const std::shared_ptr<vsomeip::message>& _request)
+{
+ number_of_received_messages_++;
+ check_times<method_idx>();
+ VSOMEIP_DEBUG << __func__ << " 0x" << std::setw(4) << std::setfill('0') << std::hex
+ << method_ids_[method_idx] << " payload size: "
+ << std::dec << _request->get_payload()->get_length();
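+ // Only answer request/response calls; fire & forget requests (MT_REQUEST_NO_RETURN) don't get a response.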
+ if(_request->get_message_type() != vsomeip::message_type_e::MT_REQUEST_NO_RETURN) {
+ std::shared_ptr<vsomeip::message> its_response =
+ vsomeip::runtime::get()->create_response(_request);
+ app_->send(its_response);
+ }
+}
+
+void npdu_test_service::on_message_shutdown(
+ const std::shared_ptr<vsomeip::message>& _request)
+{
+ (void)_request;
+ VSOMEIP_DEBUG << "Number of received messages: " << number_of_received_messages_;
+ VSOMEIP_INFO << "Shutdown method was called, going down now.";
+
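+ // Wake up stop(), which waits on shutdown_condition_ until allowed_to_shutdown_ is set.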
+ std::lock_guard<std::mutex> its_lock(shutdown_mutex_);
+ allowed_to_shutdown_ = true;
+ shutdown_condition_.notify_one();
+}
+
+void npdu_test_service::run()
+{
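+ // Wait until on_state() has reported successful registration, then offer the service.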
+ std::unique_lock<std::mutex> its_lock(mutex_);
+ while (!blocked_)
+ condition_.wait(its_lock);
+
+ offer();
+}
+
+TEST(someip_npdu_test, offer_service_and_check_debounce_times)
+{
+ // get the configuration
+ std::shared_ptr<vsomeip::configuration> its_configuration;
+ auto its_plugin = vsomeip::plugin_manager::get()->get_plugin(
+ vsomeip::plugin_type_e::CONFIGURATION_PLUGIN, VSOMEIP_CFG_LIBRARY);
+ if (its_plugin) {
+ auto its_config_plugin = std::dynamic_pointer_cast<vsomeip::configuration_plugin>(its_plugin);
+ if (its_config_plugin) {
+ its_configuration = its_config_plugin->get_configuration("","");
+ }
+ }
+ if (!its_configuration) {
+ ADD_FAILURE() << "No configuration object available. "
+ "Loading the configuration plugin or the configuration itself failed!";
+ return;
+ }
+
+ // used to store the configured debounce and maximum retention times
+ std::array<std::chrono::nanoseconds, 4> debounce_times;
+ std::array<std::chrono::nanoseconds, 4> max_retention_times;
+
+
+ // Query the debounce times from the configuration. We want to know the
+ // debounce times the _clients_ of this service have to comply with when
+ // they send requests to this service, so that we can check on the service
+ // side whether they adhere to them.
+ // Client one only calls method one, client two only calls method two,
+ // and so on.
+ for(int i = 0; i < 4; i++) {
+ its_configuration->get_configured_timing_requests(
+ npdu_test::service_ids[SERVICE_NUMBER],
+ its_configuration->get_unicast_address().to_string(),
+ its_configuration->get_unreliable_port(
+ npdu_test::service_ids[SERVICE_NUMBER],
+ npdu_test::instance_ids[SERVICE_NUMBER]),
+ npdu_test::method_ids[SERVICE_NUMBER][i],
+ &debounce_times[i],
+ &max_retention_times[i]);
+ if (debounce_times[i] == std::chrono::nanoseconds(VSOMEIP_DEFAULT_NPDU_DEBOUNCING_NANO) &&
+ max_retention_times[i] == std::chrono::nanoseconds(VSOMEIP_DEFAULT_NPDU_MAXIMUM_RETENTION_NANO)) {
+ // No timings were configured - the checks in check_times() should
+ // never report an error in this case.
+ // Set the debounce time to 0; it can't be undershot.
+ debounce_times[i] = std::chrono::nanoseconds(0);
+ // Set the max retention time to its maximum; it won't be exceeded.
+ max_retention_times[i] = std::chrono::nanoseconds::max();
+ }
+ }
+
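+ // Create the service under test: it offers the service, counts incoming requests per method
+ // and records undershot debounce times until the shutdown method is called.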
+ npdu_test_service test_service(
+ npdu_test::service_ids[SERVICE_NUMBER],
+ npdu_test::instance_ids[SERVICE_NUMBER],
+ npdu_test::method_ids[SERVICE_NUMBER],
+ debounce_times, max_retention_times);
+ test_service.init();
+ test_service.start();
+ test_service.join_shutdown_thread();
+}
+
+#if defined(__linux__) || defined(ANDROID)
+int main(int argc, char** argv)
+{
+ int i = 1;
+ while (i < argc)
+ {
+ if(std::string("--help") == argv[i])
+ {
+ VSOMEIP_INFO << "Parameters:\n"
+ << "--help: print this help";
+ }
+ i++;
+ }
+
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+#endif
diff --git a/test/network_tests/npdu_tests/npdu_test_service.hpp b/test/network_tests/npdu_tests/npdu_test_service.hpp
new file mode 100644
index 0000000..bef0680
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_service.hpp
@@ -0,0 +1,64 @@
+// Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef NPDUTESTSERVICE_HPP_
+#define NPDUTESTSERVICE_HPP_
+#include <gtest/gtest.h>
+
+#include <vsomeip/vsomeip.hpp>
+
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <functional>
+#include <chrono>
+#include <deque>
+
+class npdu_test_service
+{
+public:
+ npdu_test_service(vsomeip::service_t _service_id,
+ vsomeip::instance_t _instance_id,
+ std::array<vsomeip::method_t, 4> _method_ids,
+ std::array<std::chrono::nanoseconds, 4> _debounce_times,
+ std::array<std::chrono::nanoseconds, 4> _max_retention_times);
+ void init();
+ void start();
+ void stop();
+ void offer();
+ void stop_offer();
+ void join_shutdown_thread();
+ void on_state(vsomeip::state_type_e _state);
+ template<int method_idx> void on_message(const std::shared_ptr<vsomeip::message> &_request);
+ void on_message_shutdown(const std::shared_ptr<vsomeip::message> &_request);
+ void run();
+
+private:
+ template<int method_idx> void check_times();
+ template <int method_idx> void register_message_handler();
+
+private:
+ std::shared_ptr<vsomeip::application> app_;
+ bool is_registered_;
+ std::array<vsomeip::method_t, 4> method_ids_;
+ std::array<std::chrono::nanoseconds, 4> debounce_times_;
+ std::array<std::chrono::nanoseconds, 4> max_retention_times_;
+ std::array<std::chrono::steady_clock::time_point, 4> timepoint_last_received_message_;
+ std::array<std::mutex, 4> timepoint_mutexes_;
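+ // One entry per request that undershot its configured debounce time, storing the undershoot amount.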
+ std::deque<std::chrono::microseconds> undershot_debounce_times_;
+ vsomeip::service_t service_id_;
+ vsomeip::instance_t instance_id_;
+ std::mutex mutex_;
+ std::condition_variable condition_;
+ bool blocked_;
+ std::mutex shutdown_mutex_;
+ std::condition_variable shutdown_condition_;
+ bool allowed_to_shutdown_;
+ std::uint32_t number_of_received_messages_;
+ std::thread offer_thread_;
+ std::thread shutdown_thread_;
+};
+
+#endif /* NPDUTESTSERVICE_HPP_ */
diff --git a/test/network_tests/npdu_tests/npdu_test_service_no_npdu_start.sh b/test/network_tests/npdu_tests/npdu_test_service_no_npdu_start.sh
new file mode 100755
index 0000000..cf05aaa
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_service_no_npdu_start.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Purpose: This script is needed to start the routing manager daemon and the
+# services with one command. This is necessary as ctest - which is used to run
+# the tests - isn't able to start multiple binaries for one testcase. Therefore
+# the testcase simply executes this script. This script then runs the routing
+# manager daemon and the services and checks if all of them exit successfully.
+
+FAIL=0
+
+start_services(){
+ export VSOMEIP_CONFIGURATION=npdu_test_service_no_npdu.json
+
+ # Start the routing manager daemon
+ export VSOMEIP_APPLICATION_NAME=npdu_test_routing_manager_daemon_service_side
+ ./npdu_test_rmd_service_side &
+
+ # Sleep 1 second to let the RMD start up.
+ sleep 1
+
+ # Start service 1
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_one
+ ./npdu_test_service_1 $* &
+
+ # Start service 2
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_two
+ ./npdu_test_service_2 $* &
+
+ # Start service 3
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_three
+ ./npdu_test_service_3 $* &
+
+ # Start service 4
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_four
+ ./npdu_test_service_4 $* &
+}
+
+wait_for_bg_processes(){
+ # Wait until the routing manager daemon and all services have finished
+ for job in $(jobs -p)
+ do
+ # FAIL is incremented if one of the jobs exits
+ # with a non-zero exit code
+ wait $job || ((FAIL+=1))
+ done
+
+ # Check if everything exited successfully
+ if [ $FAIL -eq 0 ]
+ then
+ echo "All services exited successfully"
+ else
+ echo "Something went wrong"
+ exit 1
+ fi
+}
+
+start_services "$@"
+wait_for_bg_processes
+
+exit 0
diff --git a/test/network_tests/npdu_tests/npdu_test_service_npdu_start.sh b/test/network_tests/npdu_tests/npdu_test_service_npdu_start.sh
new file mode 100755
index 0000000..0ca238b
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_service_npdu_start.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Purpose: This script is needed to start the routing manager daemon and the
+# services with one command. This is necessary as ctest - which is used to run
+# the tests - isn't able to start multiple binaries for one testcase. Therefore
+# the testcase simply executes this script. This script then runs the routing
+# manager daemon and the services and checks if all of them exit successfully.
+
+FAIL=0
+
+start_services(){
+ export VSOMEIP_CONFIGURATION=npdu_test_service_npdu.json
+
+ # Start the routing manager daemon
+ export VSOMEIP_APPLICATION_NAME=npdu_test_routing_manager_daemon_service_side
+ ./npdu_test_rmd_service_side &
+
+ # Sleep 1 second to let the RMD start up.
+ sleep 1
+
+ # Start service 1
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_one
+ ./npdu_test_service_1 $* &
+
+ # Start service 2
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_two
+ ./npdu_test_service_2 $* &
+
+ # Start service 3
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_three
+ ./npdu_test_service_3 $* &
+
+ # Start service 4
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_four
+ ./npdu_test_service_4 $* &
+}
+
+wait_for_bg_processes(){
+ # Wait until the routing manager daemon and all services have finished
+ for job in $(jobs -p)
+ do
+ # FAIL is incremented if one of the jobs exits
+ # with a non-zero exit code
+ wait $job || ((FAIL+=1))
+ done
+
+ # Check if everything exited successfully
+ if [ $FAIL -eq 0 ]
+ then
+ echo "All services exited successfully"
+ else
+ echo "Something went wrong"
+ exit 1
+ fi
+}
+
+start_services "$@"
+wait_for_bg_processes
+
+exit 0
diff --git a/test/network_tests/npdu_tests/npdu_test_starter.sh b/test/network_tests/npdu_tests/npdu_test_starter.sh
new file mode 100755
index 0000000..51904c9
--- /dev/null
+++ b/test/network_tests/npdu_tests/npdu_test_starter.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+# Copyright (C) 2015-2019 Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Purpose: This script is needed to start the routing manager daemon and the
+# services with one command. This is necessary as ctest - which is used to run
+# the tests - isn't able to start multiple binaries for one testcase. Therefore
+# the testcase simply executes this script. This script then runs the routing
+# manager daemon and the services and checks if all of them exit successfully.
+
+FAIL=0
+
+if [ $# -lt 2 ]; then
+ echo "Error: Please pass a protocol and communication mode to this script."
+ echo "Valid protocols are [UDP,TCP]."
+ echo "Valid communication modes are [sync, async]."
+ echo "For example $> $0 UDP sync"
+ exit 1;
+fi
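+# The protocol and communication mode are passed on to the npdu test client start script further below.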
+
+start_services(){
+ export VSOMEIP_CONFIGURATION=npdu_test_service_npdu.json
+
+ # Start the routing manager daemon
+ export VSOMEIP_APPLICATION_NAME=npdu_test_routing_manager_daemon_service_side
+ ./npdu_test_rmd_service_side &
+
+ # Sleep 1 second to let the RMD start up.
+ sleep 1
+
+ # Start service 1
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_one
+ ./npdu_test_service_1 $* &
+
+ # Start service 2
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_two
+ ./npdu_test_service_2 $* &
+
+ # Start service 3
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_three
+ ./npdu_test_service_3 $* &
+
+ # Start service 4
+ export VSOMEIP_APPLICATION_NAME=npdu_test_service_four
+ ./npdu_test_service_4 $* &
+}
+
+wait_for_bg_processes(){
+ # Wait until all background jobs have finished
+ for job in $(jobs -p)
+ do
+ # FAIL is incremented if one of the jobs exits
+ # with a non-zero exit code
+ wait $job || ((FAIL+=1))
+ done
+
+ # Check if everything exited successfully
+ if [ $FAIL -eq 0 ]
+ then
+ echo "All services exited successfully"
+ else
+ echo "Something went wrong"
+ exit 1
+ fi
+}
+
+
+start_services "$@"
+
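+# Start the clients: via ssh on an LXC slave, inside a Docker container, or manually as described in the message below.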
+if [ ! -z "$USE_LXC_TEST" ]; then
+ echo "starting magic cookies test on slave LXC"
+ ssh -tt -i $SANDBOX_ROOT_DIR/commonapi_main/lxc-config/.ssh/mgc_lxc/rsa_key_file.pub -o StrictHostKeyChecking=no root@$LXC_TEST_SLAVE_IP "bash -ci \"set -m; cd \\\$SANDBOX_TARGET_DIR/vsomeip_lib/test/network_tests; ./npdu_test_client_npdu_start.sh $*\"" &
+elif [ ! -z "$USE_DOCKER" ]; then
+ docker exec $DOCKER_IMAGE sh -c "cd $DOCKER_TESTS && ./npdu_test_client_npdu_start.sh $*" &
+else
+sleep 1
+cat <<End-of-message
+*******************************************************************************
+*******************************************************************************
+** Please now run:
+** npdu_test_client_npdu_start.sh $*
+** from an external host to successfully complete this test.
+**
+** You probably will need to adapt the 'unicast' settings in
+** npdu_test_client_npdu.json and
+** npdu_test_service_npdu.json to your personal setup.
+*******************************************************************************
+*******************************************************************************
+End-of-message
+fi
+
+wait_for_bg_processes
+
+exit 0